Dataset columns (dtype and the value ranges reported by the dataset viewer):

    query             stringlengths     7 .. 3.85k
    document          stringlengths     11 .. 430k
    metadata          dict
    negatives         sequencelengths   0 .. 101
    negative_scores   sequencelengths   0 .. 101
    document_score    stringlengths     3 .. 10
    document_rank     stringclasses     102 values

Each record below lists its field values in this column order: query, document, metadata, negatives, negative_scores, document_score, document_rank.
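For loading records programmatically, here is a minimal Go sketch that mirrors the columns above and decodes one JSON-encoded record. The struct names (Row, Metadata), the field types, and the abbreviated sample payload in main are assumptions inferred from the viewer stats and the records below, not an official schema; note that scores and ranks are serialized as strings in this dump.

package main

import (
	"encoding/json"
	"fmt"
)

// Row mirrors the dataset columns listed above.
// Field names and types are assumptions, not an official schema.
type Row struct {
	Query          string   `json:"query"`
	Document       string   `json:"document"`
	Metadata       Metadata `json:"metadata"`
	Negatives      []string `json:"negatives"`
	NegativeScores []string `json:"negative_scores"` // string-encoded floats in this dump
	DocumentScore  string   `json:"document_score"`  // reported as a string column (lengths 3..10)
	DocumentRank   string   `json:"document_rank"`   // stringclasses, e.g. "0" or "-1"
}

// Metadata matches the objective blob repeated in every record shown here.
type Metadata struct {
	Objective struct {
		Self    [][]string `json:"self"`
		Paired  [][]string `json:"paired"`
		Triplet [][]string `json:"triplet"`
	} `json:"objective"`
}

func main() {
	// A hypothetical, abbreviated record in the same shape as those below.
	raw := `{"query":"WriteResponse to the client","document":"func ...","metadata":{"objective":{"self":[],"paired":[],"triplet":[["query","document","negatives"]]}},"negatives":["func ..."],"negative_scores":["0.813"],"document_score":"0.0","document_rank":"-1"}`
	var r Row
	if err := json.Unmarshal([]byte(raw), &r); err != nil {
		panic(err)
	}
	fmt.Println(r.Query, r.Metadata.Objective.Triplet, r.DocumentRank)
}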
SetPayload sets the payload to the get search default response
func (o *GetSearchDefault) SetPayload(payload *models.Error) {
	o.Payload = payload
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *GetSearchOK) SetPayload(payload *models.User) {\n\to.Payload = payload\n}", "func (o *GetIndexSearchInternalServerError) SetPayload(payload *GetIndexSearchInternalServerErrorBody) {\n\to.Payload = payload\n}", "func (o *GetIndexSearchOK) SetPayload(payload *GetIndexSearchOKBody) {\n\to.Payload = payload\n}", "func (o *GetmoviesinfoDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetServicesDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetRepositoryInfoDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetLegacyUserSearchKeywordOK) SetPayload(payload *models.SearchUsersByKeyword) {\n\to.Payload = payload\n}", "func (o *GetIndexSearchNotFound) SetPayload(payload *GetIndexSearchNotFoundBody) {\n\to.Payload = payload\n}", "func (o *GetIndexSearchBadRequest) SetPayload(payload *GetIndexSearchBadRequestBody) {\n\to.Payload = payload\n}", "func (o *GetApisOK) SetPayload(payload *models.APIMeta) {\n\to.Payload = payload\n}", "func (o *GetProviderRegistersDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *NewDiscoveryDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetAllReposDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetProjectProjectNameServiceServiceNameResourceDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetPrefilterOK) SetPayload(payload *models.Prefilter) {\n\to.Payload = payload\n}", "func (o *ArtifactListerNotFound) SetPayload(payload *weles.ErrResponse) {\n\to.Payload = payload\n}", "func (o *GetReadyDefault) SetPayload(payload *models.ReturnCode) {\n\to.Payload = payload\n}", "func (o *AddItemDefault) SetPayload(payload models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *GetS3BackupDefault) SetPayload(payload *models.Response) {\n\to.Payload = payload\n}", "func (o *ShopGetProductDefault) SetPayload(payload *models.RuntimeError) {\n\to.Payload = payload\n}", "func (o *GetInteractionsNotFound) SetPayload(payload *models.APIError) {\n\to.Payload = payload\n}", "func (o *ServiceInstanceLastOperationGetNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetServicesNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetfeedsDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetRepositoryInfoNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetPracticesDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetmoviesinfoInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetBackendDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetServiceInstanceByNameDefault) SetPayload(payload *v1.Error) {\n\to.Payload = payload\n}", "func (o *GetSectionOK) SetPayload(payload *models.SectionResponse) {\n\to.Payload = payload\n}", "func (o *GetTagDefault) SetPayload(payload models.Error) {\n\to.Payload = payload\n}", "func (o *PostWordDefault) SetPayload(payload *models.ReturnCode) {\n\to.Payload = payload\n}", "func (o *PutSlideSuperlikeDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetImagesListDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *UpdateClusterNotFound) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o 
*GetProjectProjectNameServiceServiceNameResourceNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *WeaviateThingsGetOK) SetPayload(payload *models.ThingGetResponse) {\n\to.Payload = payload\n}", "func (o *GetRepositoryInfoOK) SetPayload(payload *models.RepositoryInfo) {\n\to.Payload = payload\n}", "func (o *GetVMVolumeDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *SearchTournamentsOK) SetPayload(payload []*models.Tournament) {\n\to.Payload = payload\n}", "func (o *GetV0AuthCallbackDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *NrActivityListSuggestDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetResetPasswordRequestEmailNotFound) SetPayload(payload *models.GeneralResponse) {\n\to.Payload = payload\n}", "func (o *GetNamespacedNotebooksNotFound) SetPayload(payload *models.Error) {\r\n\to.Payload = payload\r\n}", "func (o *GetHealthzInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *GetSectionNotFound) SetPayload(payload *models.NotFound) {\n\to.Payload = payload\n}", "func (o *GetDocumentNotFound) SetPayload(payload *ghcmessages.Error) {\n\to.Payload = payload\n}", "func (o *WeaviateActionsPatchOK) SetPayload(payload *models.ActionGetResponse) {\n\to.Payload = payload\n}", "func (o *GetTaskSyncNotFound) SetPayload(payload interface{}) {\n\to.Payload = payload\n}", "func (o *HealthGetDefault) SetPayload(payload interface{}) {\n\to.Payload = payload\n}", "func (o *GetHealthzNotImplemented) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *GetNamespacedNotebooksOK) SetPayload(payload *models.GetNotebooksResponse) {\r\n\to.Payload = payload\r\n}", "func (o *GetRepositoryInfoInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetDocumentOK) SetPayload(payload *ghcmessages.Document) {\n\to.Payload = payload\n}", "func (o *GetModelNotFound) SetPayload(payload *restmodels.Error) {\n\to.Payload = payload\n}", "func (o *GetRefreshTokenNotFound) SetPayload(payload *models.GeneralResponse) {\n\to.Payload = payload\n}", "func (o *GetGateSourceByGateNameAndMntNotFound) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *GetTaskDetailsDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *ServiceInstanceLastOperationGetDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetServicesHaproxyRuntimeAclsIDDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *PutSlideLikeDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetAllStorageNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetTradesByAccountDefault) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *UpdateMoveTaskOrderPostCounselingInformationNotFound) SetPayload(payload interface{}) {\n\to.Payload = payload\n}", "func (o *GetBackupRuntimeEnvironmentsNotFound) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *GetProviderRegionByIDInternalServerError) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *GetClusterInstallConfigNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *DescribeDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetApisInternalServerError) SetPayload(payload *models.Error) 
{\n\to.Payload = payload\n}", "func (o *GetHealthzOK) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *GetAllReposOK) SetPayload(payload *models.ResourceArrayData) {\n\to.Payload = payload\n}", "func (o *PostRegisterDetailsNotFound) SetPayload(payload *models.GeneralResponse) {\n\to.Payload = payload\n}", "func (o *GetPaymentNotFound) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *GetInteractionsInternalServerError) SetPayload(payload *models.APIError) {\n\to.Payload = payload\n}", "func (o *GetDistrictForSchoolNotFound) SetPayload(payload *models.NotFound) {\n\to.Payload = payload\n}", "func (o *GetProjectProjectNameServiceServiceNameResourceOK) SetPayload(payload *models.Resources) {\n\to.Payload = payload\n}", "func (o *GetVSphereDatastoresInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GraphqlPostOK) SetPayload(payload *models.GraphQLResponse) {\n\to.Payload = payload\n}", "func (o *GetPaymentRequestEDINotFound) SetPayload(payload *supportmessages.ClientError) {\n\to.Payload = payload\n}", "func (o *AddKeypairInternalServerError) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *GetServicesHaproxyRuntimeAclsIDNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetProbeOK) SetPayload(payload *models.Response) {\n\to.Payload = payload\n}", "func (o *ArtifactListerInternalServerError) SetPayload(payload *weles.ErrResponse) {\n\to.Payload = payload\n}", "func (o *RegisterPluginDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *UpdateMovieNotFound) SetPayload(payload *models.Result) {\n\to.Payload = payload\n}", "func (o *GetTournamentOK) SetPayload(payload *models.Tournament) {\n\to.Payload = payload\n}", "func (o *GetResetPasswordRequestEmailInternalServerError) SetPayload(payload *models.GeneralResponse) {\n\to.Payload = payload\n}", "func (o *GetTaskSyncInternalServerError) SetPayload(payload interface{}) {\n\to.Payload = payload\n}", "func (o *GetSearchDefault) WithPayload(payload *models.Error) *GetSearchDefault {\n\to.Payload = payload\n\treturn o\n}", "func (o *ViewOneOrderDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *RetrieveCopyDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetGistsOK) SetPayload(payload models.Gists) {\n\to.Payload = payload\n}", "func (o *GetCardsDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *ServiceInstanceLastOperationGetOK) SetPayload(payload *models.LastOperationResource) {\n\to.Payload = payload\n}", "func (o *GetIdentityIDOK) SetPayload(payload *models.Identity) {\n\to.Payload = payload\n}", "func (o *GetVSphereDatastoresOK) SetPayload(payload []*models.VSphereDatastore) {\n\to.Payload = payload\n}", "func (o *BookChapListDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetBackendNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetModelOK) SetPayload(payload *restmodels.Model) {\n\to.Payload = payload\n}", "func (o *GetLinkInfoNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}" ]
[ "0.7497345", "0.7378032", "0.724183", "0.7240926", "0.7117507", "0.6984785", "0.69442767", "0.69177794", "0.68996066", "0.68838185", "0.6851332", "0.6850395", "0.6833772", "0.6832188", "0.6830926", "0.68229914", "0.68158305", "0.68103075", "0.6809424", "0.6805097", "0.6789021", "0.67687607", "0.6752598", "0.6752555", "0.6748101", "0.67427135", "0.6736068", "0.67349654", "0.6713108", "0.67114854", "0.6705477", "0.6702287", "0.6692221", "0.6689744", "0.66890377", "0.6678874", "0.667439", "0.66606915", "0.6660556", "0.6659903", "0.6656238", "0.6648009", "0.66463804", "0.66445744", "0.66398495", "0.66386014", "0.66274583", "0.6618739", "0.66172266", "0.66139024", "0.661218", "0.6612058", "0.6611587", "0.6611336", "0.6610799", "0.6610004", "0.6607641", "0.6607513", "0.66028655", "0.6601899", "0.6598698", "0.65967697", "0.6595219", "0.65909547", "0.6590578", "0.6590041", "0.6588817", "0.6583636", "0.6577299", "0.65767825", "0.6575317", "0.6572083", "0.65702766", "0.656889", "0.65643936", "0.65615463", "0.6559658", "0.6552451", "0.65520304", "0.65512437", "0.6548496", "0.6548055", "0.65416336", "0.6536022", "0.6531077", "0.6526124", "0.6525616", "0.652458", "0.65231097", "0.65219057", "0.65204126", "0.65203774", "0.6520048", "0.65152127", "0.65049475", "0.65031695", "0.6497688", "0.6493086", "0.6489315", "0.6484147" ]
0.81901646
0
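The last two fields tie together: in the record above, the positive document's score (0.81901646) exceeds every negative score (the best negative is 0.7497345), so document_rank is 0. A small Go sketch of that rank computation follows; treating the -1 rank in the next record (whose document_score is 0.0) as a sentinel for an unscored positive is an assumption, since the dataset does not document it.

package main

import (
	"fmt"
	"strconv"
)

// rankOfPositive counts how many negatives outscore the positive document,
// i.e. the positive's 0-based rank among all candidates.
func rankOfPositive(docScore float64, negScores []float64) int {
	rank := 0
	for _, s := range negScores {
		if s > docScore {
			rank++
		}
	}
	return rank
}

func main() {
	// A few of the string-encoded scores from the record above.
	raw := []string{"0.7497345", "0.7378032", "0.724183", "0.7240926"}
	negs := make([]float64, len(raw))
	for i, s := range raw {
		f, err := strconv.ParseFloat(s, 64)
		if err != nil {
			panic(err)
		}
		negs[i] = f
	}
	fmt.Println(rankOfPositive(0.81901646, negs)) // 0: the positive outscores every negative
}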
WriteResponse to the client
func (o *GetSearchDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
	rw.WriteHeader(o._statusCode)
	if o.Payload != nil {
		payload := o.Payload
		if err := producer.Produce(rw, payload); err != nil {
			panic(err) // let the recovery middleware deal with this
		}
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *Response) Write(w io.Writer) error", "func (c *Operation) writeResponse(rw http.ResponseWriter, status int, data []byte) { // nolint: unparam\n\trw.WriteHeader(status)\n\n\tif _, err := rw.Write(data); err != nil {\n\t\tlogger.Errorf(\"Unable to send error message, %s\", err)\n\t}\n}", "func WriteResponse(w http.ResponseWriter, mensaje string, code int) {\n\tmessage := myTypes.Respuesta{\n\t\tMessage: mensaje,\n\t}\n\tresponse, _ := json.Marshal(message)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(response)\n}", "func WriteResponse(w http.ResponseWriter, object interface{}, rerr *irma.RemoteError) {\n\tstatus, bts := JsonResponse(object, rerr)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n\t_, err := w.Write(bts)\n\tif err != nil {\n\t\tLogWarning(errors.WrapPrefix(err, \"failed to write response\", 0))\n\t}\n}", "func (o *PingOK) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(200)\n}", "func WriteResponse(w http.ResponseWriter, v interface{}, statusCode int) {\n\tresBody, err := json.Marshal(v)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(statusCode)\n\t_, _ = w.Write(resBody)\n}", "func WriteResponse(w http.ResponseWriter, code int, object interface{}) {\n\tdata, err := json.Marshal(object)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(data)\n}", "func (o *GetPingOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func writeResponse(body []byte, w *http.ResponseWriter) {\n\t(*w).Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\t_, err := (*w).Write(body)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\t(*w).WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func WriteResponse(w http.ResponseWriter, code int, resp interface{}) error {\n\tj, err := json.Marshal(resp)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn err\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\n\t_, err = w.Write(j)\n\treturn err\n}", "func writeResponse(w *http.ResponseWriter, res responseData, status int) {\n\tresJSON, err := json.Marshal(res)\n\tif err != nil {\n\t\thttp.Error(*w, \"Failed to parse struct `responseData` into JSON object\", http.StatusInternalServerError)\n\t}\n\n\t(*w).Header().Set(\"Content-Type\", \"application/json\")\n\t(*w).WriteHeader(status)\n\t(*w).Write(resJSON)\n}", "func WriteResponse(w http.ResponseWriter, d string) {\n\tw.WriteHeader(200)\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.Write([]byte(d))\n\treturn\n}", "func (o *CreateFacilityUsersOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func writeResponse(w http.ResponseWriter, response Response) {\n\tjson, err := json.Marshal(&response)\n\n\tif err != nil {\n\t\tfmt.Fprint(w, \"There was an error processing the request.\")\n\t}\n\n\tcommon.Log(fmt.Sprintf(\"Returning response %s\", json))\n\tfmt.Fprintf(w, \"%s\", json)\n}", "func (o *CreateProgramOK) WriteResponse(rw http.ResponseWriter, producer 
runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *DepositNewFileOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UpdateMedicineOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *CreateTaskCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Location\n\n\tlocation := o.Location.String()\n\tif location != \"\" {\n\t\trw.Header().Set(\"Location\", location)\n\t}\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func writeResponse(r *http.Request, w http.ResponseWriter, code int, resp interface{}) {\n\n\t// Deal with CORS\n\tif origin := r.Header.Get(\"Origin\"); origin != \"\" {\n\t\tw.Header().Set(\"Access-Control-Allow-Origin\", origin)\n\t\tw.Header().Set(\"Access-Control-Allow-Methods\", \"DELETE, GET, HEAD, OPTIONS, POST, PUT\")\n\t\tw.Header().Set(\"Access-Control-Allow-Credentials\", \"true\")\n\t\t// Allow any headers\n\t\tif wantedHeaders := r.Header.Get(\"Access-Control-Request-Headers\"); wantedHeaders != \"\" {\n\t\t\tw.Header().Set(\"Access-Control-Allow-Headers\", wantedHeaders)\n\t\t}\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\n\tb, err := json.Marshal(resp)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintln(w, `{\"error\":\"failed to marshal json\"}`)\n\t\treturn\n\t}\n\n\tw.WriteHeader(code)\n\tfmt.Fprintln(w, string(b))\n}", "func (o *VerifyAccountCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func writeResponse(w http.ResponseWriter, h int, p interface{}) {\n\t// I set the content type...\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t// ... I write the specified status code...\n\tw.WriteHeader(h)\n\t// ... 
and I write the response\n\tb, _ := json.Marshal(p)\n\tw.Write(b)\n}", "func (o *UpdateCatalogOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func (c *SwitchVersion) WriteResponse(rw http.ResponseWriter, rp runtime.Producer) {\n\tswitch c.Request.Method {\n\tcase http.MethodPost:\n\t\tc.postSwitchVersion(rw, rp)\n\tdefault:\n\t\tc.notSupported(rw, rp)\n\t}\n}", "func (o *PutRecordingsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *BofaChkUpdateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *VerifyHealthCredentialOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func WriteResponse(w http.ResponseWriter, code int, err error, data interface{}, t0 time.Time) {\n\tw.WriteHeader(code)\n\tresp := &Response{Data: data, Dur: fmt.Sprint(time.Since(t0)), OK: false}\n\tif code < 300 {\n\t\tresp.OK = true\n\t}\n\tif err != nil {\n\t\tresp.Err = err.Error()\n\t}\n\terr = json.NewEncoder(w).Encode(resp)\n\tif err != nil {\n\t\tlog.Infof(\"failed to json encode response: %v\", err)\n\t\tif _, err = w.Write([]byte(spew.Sdump(resp))); err != nil {\n\t\t\tlog.Infof(\"failed to write dump of response: %v\", err)\n\t\t}\n\t}\n}", "func (o *NewDiscoveryOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func writeResponse(data []byte, size int64, ctype string, w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", ctype)\n\tw.Header().Set(\"Content-Length\", fmt.Sprintf(\"%d\", size))\n\tw.Header().Set(\"Cache-Control\", \"no-transform,public,max-age=86400,s-maxage=2592000\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(data)\n}", "func writeResponse(w http.ResponseWriter, code int, object interface{}) {\n\tfmt.Println(\"writing response:\", code, object)\n\tdata, err := json.Marshal(object)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tw.Header().Set(\"content-type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(data)\n}", "func writeResponse(w http.ResponseWriter, authZRes *authorization.Response) {\n\n\tdata, err := json.Marshal(authZRes)\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to marshel authz response %q\", err.Error())\n\t} else {\n\t\tw.Write(data)\n\t}\n\n\tif authZRes == nil || authZRes.Err != \"\" {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n}", "func (o *GetCharactersCharacterIDOpportunitiesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make(models.GetCharactersCharacterIDOpportunitiesOKBody, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) 
// let the recovery middleware deal with this\n\t}\n\n}", "func (o *WeaviateThingsGetNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(501)\n}", "func (c *UpdateSwitch) WriteResponse(rw http.ResponseWriter, rp runtime.Producer) {\n\tswitch c.Request.Method {\n\tcase http.MethodPost:\n\t\tc.postUpdateSwitch(rw, rp)\n\tdefault:\n\t\tc.notSupported(rw, rp)\n\t}\n}", "func (c *UpdateSwitch) WriteResponse(rw http.ResponseWriter, rp runtime.Producer) {\n\tswitch c.Request.Method {\n\tcase http.MethodPost:\n\t\tc.postUpdateSwitch(rw, rp)\n\tdefault:\n\t\tc.notSupported(rw, rp)\n\t}\n}", "func (o *UpdateLinkInPostOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetChatroomsIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetEchoNameOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetUIContentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *ListVsphereResourceOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func ResponseWrite(w http.ResponseWriter, responseCode int, responseData interface{}) {\n\t// Write Response\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(responseCode)\n\n\t// Write JSON to Response\n\tjson.NewEncoder(w).Encode(responseData)\n}", "func writeHTTPResponseInWriter(httpRes http.ResponseWriter, httpReq *http.Request, nobelPrizeWinnersResponse []byte, err error) {\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\thttp.Error(httpRes, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tlog.Printf(\"Request %s Succesfully Completed\", httpReq.RequestURI)\n\thttpRes.Header().Set(\"Content-Type\", \"application/json\")\n\thttpRes.Write(nobelPrizeWinnersResponse)\n}", "func (o *PostKeysKeyOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func (o *Operation) writeResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func writeResponse(data interface{}, w http.ResponseWriter) error {\n\tvar (\n\t\tenc []byte\n\t\terr error\n\t)\n\tenc, err = json.Marshal(data)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Failure to marshal, err = %s\", err)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tn, err := w.Write(enc)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Failure to write, err = %s\", err)\n\t}\n\tif n != len(enc) {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn 
fmt.Errorf(\"Short write sent = %d, wrote = %d\", len(enc), n)\n\t}\n\treturn nil\n}", "func (o *CreateUserOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func WriteResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\tif err != nil {\n\t\tlogger.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func (o *UpdateMoveTaskOrderPostCounselingInformationOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *PutQuestionOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (r *response) Write(b []byte) (n int, err error) {\n\tif !r.headersSend {\n\t\tif r.status == 0 {\n\t\t\tr.status = http.StatusOK\n\t\t}\n\t\tr.WriteHeader(r.status)\n\t}\n\tn, err = r.ResponseWriter.Write(b)\n\tr.size += int64(n)\n\treturn\n}", "func (o *PostOperationsDeleteP2PPathCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *HealthGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *VerifyEmailTokenOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *WeaviateThingsPatchNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(501)\n}", "func (o *WeaviateThingsGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *DeleteServiceIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *Operation) writeResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\t// as of now, just log errors for writing response\n\tif err != nil {\n\t\tlogger.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func (o *PostOperationsGetNodeEdgePointDetailsCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *UserEditOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *WeaviatePeersAnnounceOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func (o *CertifyOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) 
{\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func writeResponse(writer http.ResponseWriter, response *http.Response) (int64, error) {\n\tdefer response.Body.Close()\n\twriteResponseHeaders(writer, response, false)\n\treturn io.Copy(writer, response.Body)\n}", "func (o *PutMeetupDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(o._statusCode)\n}", "func (o *FingerPathsPostCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *PostPlaybookOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UpdateHostIgnitionCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *GetCharactersCharacterIDLocationOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetPingDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(o._statusCode)\n}", "func (o *PostManagementKubernetesIoV1NodesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *PutPerformancesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *StopAppAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(202)\n}", "func (o *GetFleetsFleetIDMembersOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Content-Language\n\n\tcontentLanguage := o.ContentLanguage\n\tif contentLanguage != \"\" {\n\t\trw.Header().Set(\"Content-Language\", contentLanguage)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = 
make(models.GetFleetsFleetIDMembersOKBody, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *GetMeetupsDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(o._statusCode)\n}", "func (o *PostEventCreated) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(201)\n}", "func (o *GetTaskTaskIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateTCPCheckAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Reload-ID\n\n\treloadID := o.ReloadID\n\tif reloadID != \"\" {\n\t\trw.Header().Set(\"Reload-ID\", reloadID)\n\t}\n\n\trw.WriteHeader(202)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *PostOperationsGetNetworkElementListCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *ServiceInstanceLastOperationGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header RetryAfter\n\n\tretryAfter := o.RetryAfter\n\tif retryAfter != \"\" {\n\t\trw.Header().Set(\"RetryAfter\", retryAfter)\n\t}\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetPiecesIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetTaskDetailsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *UpdateClusterOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetDetailOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\trw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetServicesHaproxyRuntimeAclsIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (r 
*responseInfoRecorder) Write(b []byte) (int, error) {\n\tr.ContentLength += int64(len(b))\n\tif r.statusCode == 0 {\n\t\tr.statusCode = http.StatusOK\n\t}\n\treturn r.ResponseWriter.Write(b)\n}", "func (o *LogoutOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UploadFileOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func WriteResponse(w http.ResponseWriter, data interface{}) error {\n\tenv := map[string]interface{}{\n\t\t\"meta\": map[string]interface{}{\n\t\t\t\"code\": http.StatusOK,\n\t\t},\n\t\t\"data\": data,\n\t}\n\treturn jsonResponse(w, env)\n}", "func (o *WeaviateThingTemplatesCreateNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(501)\n}", "func (r *Responder) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\tfor k, v := range r.headers {\n\t\tfor _, val := range v {\n\t\t\trw.Header().Add(k, val)\n\t\t}\n\t}\n\n\trw.WriteHeader(r.code)\n\n\tif r.response != nil {\n\t\tif err := producer.Produce(rw, r.response); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}", "func (o *GetGateSourceByGateNameAndMntOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *Output) writeResponse(response string) error {\r\n\t// write the response\r\n\tif _, err := o.writer.WriteString(response + \"\\n\"); err != nil {\r\n\t\treturn err\r\n\t}\r\n\r\n\treturn nil\r\n}", "func (o *CreateSpoeCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tpayload := o.Payload\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (o *GetTransportByIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *TransferOK) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(200)\n\tif err := producer.Produce(rw, o.Payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *CreateUserCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *ViewOneOrderOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetVisiblePruebasFromQuestionTestInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(500)\n}", "func (o *GetWhaleTranfersOK) WriteResponse(rw 
http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\t// return empty array\n\t\tpayload = make([]*models.OperationsRow, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (o *SearchTournamentsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make([]*models.Tournament, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *CreateTCPCheckCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (s *Server) writeInfoResponse(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tmessage []byte,\n\tstatus int,\n\theaders map[string]string,\n) {\n\tfor k, v := range headers {\n\t\tw.Header().Add(k, v)\n\t}\n\n\tw.WriteHeader(status)\n\tw.Write(message)\n}" ]
[ "0.81304604", "0.788264", "0.7772782", "0.77727515", "0.7753031", "0.77414024", "0.76676446", "0.7638492", "0.7610648", "0.75810087", "0.75796163", "0.7568614", "0.7561152", "0.7559278", "0.7544965", "0.75428236", "0.7541938", "0.7534686", "0.7531949", "0.7520315", "0.75197434", "0.751327", "0.7512263", "0.7505821", "0.75036234", "0.7499273", "0.74883264", "0.7483884", "0.74775463", "0.7468526", "0.74680185", "0.74661297", "0.7464711", "0.74642605", "0.74642605", "0.74614394", "0.7460979", "0.74598646", "0.74458253", "0.7444159", "0.743647", "0.7427759", "0.74258095", "0.7419657", "0.74142104", "0.7407588", "0.7406497", "0.7406384", "0.7399059", "0.73891616", "0.7386186", "0.73809904", "0.7361118", "0.7360885", "0.73552495", "0.7355052", "0.7354089", "0.734803", "0.7345781", "0.732836", "0.7326291", "0.73191583", "0.73165286", "0.7316437", "0.7315448", "0.7313202", "0.7312272", "0.73100346", "0.73088604", "0.7301074", "0.7296896", "0.72915983", "0.7291387", "0.7288954", "0.72848505", "0.72835606", "0.7282206", "0.728063", "0.7275217", "0.72748876", "0.7273231", "0.72730994", "0.72695905", "0.72691834", "0.7269083", "0.7265924", "0.7261747", "0.7253492", "0.72522444", "0.7249374", "0.72485197", "0.7248316", "0.7241051", "0.7239245", "0.72371995", "0.72342974", "0.72284424", "0.72232044", "0.72155213", "0.7214858", "0.7213404" ]
0.0
-1
Write a small go test using your APIMessage (serialize/deserialize)
func TestBidirectionalAPIResponse(t *testing.T) {
	c2s := RegistrationResponse{}
	addr := uint32(12345)
	c2s.Ipv4Addr = &addr
	port := uint32(10)
	c2s.Port = &port

	// Serialize
	marsh, err := proto.Marshal(&c2s)
	if err != nil {
		t.Fatalf("Failed to serialize registration response: expected nil, got %v", err)
	}

	// Deserialize
	deser := RegistrationResponse{}
	if err := proto.Unmarshal(marsh, &deser); err != nil {
		t.Fatalf("Bad registration response returned")
	}

	// Test for correctness
	correctIpv4 := uint32(12345)
	correctPort := uint32(10)
	if *deser.Ipv4Addr != correctIpv4 {
		t.Fatalf("Registration response has wrong ipv4address")
	}
	if *deser.Port != correctPort {
		t.Fatalf("Registration response has wrong port")
	}

	// Success if doesn't fail above
	fmt.Println("")
	fmt.Println("Success!")
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestMessage(t *testing.T) {\n\t// Create the various types of messages to test.\n\n\t// MsgVersion.\n\taddrYou := &net.TCPAddr{IP: net.ParseIP(\"192.168.0.1\"), Port: 8333}\n\tyou, err := wire.NewNetAddress(addrYou, 1, wire.SFNodeNetwork)\n\tif err != nil {\n\t\tt.Errorf(\"NewNetAddress: %v\", err)\n\t}\n\tyou.Timestamp = time.Time{} // Version message has zero value timestamp.\n\taddrMe := &net.TCPAddr{IP: net.ParseIP(\"127.0.0.1\"), Port: 8333}\n\tme, err := wire.NewNetAddress(addrMe, 1, wire.SFNodeNetwork)\n\tif err != nil {\n\t\tt.Errorf(\"NewNetAddress: %v\", err)\n\t}\n\t// A version message that is decoded comes out a little different than\n\t// the original data structure, so we need to create a slightly different\n\t// message to test against.\n\tme.Timestamp = time.Time{} // Version message has zero value timestamp.\n\tyouExpected, err := wire.NewNetAddress(addrYou, 0, wire.SFNodeNetwork)\n\tif err != nil {\n\t\tt.Errorf(\"NewNetAddress: %v\", err)\n\t}\n\tyouExpected.Timestamp = time.Time{} // Version message has zero value timestamp.\n\tmeExpected, err := wire.NewNetAddress(addrMe, 0, wire.SFNodeNetwork)\n\tif err != nil {\n\t\tt.Errorf(\"NewNetAddress: %v\", err)\n\t}\n\tmeExpected.Timestamp = time.Time{} // Version message has zero value timestamp.\n\tmsgVersion := wire.NewMsgVersion(me, you, 123123, []uint32{1})\n\tmsgVersionExpected := wire.NewMsgVersion(meExpected, youExpected, 123123, []uint32{1})\n\n\tmsgVerack := wire.NewMsgVerAck()\n\tmsgPong := wire.NewMsgPong()\n\tmsgAddr := wire.NewMsgAddr()\n\tmsgInv := wire.NewMsgInv()\n\tmsgGetData := wire.NewMsgGetData()\n\n\t// ripe-based getpubkey message\n\tripeBytes := make([]byte, 20)\n\tripeBytes[0] = 1\n\tripe, err := wire.NewRipeHash(ripeBytes)\n\tif err != nil {\n\t\tt.Fatalf(\"could not make a ripe hash %s\", err)\n\t}\n\texpires := time.Unix(0x495fab29, 0) // 2009-01-03 12:15:05 -0600 CST)\n\tmsgGetPubKey := wire.NewMsgGetPubKey(123123, expires, 2, 1, ripe, nil)\n\n\tpub1Bytes, pub2Bytes := make([]byte, 64), make([]byte, 64)\n\tpub2Bytes[0] = 1\n\tpub1, err := wire.NewPubKey(pub1Bytes)\n\tif err != nil {\n\t\tt.Fatalf(\"could not create a pubkey %s\", err)\n\t}\n\tpub2, err := wire.NewPubKey(pub2Bytes)\n\tif err != nil {\n\t\tt.Fatalf(\"could not create a pubkey %s\", err)\n\t}\n\tmsgPubKey := wire.NewMsgPubKey(123123, expires, 2, 1, 0, pub1, pub2, 0, 0, nil, nil, nil)\n\n\tenc := make([]byte, 99)\n\tmsgMsg := wire.NewMsgMsg(123123, expires, 2, 1, enc, 0, 0, 0, nil, nil, 0, 0, nil, 0, nil, nil, nil)\n\n\tmsgBroadcast := wire.NewMsgBroadcast(123123, expires, 2, 1, nil, enc, 0, 0, 0, nil, nil, 0, 0, 0, nil, nil)\n\n\ttests := []struct {\n\t\tin wire.Message // Value to encode\n\t\tout wire.Message // Expected decoded value\n\t\tbmnet wire.BitmessageNet // Network to use for wire.encoding\n\t\tbytes int // Expected num bytes read/written\n\t}{\n\t\t{msgVersion, msgVersionExpected, wire.MainNet, 119},\n\t\t{msgVerack, msgVerack, wire.MainNet, 24},\n\t\t{msgPong, msgPong, wire.MainNet, 24},\n\t\t{msgAddr, msgAddr, wire.MainNet, 25},\n\t\t{msgInv, msgInv, wire.MainNet, 25},\n\t\t{msgGetData, msgGetData, wire.MainNet, 25},\n\t\t{msgGetPubKey, msgGetPubKey, wire.MainNet, 66},\n\t\t{msgPubKey, msgPubKey, wire.MainNet, 178},\n\t\t{msgMsg, msgMsg, wire.MainNet, 145},\n\t\t{msgBroadcast, msgBroadcast, wire.MainNet, 145},\n\t}\n\n\tt.Logf(\"Running %d tests\", len(tests))\n\tfor i, test := range tests {\n\t\t// Encode to wire.format.\n\t\tvar buf bytes.Buffer\n\t\tnw, err := wire.WriteMessageN(&buf, test.in, 
test.bmnet)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"WriteMessage #%d error %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Ensure the number of bytes written match the expected value.\n\t\tif nw != test.bytes {\n\t\t\tt.Errorf(\"WriteMessage #%d unexpected num bytes \"+\n\t\t\t\t\"written - got %d, want %d\", i, nw, test.bytes)\n\t\t}\n\n\t\t// Decode from wire.format.\n\t\trbuf := bytes.NewReader(buf.Bytes())\n\t\tnr, msg, _, err := wire.ReadMessageN(rbuf, test.bmnet)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"ReadMessage #%d error %v, msg %v\", i, err,\n\t\t\t\tspew.Sdump(msg))\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(msg, test.out) {\n\t\t\tt.Errorf(\"ReadMessage #%d\\n got: %v want: %v\", i,\n\t\t\t\tspew.Sdump(msg), spew.Sdump(test.out))\n\t\t\tcontinue\n\t\t}\n\n\t\t// Ensure the number of bytes read match the expected value.\n\t\tif nr != test.bytes {\n\t\t\tt.Errorf(\"ReadMessage #%d unexpected num bytes read - \"+\n\t\t\t\t\"got %d, want %d\", i, nr, test.bytes)\n\t\t}\n\t}\n\n\t// Do the same thing for Read/WriteMessage, but ignore the bytes since\n\t// they don't return them.\n\tt.Logf(\"Running %d tests\", len(tests))\n\tfor i, test := range tests {\n\t\t// Encode to wire.format.\n\t\tvar buf bytes.Buffer\n\t\terr := wire.WriteMessage(&buf, test.in, test.bmnet)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"WriteMessage #%d error %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Decode from wire.format.\n\t\trbuf := bytes.NewReader(buf.Bytes())\n\t\tmsg, _, err := wire.ReadMessage(rbuf, test.bmnet)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"ReadMessage #%d error %v, msg %v\", i, err,\n\t\t\t\tspew.Sdump(msg))\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(msg, test.out) {\n\t\t\tt.Errorf(\"ReadMessage #%d\\n got: %v want: %v\", i,\n\t\t\t\tspew.Sdump(msg), spew.Sdump(test.out))\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func TestMessageRegistry(t *testing.T) { //nolint:funlen,gocyclo\n\tvar result MessageRegistry\n\terr := json.NewDecoder(strings.NewReader(messageRegistryBody)).Decode(&result)\n\n\tif err != nil {\n\t\tt.Errorf(\"Error decoding JSON: %s\", err)\n\t}\n\n\tif result.ID != \"MyRegistry.json\" {\n\t\tt.Errorf(\"Received invalid ID: %s\", result.ID)\n\t}\n\n\tif result.Description != \"This registry is an example.\" {\n\t\tt.Errorf(\"Received invalid Description: %s\", result.Description)\n\t}\n\n\tif result.Language != \"en\" {\n\t\tt.Errorf(\"Received invalid Language: %s\", result.Language)\n\t}\n\n\tif result.Name != \"MyRegistry Registry\" {\n\t\tt.Errorf(\"Received invalid Name: %s\", result.Name)\n\t}\n\n\tif result.ODataType != \"#MessageRegistry.v1_2_0.MessageRegistry\" {\n\t\tt.Errorf(\"Received invalid ODataType: %s\", result.ODataType)\n\t}\n\n\tif result.OwningEntity != \"The vendor name\" {\n\t\tt.Errorf(\"Received invalid OwningEntity: %s\", result.OwningEntity)\n\t}\n\n\tif result.RegistryPrefix != \"MyRegistry\" {\n\t\tt.Errorf(\"Received invalid RegistryPrefix: %s\", result.RegistryPrefix)\n\t}\n\n\tif result.RegistryVersion != \"2.2.0\" {\n\t\tt.Errorf(\"Received invalid RegistryVersion: %s\", result.RegistryVersion)\n\t}\n\n\t// test the messages\n\n\tif len(result.Messages) != 4 {\n\t\tt.Errorf(\"Received invalid number of Messages: %d\", len(result.Messages))\n\t}\n\n\t// FirstMessage\n\tmessageKey := \"FirstMessage\"\n\tif m, ok := result.Messages[messageKey]; ok {\n\t\tif m.Description != \"Example of message with one arg.\" {\n\t\t\tt.Errorf(\"Received invalid Description: %s for the messageKey: %s\", m.Description, messageKey)\n\t\t}\n\t\tif m.Message != 
\"This message has only one arg: %1\" {\n\t\t\tt.Errorf(\"Received invalid Message: %s for the messageKey: %s\", m.Message, messageKey)\n\t\t}\n\t\tif m.NumberOfArgs != 1 {\n\t\t\tt.Errorf(\"Received invalid NumberOfArgs: %d for the messageKey: %s\", m.NumberOfArgs, messageKey)\n\t\t}\n\t\tif m.ParamTypes[0] != \"string\" {\n\t\t\tt.Errorf(\"Received invalid ParamTypes: %s for the messageKey: %s\", m.ParamTypes[0], messageKey)\n\t\t}\n\t\tif m.Resolution != \"The resolution for the first message.\" {\n\t\t\tt.Errorf(\"Received invalid Resolution: %s for the messageKey: %s\", m.Resolution, messageKey)\n\t\t}\n\t\tif m.Severity != \"OK\" {\n\t\t\tt.Errorf(\"Received invalid Severity: %s for the messageKey: %s\", m.Severity, messageKey)\n\t\t}\n\t} else {\n\t\tt.Errorf(\"MessageKey %s not found.\", messageKey)\n\t}\n\n\t// SecondMessage\n\tmessageKey = \"SecondMessage\"\n\tif m, ok := result.Messages[messageKey]; ok {\n\t\tif m.Description != \"Example of message without args.\" {\n\t\t\tt.Errorf(\"Received invalid Description: %s for the messageKey: %s\", m.Description, messageKey)\n\t\t}\n\t\tif m.Message != \"This message has no args.\" {\n\t\t\tt.Errorf(\"Received invalid Message: %s for the messageKey: %s\", m.Message, messageKey)\n\t\t}\n\t\tif m.NumberOfArgs != 0 {\n\t\t\tt.Errorf(\"Received invalid NumberOfArgs: %d for the messageKey: %s\", m.NumberOfArgs, messageKey)\n\t\t}\n\t\tif len(m.ParamTypes) > 0 {\n\t\t\tt.Errorf(\"Received invalid ParamTypes: %v for the messageKey: %s\", m.ParamTypes, messageKey)\n\t\t}\n\t\tif m.Resolution != \"The resolution for the second message.\" {\n\t\t\tt.Errorf(\"Received invalid Resolution: %s for the messageKey: %s\", m.Resolution, messageKey)\n\t\t}\n\t\tif m.Severity != \"Critical\" {\n\t\t\tt.Errorf(\"Received invalid Severity: %s for the messageKey: %s\", m.Severity, messageKey)\n\t\t}\n\t} else {\n\t\tt.Errorf(\"MessageKey %s not found.\", messageKey)\n\t}\n\n\t// ThirdMessage\n\tmessageKey = \"ThirdMessage\"\n\tif m, ok := result.Messages[messageKey]; ok {\n\t\tif m.Description != \"Example of message with two args.\" {\n\t\t\tt.Errorf(\"Received invalid Description: %s for the messageKey: %s\", m.Description, messageKey)\n\t\t}\n\t\tif m.Message != \"This message has two args: %1 and %2\" {\n\t\t\tt.Errorf(\"Received invalid Message: %s for the messageKey: %s\", m.Message, messageKey)\n\t\t}\n\t\tif m.NumberOfArgs != 2 {\n\t\t\tt.Errorf(\"Received invalid NumberOfArgs: %d for the messageKey: %s\", m.NumberOfArgs, messageKey)\n\t\t}\n\t\tif m.ParamTypes[0] != \"string\" {\n\t\t\tt.Errorf(\"Received invalid ParamTypes[0]: %s for the messageKey: %s\", m.ParamTypes[0], messageKey)\n\t\t}\n\t\tif m.ParamTypes[1] != \"string\" {\n\t\t\tt.Errorf(\"Received invalid ParamTypes[1]: %s for the messageKey: %s\", m.ParamTypes[1], messageKey)\n\t\t}\n\t\tif m.Resolution != \"The resolution for the third message.\" {\n\t\t\tt.Errorf(\"Received invalid Resolution: %s for the messageKey: %s\", m.Resolution, messageKey)\n\t\t}\n\t\tif m.Severity != \"Warning\" {\n\t\t\tt.Errorf(\"Received invalid Severity: %s for the messageKey: %s\", m.Severity, messageKey)\n\t\t}\n\t} else {\n\t\tt.Errorf(\"MessageKey %s not found.\", messageKey)\n\t}\n\n\t// MessageWithOem\n\tmessageKey = \"MessageWithOem\"\n\tif m, ok := result.Messages[messageKey]; ok {\n\t\tif m.Description != \"Example of message with Oem.\" {\n\t\t\tt.Errorf(\"Received invalid Description: %s for the messageKey: %s\", m.Description, messageKey)\n\t\t}\n\t\tif m.Message != \"This message has Oem 
info.\" {\n\t\t\tt.Errorf(\"Received invalid Message: %s for the messageKey: %s\", m.Message, messageKey)\n\t\t}\n\t\tif m.NumberOfArgs != 0 {\n\t\t\tt.Errorf(\"Received invalid NumberOfArgs: %d for the messageKey: %s\", m.NumberOfArgs, messageKey)\n\t\t}\n\t\tif len(m.ParamTypes) > 0 {\n\t\t\tt.Errorf(\"Received invalid ParamTypes: %v for the messageKey: %s\", m.ParamTypes, messageKey)\n\t\t}\n\t\tif m.Resolution != \"The resolution for the message with Oem.\" {\n\t\t\tt.Errorf(\"Received invalid Resolution: %s for the messageKey: %s\", m.Resolution, messageKey)\n\t\t}\n\t\tif m.Severity != \"Critical\" {\n\t\t\tt.Errorf(\"Received invalid Severity: %s for the messageKey: %s\", m.Severity, messageKey)\n\t\t}\n\n\t\t// test oem\n\t\tswitch oem := m.Oem.(type) {\n\t\tcase map[string]interface{}:\n\t\t\tfor vendor, values := range oem {\n\t\t\t\tif vendor != \"VendorName\" {\n\t\t\t\t\tt.Errorf(\"Received invalid Oem vendor: %s for the messageKey: %s\", vendor, messageKey)\n\t\t\t\t}\n\t\t\t\tswitch val := values.(type) {\n\t\t\t\tcase map[string]interface{}:\n\t\t\t\t\tfor k, v := range val {\n\t\t\t\t\t\tif k != \"OemInfo1\" && k != \"OemInfoN\" {\n\t\t\t\t\t\t\tt.Errorf(\"Received invalid Oem key %s for vendor: %s for the messageKey: %s\", k, vendor, messageKey)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif k == \"OemInfo1\" && v != \"The Oem info 1.\" {\n\t\t\t\t\t\t\tt.Errorf(\"Received invalid OemInfo1: %s for the messageKey: %s\", v, messageKey)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif v == \"OemInfoN\" && v != \"The Oem info N.\" {\n\t\t\t\t\t\t\tt.Errorf(\"Received invalid OemInfoN: %s for the messageKey: %s\", v, messageKey)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\tt.Error(\"Unexpected value format\")\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\tt.Errorf(\"Received invalid Oem for the messageKey: %s\", messageKey)\n\t\t}\n\t} else {\n\t\tt.Errorf(\"MessageKey %s not found.\", messageKey)\n\t}\n}", "func TestMsgSerialize(t *testing.T) {\n handler := new(CmdMsgHandler)\n cmd := new(CmdMsg)\n cmd.Cmd = CMD_ENV\n cmd.Data = \"\"\n\n b, err := handler.SerializeMsg(cmd)\n if err != nil {\n t.Fatal(err)\n }\n\n obj, err := handler.DeserializeMsg(b, 255)\n if err != nil {\n t.Fatal(err)\n }\n\n newCmd, ok := obj.(*CmdMsg)\n if !ok {\n t.Fatal(\"Invalid type received %T\", obj)\n }\n\n if cmd.Cmd != newCmd.Cmd {\n t.Fatalf(\n \"Cmd mismatch: %s vs %s\", \n cmd.Cmd, \n newCmd.Cmd,\n )\n }\n\n if cmd.Data != newCmd.Data {\n t.Fatalf(\n \"Data mismatch: %s vs %s\", \n cmd.Data, \n newCmd.Data,\n ) }\n\n log.Printf(\"TestMsgSerialize: passed\")\n}", "func Test_RetMsgGenJson_Case1(t *testing.T) {\n // invoke requestGen()\n msgBuf := RetMsgGenJson(\"OK\", \"nothing happens\")\n\n\t// decode the msg\n\tvar msg interface{}\n\terr := json.Unmarshal(msgBuf, &msg)\n\tif err != nil {\n\t\tt.Errorf(\"json.Unmarshal():%s\", err.Error())\n\t\treturn\n\t}\n\n\tstrMap, ok := msg.(map[string]interface{})\n\tif !ok {\n\t\tt.Errorf(\"fail to convert to map\")\n\t\treturn\n\t}\n\tvalue, ok := strMap[\"retCode\"]\n\tif !ok {\n\t\tt.Errorf(\"no retCode\")\n\t\treturn\n\t}\n\tif value != \"OK\" {\n\t\tt.Errorf(\"retCode(%s) != OK\", value)\n\t\treturn\n\t}\n\n\tvalue, ok = strMap[\"msg\"]\n\tif !ok {\n\t\tt.Errorf(\"no msg\")\n\t\treturn\n\t}\n\tif value != \"nothing happens\" {\n\t\tt.Errorf(\"msg(%s) != nothing happens\", value)\n\t\treturn\n\t}\n}", "func TestMessageHandler(t *testing.T) {\n log.SetOutput(ioutil.Discard)\n payload := Payload{\n Originator: \"Diogo\",\n Recipient: \"5531988174420\",\n Message: \"Test 
message\",\n }\n testRequest(payload, 200, 1, t)\n\n payload = Payload{\n Originator: \"\",\n Recipient: \"5531988174420\",\n Message: \"Test message\",\n }\n testRequest(payload, 400, 0, t)\n\n payload = Payload{\n Originator: \"Diogo\",\n Recipient: \"5531988174420\",\n Message: strings.Repeat(\"a\", 170),\n }\n testRequest(payload, 200, 2, t)\n}", "func JSONMarshalAndUnmarshalTest(t *testing.T, xmlPath string, includeDirs []string) {\n\tfor i, c := range casesMsgsTest {\n\t\tdCT, err := gomavlib.NewDialectCT(3, ctMessages)\n\t\trequire.NoError(t, err)\n\t\tdMsgCT, ok := dCT.Messages[c.id]\n\t\trequire.Equal(t, true, ok)\n\t\tbytesEncoded, err := dMsgCT.Encode(c.parsed, c.isV2)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, c.raw, bytesEncoded)\n\n\t\t// Decode bytes using RT\n\t\tdefs, version, err := libgen.XMLToFields(xmlPath, includeDirs)\n\t\trequire.NoError(t, err)\n\n\t\t// Create dialect from the parsed defs.\n\t\tdRT, err := gomavlib.NewDialectRT(version, defs)\n\t\trequire.NoError(t, err)\n\t\tdMsgRT := dRT.Messages[c.id]\n\t\trequire.Equal(t, uint(3), dRT.GetVersion())\n\n\t\t// Decode bytes using RT\n\t\tmsgDecoded, err := dMsgRT.Decode(c.raw, c.isV2)\n\t\trequire.NoError(t, err)\n\t\t// Marshal JSON\n\t\tbytesCreated, err := msgDecoded.(*gomavlib.DynamicMessage).MarshalJSON()\n\t\trequire.NoError(t, err)\n\t\tif i == 7 || i == 8 { // Test cases with altered JSON\n\t\t\trequire.NotEqual(t, jsonTest[i], string(bytesCreated))\n\t\t} else {\n\t\t\trequire.Equal(t, jsonTest[i], string(bytesCreated))\n\t\t}\n\n\t\t// Generate JSON Schema\n\t\tschemaBytes, err := msgDecoded.(*gomavlib.DynamicMessage).GenerateJSONSchema(\"/mavlink\", \"topic\")\n\t\trequire.NoError(t, err)\n\t\tif i == 7 { // Test case with altered schema example\n\t\t\trequire.NotEqual(t, schemasTest[i], string(schemaBytes))\n\t\t} else {\n\t\t\trequire.Equal(t, schemasTest[i], string(schemaBytes))\n\t\t}\n\n\t\t// Validate JSON document against schema\n\t\tschemaLoader := gojsonschema.NewStringLoader(schemasTest[i])\n\t\tdocumentLoader := gojsonschema.NewStringLoader(jsonTest[i])\n\t\tresult, err := gojsonschema.Validate(schemaLoader, documentLoader)\n\t\tif i == 8 { // JSONTest[8] has a string entry where it should be float32 - should not validate against schemasTest[8]\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, false, result.Valid())\n\t\t} else if i == 1 || i == 9 { // float as nan, +inf, or -inf string not accepted\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, false, result.Valid())\n\t\t} else {\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, true, result.Valid())\n\t\t}\n\n\t\t// Test Unmarshal\n\t\t// Create new DynamicMessage with empty fields for testing unmarshal\n\t\tdm, err := dRT.CreateMessageById(uint32(dRT.Messages[c.id].Msg.Id))\n\t\trequire.NoError(t, err)\n\t\terr = dm.UnmarshalJSON(bytesCreated)\n\t\trequire.NoError(t, err)\n\t\tif i == 1 { // Check that NaN, Inf, and -Inf have been umarshalled correctly.\n\t\t\tcheck := math.IsNaN(float64(dm.Fields[\"flow_comp_m_x\"].(gomavlib.JsonFloat32).F))\n\t\t\trequire.Equal(t, true, check)\n\t\t\tcheck = math.IsInf(float64(dm.Fields[\"flow_comp_m_y\"].(gomavlib.JsonFloat32).F), 1)\n\t\t\trequire.Equal(t, true, check)\n\t\t\tcheck = math.IsInf(float64(dm.Fields[\"ground_distance\"].(gomavlib.JsonFloat32).F), -1)\n\t\t\trequire.Equal(t, true, check)\n\n\t\t\t// check that SetField as a float32 also works:\n\t\t\terr := dm.SetField(\"flow_comp_m_x\", float32(math.NaN()))\n\t\t\trequire.NoError(t, err)\n\t\t\tcheck = 
math.IsNaN(float64(dm.Fields[\"flow_comp_m_x\"].(gomavlib.JsonFloat32).F))\n\t\t\trequire.Equal(t, true, check)\n\n\t\t} else if i == 9 { // Check slice of NaN, +Inf, and -Inf float64 values.\n\t\t\tarray := dm.Fields[\"distance\"].([]gomavlib.JsonFloat64)\n\t\t\tpatternCount := 0\n\t\t\tfor j := 0; j < 16; j++ {\n\t\t\t\tval := array[j].F\n\t\t\t\tif patternCount <= 1 {\n\t\t\t\t\tcheck := math.IsNaN(val)\n\t\t\t\t\trequire.Equal(t, true, check)\n\t\t\t\t\tpatternCount++\n\t\t\t\t} else if patternCount == 2 {\n\t\t\t\t\tcheck := math.IsInf(val, 1)\n\t\t\t\t\trequire.Equal(t, true, check)\n\t\t\t\t\tpatternCount++\n\t\t\t\t} else if patternCount == 3 {\n\t\t\t\t\tcheck := math.IsInf(val, -1)\n\t\t\t\t\trequire.Equal(t, true, check)\n\t\t\t\t\tpatternCount = 0 // Reset the pattern\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Check that SetField as a slice of float64 also works\n\t\t\tfloatSlice := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}\n\t\t\terr := dm.SetField(\"distance\", floatSlice)\n\t\t\trequire.NoError(t, err)\n\t\t\tvals, ok := dm.Fields[\"distance\"].([]gomavlib.JsonFloat64)\n\t\t\trequire.Equal(t, true, ok)\n\t\t\tfor i, val := range vals {\n\t\t\t\trequire.Equal(t, float64(i+1), val.F)\n\t\t\t}\n\t\t} else {\n\t\t\trequire.Equal(t, msgDecoded.(*gomavlib.DynamicMessage).Fields, dm.Fields)\n\t\t}\n\t}\n}", "func Test_Message(t *testing.T) {\n\tMessage(\"resource\", \"I am a resource message\")\n}", "func TestGetMessage(t *testing.T) {\n\tfor _, sub := range testSubscriptions[1:] {\n\t\tmessage, err := sub.Next(context.Background())\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tdecodedMessage := &api.BaseMessage{}\n\t\tjson.Unmarshal(message.Data, decodedMessage)\n\n\t\toriginalMessage := fmt.Sprintf(\"%s send 'hello test'\", testHosts[0].ID())\n\t\tif decodedMessage.Body != originalMessage {\n\t\t\tt.Fatal(\"Message does not match\")\n\t\t}\n\t}\n}", "func AuthMsgsSerializationTest(t *testing.T, serializerTest func(t *testing.T, msg wire.Msg)) {\n\tt.Helper()\n\n\trng := pkgtest.Prng(t)\n\tserializerTest(t, wire.NewAuthResponseMsg(NewRandomAccount(rng)))\n}", "func TestUnmarshalBaseMessage(t *testing.T) {\n\tvar baseMessage BaseMessage\n\tbytesBaseMessage, _ := json.Marshal(baseMessage)\n\ttests := []struct {\n\t\tname string\n\t\tbaseMsg []byte\n\t\twant *BaseMessage\n\t\twantErr error\n\t}{\n\t\t{\n\t\t\tname: \"UnmarshalBaseMessageTest-WrongFormat\",\n\t\t\tbaseMsg: []byte(\"\"),\n\t\t\twant: nil,\n\t\t\twantErr: errors.New(\"unexpected end of JSON input\"),\n\t\t},\n\t\t{\n\t\t\tname: \"UnmarshalBaseMessageTest-RightFormat\",\n\t\t\tbaseMsg: bytesBaseMessage,\n\t\t\twant: &baseMessage,\n\t\t\twantErr: nil,\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tgot, err := UnmarshalBaseMessage(test.baseMsg)\n\t\t\tif err != nil {\n\t\t\t\tif !reflect.DeepEqual(err.Error(), test.wantErr.Error()) {\n\t\t\t\t\tt.Errorf(\"Error Got = %v, Want = %v\", err.Error(), test.wantErr.Error())\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif !reflect.DeepEqual(err, test.wantErr) {\n\t\t\t\t\tt.Errorf(\"Error Got = %v, Want = %v\", err, test.wantErr)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(got, test.want) {\n\t\t\t\tt.Errorf(\"UnmarshalBaseMessage() = %v, want %v\", got, test.want)\n\t\t\t}\n\t\t})\n\t}\n}", "func TestSerDesROS2Messages_test_msgs(t *testing.T) {\n\tSetDefaultFailureMode(FailureContinues)\n\n\tConvey(\"test_msgs.Arrays\", t, func() {\n\t\tgoObj := 
test_msgs.NewArrays()\n\t\ttest_msgs.ArraysTypeSupport.AsGoStruct(goObj, unsafe.Pointer(Fixture_C_test_msgs__Arrays()))\n\t\tSo(goObj, ShouldResemble, Fixture_Go_test_msgs__Arrays())\n\t\tcobj := test_msgs.ArraysTypeSupport.PrepareMemory()\n\t\tdefer test_msgs.ArraysTypeSupport.ReleaseMemory(cobj)\n\t\ttest_msgs.ArraysTypeSupport.AsCStruct(cobj, goObj)\n\t\tSo((*_Ctype_struct_test_msgs__msg__Arrays)(cobj), ShouldResemble, Fixture_C_test_msgs__Arrays())\n\t})\n\tConvey(\"test_msgs.BasicTypes\", t, func() {\n\t\tgoObj := test_msgs.NewBasicTypes()\n\t\ttest_msgs.BasicTypesTypeSupport.AsGoStruct(goObj, unsafe.Pointer(Fixture_C_test_msgs__BasicTypes()))\n\t\tSo(goObj, ShouldResemble, Fixture_Go_test_msgs__BasicTypes())\n\t\tcobj := test_msgs.BasicTypesTypeSupport.PrepareMemory()\n\t\tdefer test_msgs.BasicTypesTypeSupport.ReleaseMemory(cobj)\n\t\ttest_msgs.BasicTypesTypeSupport.AsCStruct(cobj, goObj)\n\t\tSo((*_Ctype_struct_test_msgs__msg__BasicTypes)(cobj), ShouldResemble, Fixture_C_test_msgs__BasicTypes())\n\t})\n\tConvey(\"test_msgs.Builtins\", t, func() {\n\t\tgoObj := test_msgs.NewBuiltins()\n\t\ttest_msgs.BuiltinsTypeSupport.AsGoStruct(goObj, unsafe.Pointer(Fixture_C_test_msgs__Builtins()))\n\t\tSo(goObj, ShouldResemble, Fixture_Go_test_msgs__Builtins())\n\t\tcobj := test_msgs.BuiltinsTypeSupport.PrepareMemory()\n\t\tdefer test_msgs.BuiltinsTypeSupport.ReleaseMemory(cobj)\n\t\ttest_msgs.BuiltinsTypeSupport.AsCStruct(cobj, goObj)\n\t\tSo((*_Ctype_struct_test_msgs__msg__Builtins)(cobj), ShouldResemble, Fixture_C_test_msgs__Builtins())\n\t})\n\tConvey(\"test_msgs.BoundedSequences\", t, func() {\n\t\tgoObj := test_msgs.NewBoundedSequences()\n\t\ttest_msgs.BoundedSequencesTypeSupport.AsGoStruct(goObj, unsafe.Pointer(Fixture_C_test_msgs__BoundedSequences()))\n\t\tSo(goObj, ShouldResemble, Fixture_Go_test_msgs__BoundedSequences())\n\t\tcobj := test_msgs.BoundedSequencesTypeSupport.PrepareMemory()\n\t\tdefer test_msgs.BoundedSequencesTypeSupport.ReleaseMemory(cobj)\n\t\ttest_msgs.BoundedSequencesTypeSupport.AsCStruct(cobj, goObj)\n\t\tSo((*_Ctype_struct_test_msgs__msg__BoundedSequences)(cobj), ShouldResemble, Fixture_C_test_msgs__BoundedSequences())\n\t})\n\tConvey(\"test_msgs.Constants\", t, func() {\n\t\tgoObj := test_msgs.NewConstants()\n\t\ttest_msgs.ConstantsTypeSupport.AsGoStruct(goObj, unsafe.Pointer(Fixture_C_test_msgs__Constants()))\n\t\tSo(goObj, ShouldResemble, Fixture_Go_test_msgs__Constants())\n\t\tcobj := test_msgs.ConstantsTypeSupport.PrepareMemory()\n\t\tdefer test_msgs.ConstantsTypeSupport.ReleaseMemory(cobj)\n\t\ttest_msgs.ConstantsTypeSupport.AsCStruct(cobj, goObj)\n\t\tSo((*_Ctype_struct_test_msgs__msg__Constants)(cobj), ShouldResemble, Fixture_C_test_msgs__Constants())\n\t})\n\tConvey(\"test_msgs.Defaults\", t, func() {\n\t\tgoObj := test_msgs.NewDefaults()\n\t\ttest_msgs.DefaultsTypeSupport.AsGoStruct(goObj, unsafe.Pointer(Fixture_C_test_msgs__Defaults()))\n\t\tSo(goObj, ShouldResemble, Fixture_Go_test_msgs__Defaults())\n\t\tcobj := test_msgs.DefaultsTypeSupport.PrepareMemory()\n\t\tdefer test_msgs.DefaultsTypeSupport.ReleaseMemory(cobj)\n\t\ttest_msgs.DefaultsTypeSupport.AsCStruct(cobj, goObj)\n\t\tSo((*_Ctype_struct_test_msgs__msg__Defaults)(cobj), ShouldResemble, Fixture_C_test_msgs__Defaults())\n\t})\n\tConvey(\"test_msgs.Empty\", t, func() {\n\t\tgoObj := test_msgs.NewEmpty()\n\t\ttest_msgs.EmptyTypeSupport.AsGoStruct(goObj, unsafe.Pointer(Fixture_C_test_msgs__Empty()))\n\t\tSo(goObj, ShouldResemble, Fixture_Go_test_msgs__Empty())\n\t\tcobj := 
test_msgs.EmptyTypeSupport.PrepareMemory()\n\t\tdefer test_msgs.EmptyTypeSupport.ReleaseMemory(cobj)\n\t\ttest_msgs.EmptyTypeSupport.AsCStruct(cobj, goObj)\n\t\tSo((*_Ctype_struct_test_msgs__msg__Empty)(cobj), ShouldResemble, Fixture_C_test_msgs__Empty())\n\t})\n\tConvey(\"test_msgs.MultiNested\", t, func() {\n\t\tgoObj := test_msgs.NewMultiNested()\n\t\ttest_msgs.MultiNestedTypeSupport.AsGoStruct(goObj, unsafe.Pointer(Fixture_C_test_msgs__MultiNested()))\n\t\tSo(goObj, ShouldResemble, Fixture_Go_test_msgs__MultiNested())\n\t\tcobj := test_msgs.MultiNestedTypeSupport.PrepareMemory()\n\t\tdefer test_msgs.MultiNestedTypeSupport.ReleaseMemory(cobj)\n\t\ttest_msgs.MultiNestedTypeSupport.AsCStruct(cobj, goObj)\n\t\tSo((*_Ctype_struct_test_msgs__msg__MultiNested)(cobj), ShouldResemble, Fixture_C_test_msgs__MultiNested())\n\t})\n\tConvey(\"test_msgs.Nested\", t, func() {\n\t\tgoObj := test_msgs.NewNested()\n\t\ttest_msgs.NestedTypeSupport.AsGoStruct(goObj, unsafe.Pointer(Fixture_C_test_msgs__Nested()))\n\t\tSo(goObj, ShouldResemble, Fixture_Go_test_msgs__Nested())\n\t\tcobj := test_msgs.NestedTypeSupport.PrepareMemory()\n\t\tdefer test_msgs.NestedTypeSupport.ReleaseMemory(cobj)\n\t\ttest_msgs.NestedTypeSupport.AsCStruct(cobj, goObj)\n\t\tSo((*_Ctype_struct_test_msgs__msg__Nested)(cobj), ShouldResemble, Fixture_C_test_msgs__Nested())\n\t})\n\tConvey(\"test_msgs.UnboundedSequences do not allocate memory for empty slices\", t, func() {\n\t\tgoObj := test_msgs.NewUnboundedSequences()\n\t\ttest_msgs.UnboundedSequencesTypeSupport.AsGoStruct(goObj, unsafe.Pointer(Fixture_C_test_msgs__UnboundedSequences_no_allocate_memory_on_empty_slice()))\n\t\tSo(goObj, ShouldResemble, Fixture_Go_test_msgs__UnboundedSequences_no_allocate_memory_on_empty_slice())\n\t\tcobj := test_msgs.UnboundedSequencesTypeSupport.PrepareMemory()\n\t\tdefer test_msgs.UnboundedSequencesTypeSupport.ReleaseMemory(cobj)\n\t\ttest_msgs.UnboundedSequencesTypeSupport.AsCStruct(cobj, goObj)\n\t\tSo((*_Ctype_struct_test_msgs__msg__UnboundedSequences)(cobj), ShouldResemble, Fixture_C_test_msgs__UnboundedSequences_no_allocate_memory_on_empty_slice())\n\t})\n\tConvey(\"test_msgs.UnboundedSequences\", t, func() {\n\t\tgoObj := test_msgs.NewUnboundedSequences()\n\t\ttest_msgs.UnboundedSequencesTypeSupport.AsGoStruct(goObj, unsafe.Pointer(Fixture_C_test_msgs__UnboundedSequences()))\n\t\tSo(goObj, ShouldResemble, Fixture_Go_test_msgs__UnboundedSequences())\n\t\tcobj := test_msgs.UnboundedSequencesTypeSupport.PrepareMemory()\n\t\tdefer test_msgs.UnboundedSequencesTypeSupport.ReleaseMemory(cobj)\n\t\ttest_msgs.UnboundedSequencesTypeSupport.AsCStruct(cobj, goObj)\n\t\tSo((*_Ctype_struct_test_msgs__msg__UnboundedSequences)(cobj), ShouldResemble, Fixture_C_test_msgs__UnboundedSequences())\n\t})\n\tConvey(\"test_msgs.WStrings\", t, func() {\n\t\tgoObj := test_msgs.NewWStrings()\n\t\ttest_msgs.WStringsTypeSupport.AsGoStruct(goObj, unsafe.Pointer(Fixture_C_test_msgs__WStrings()))\n\t\tSo(goObj, ShouldResemble, Fixture_Go_test_msgs__WStrings())\n\t\tcobj := test_msgs.WStringsTypeSupport.PrepareMemory()\n\t\tdefer test_msgs.WStringsTypeSupport.ReleaseMemory(cobj)\n\t\ttest_msgs.WStringsTypeSupport.AsCStruct(cobj, goObj)\n\t\tSo((*_Ctype_struct_test_msgs__msg__WStrings)(cobj), ShouldResemble, Fixture_C_test_msgs__WStrings())\n\t})\n}", "func TestDeserialize(t *testing.T) {\n\trunPolicyPackIntegrationTest(t, \"deserialize\", NodeJS, nil, []policyTestScenario{\n\t\t{WantErrors: nil},\n\t})\n}", "func (st *SDKTester) Test(resp interface{}) {\n\tif resp == nil || 
st.respWant == nil {\n\t\tst.t.Logf(\"response want/got is nil, abort\\n\")\n\t\treturn\n\t}\n\n\trespMap := st.getFieldMap(resp)\n\tfor i, v := range st.respWant {\n\t\tif reflect.DeepEqual(v, respMap[i]) {\n\t\t\tcontinue\n\t\t}\n\t\tswitch x := respMap[i].(type) {\n\t\tcase Stringer:\n\t\t\tif !assert.Equal(st.t, v, x.String()) {\n\t\t\t\tst.t.Errorf(\"%s want %v, got %v\", i, v, respMap[i])\n\t\t\t}\n\t\tcase map[string]interface{}:\n\t\t\tif value, ok := x[\"Value\"]; ok {\n\t\t\t\tif !assert.Equal(st.t, v, value) {\n\t\t\t\t\tst.t.Errorf(\"%s want %v, got %v\", i, v, respMap[i])\n\t\t\t\t}\n\t\t\t}\n\t\tcase Inter:\n\t\t\tif !assert.Equal(st.t, v, x.Int()) {\n\t\t\t\tst.t.Errorf(\"%s want %v, got %v\", i, v, respMap[i])\n\t\t\t}\n\t\tdefault:\n\t\t\tif !assert.Equal(st.t, v, respMap[i]) {\n\t\t\t\tst.t.Errorf(\"%s want %v, got %v\", i, v, respMap[i])\n\t\t\t}\n\t\t}\n\t}\n}", "func createTestMessages() {\n\t//Ping our CRUD Microservice to see if Messageboards are already created\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\treq, err := http.NewRequest(\"GET\", isMessageBoardMade, nil)\n\tif err != nil {\n\t\ttheErr := \"There was an error reaching out to isMessageBoardCreated: \" + err.Error()\n\t\tlogWriter(theErr)\n\t\tfmt.Println(theErr)\n\t}\n\n\tresp, err := http.DefaultClient.Do(req.WithContext(ctx))\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\ttheErr := \"There was an error getting a response for seeing if a messageboard is created \" + err.Error()\n\t\tlogWriter(theErr)\n\t\tfmt.Println(theErr)\n\t}\n\n\t//Marshal the response into a type we can read\n\ttype ReturnMessage struct {\n\t\tTheErr []string `json:\"TheErr\"`\n\t\tResultMsg []string `json:\"ResultMsg\"`\n\t\tSuccOrFail int `json:\"SuccOrFail\"`\n\t\tGivenHDogMB MessageBoard `json:\"GivenHDogMB\"`\n\t\tGivenHamMB MessageBoard `json:\"GivenHamMB\"`\n\t}\n\tvar returnedMessage ReturnMessage\n\tjson.Unmarshal(body, &returnedMessage)\n\n\t/*Assign the messageboards for hotdog and hamburger off of the response, (if it is 0)\n\tAlso fill those loadedMessage */\n\tif returnedMessage.SuccOrFail != 0 {\n\t\t//Log the failure to get the database\n\t\tmessage := \"Failure to get the hotdog and hamburger messageboards\"\n\t\tlogWriter(message)\n\t\ttheMessageBoardHDog = MessageBoard{}\n\t\ttheMessageBoardHam = MessageBoard{}\n\t} else {\n\t\ttheMessageBoardHDog = returnedMessage.GivenHDogMB\n\t\ttheMessageBoardHam = returnedMessage.GivenHamMB\n\t\t//Fill the hotdog Messagemap\n\t\tfor g := 0; g < len(theMessageBoardHDog.AllOriginalMessages); g++ {\n\t\t\tloadedMessagesMapHDog[g+1] = theMessageBoardHDog.AllOriginalMessages[g]\n\t\t}\n\t\t//Fill the hamburger MessageMap\n\t\tfor z := 0; z < len(theMessageBoardHam.AllOriginalMessages); z++ {\n\t\t\tloadedMessagesMapHam[z+1] = theMessageBoardHam.AllOriginalMessages[z]\n\t\t}\n\t}\n}", "func TestSer(t *testing.T) {\n\tt.Skip()\n}", "func TestEncoderAndDecoder(t *testing.T) {\n\tbuf := new(bytes.Buffer)\n\n\tinPb := &example.A{\n\t\tDescription: \"hello world!\",\n\t\tNumber: 1,\n\t}\n\t// UUID is 16 byte long\n\tfor i := 0; i < 16; i++ {\n\t\tinPb.Id = append(inPb.Id, byte(i))\n\t}\n\n\tbytes, err := proto.Marshal(inPb)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tmsg := NewMessage(0, bytes)\n\n\te := NewMsgEncoder(buf)\n\te.Encode(msg)\n\n\toutMsg := NewEmptyMessage()\n\n\td := NewMsgDecoder(buf)\n\td.Decode(outMsg)\n\n\tif !reflect.DeepEqual(msg, outMsg) {\n\t\tt.Fatal(\"Messages are not 
equal!\")\n\t}\n\n\toutPb := new(example.A)\n\n\tproto.Unmarshal(outMsg.bytes, outPb)\n\n\tif !reflect.DeepEqual(outPb, inPb) {\n\t\tt.Fatal(\"Protos are not equal!\")\n\t}\n}", "func (suite *JsonTestSuite) TestExample() {\n\ttype T struct {\n\t\tF int `json:\"a,omitempty\"`\n\t\tB int\n\t}\n\tt := T{F: 1, B: 2}\n\tbytes, err := json.Marshal(t)\n\tsuite.Assert().NoError(err)\n\tvar x T\n\tsuite.Assert().NoError(json.Unmarshal(bytes, &x))\n\tsuite.Assert().Equal(t, x)\n}", "func TestPbEncoderAndDecoder(t *testing.T) {\n\tregister(0, reflect.TypeOf(example.A{}))\n\n\tbuf := new(bytes.Buffer)\n\n\tinPb := &example.A{\n\t\tDescription: \"hello world!\",\n\t\tNumber: 1,\n\t}\n\t// UUID is 16 byte long\n\tfor i := 0; i < 16; i++ {\n\t\tinPb.Id = append(inPb.Id, byte(i))\n\t}\n\n\tmsg := NewPbMessage(0, inPb)\n\n\te := NewMsgEncoder(buf)\n\te.EncodePb(msg)\n\n\toutMsg := NewEmptyPbMessage()\n\n\td := NewMsgDecoder(buf)\n\terr := d.DecodePb(outMsg)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !reflect.DeepEqual(msg, outMsg) {\n\t\tt.Fatal(\"Messages are not equal!\")\n\t}\n\n\tif !reflect.DeepEqual(inPb, outMsg.pb) {\n\t\tt.Fatal(\"Protos are not equal!\")\n\t}\n}", "func TestSimpleJSON(t *testing.T) {\n\n\tassert := assert.New(t)\n\n\ttrans := &transport{}\n\tobs, err := simpleSetup(trans, time.Second, nil)\n\tassert.NotNil(obs)\n\tassert.Nil(err)\n\n\treq := simpleJSONRequest()\n\n\tobs.QueueJSON(req, \"iot\", \"mac:112233445566\", \"1234\")\n\tobs.QueueJSON(req, \"test\", \"mac:112233445566\", \"1234\")\n\tobs.QueueJSON(req, \"no-match\", \"mac:112233445566\", \"1234\")\n\n\tobs.Shutdown(true)\n\n\tassert.Equal(int32(2), trans.i)\n}", "func TestX(t *testing.T) {\n\tt.Skip()\n\n\tvar r cue.Runtime\n\tinst, err := r.Compile(\"test\", `\n\t`)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tb, err := openapi.Gen(inst, &openapi.Config{\n\t\t// ExpandReferences: true,\n\t})\n\tif err != nil {\n\t\tt.Fatal(errors.Details(err, nil))\n\t}\n\n\tvar out = &bytes.Buffer{}\n\t_ = json.Indent(out, b, \"\", \" \")\n\tt.Error(out.String())\n}", "func RunJSONSerializationTestForPrivateEndpoint(subject PrivateEndpoint) string {\n\t// Serialize to JSON\n\tbin, err := json.Marshal(subject)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\t// Deserialize back into memory\n\tvar actual PrivateEndpoint\n\terr = json.Unmarshal(bin, &actual)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\t// Check for outcome\n\tmatch := cmp.Equal(subject, actual, cmpopts.EquateEmpty())\n\tif !match {\n\t\tactualFmt := pretty.Sprint(actual)\n\t\tsubjectFmt := pretty.Sprint(subject)\n\t\tresult := diff.Diff(subjectFmt, actualFmt)\n\t\treturn result\n\t}\n\n\treturn \"\"\n}", "func TestGetDataAndReturnResponse(t *testing.T) {\n\tdata := getDataAndReturnResponse()\n\tif data.Message != \"hello world\" {\n\t\tt.Errorf(\"Expected string 'hello world' but received: '%s'\", data)\n\t}\n}", "func Test005ConversionToAndFromMsgpackAndJson(t *testing.T) {\n\n\tcv.Convey(`from gl we should be able to create a known Go struct,\n\ntype Event struct {\n\tId int\n\tUser Person\n\tFlight string\n\tPilot []string\n}\n\n Event{}, and fill in its fields`, t, func() {\n\t\tevent := `(eventdemo id:123 user: (persondemo first:\"Liz\" last:\"C\") flight:\"AZD234\" pilot:[\"Roger\" \"Ernie\"] cancelled:true)`\n\t\tenv := NewZlisp()\n\t\tdefer env.Close()\n\n\t\tenv.StandardSetup()\n\n\t\tx, err := env.EvalString(event)\n\t\tpanicOn(err)\n\n\t\tcv.So(x.SexpString(nil), cv.ShouldEqual, ` (eventdemo id:123 user: (persondemo first:\"Liz\" last:\"C\") 
flight:\"AZD234\" pilot:[\"Roger\" \"Ernie\"] cancelled:true)`)\n\n\t\tjsonBy := SexpToJson(x)\n\t\tcv.So(string(jsonBy), cv.ShouldEqual, `{\"Atype\":\"eventdemo\", \"id\":123, \"user\":{\"Atype\":\"persondemo\", \"first\":\"Liz\", \"last\":\"C\", \"zKeyOrder\":[\"first\", \"last\"]}, \"flight\":\"AZD234\", \"pilot\":[\"Roger\", \"Ernie\"], \"cancelled\":true, \"zKeyOrder\":[\"id\", \"user\", \"flight\", \"pilot\", \"cancelled\"]}`)\n\t\tmsgpack, goObj := SexpToMsgpack(x)\n\t\t// msgpack field ordering is random, so can't expect a match the serialization byte-for-byte\n\t\t//cv.So(msgpack, cv.ShouldResemble, expectedMsgpack)\n\t\tgoObj2, err := MsgpackToGo(msgpack)\n\t\tpanicOn(err)\n\t\t// the ordering of jsonBack is canonical, so won't match ours\n\t\t// cv.So(string(jsonBack), cv.ShouldResemble, `{\"id\":123, \"user\":{\"first\":\"Liz\", \"last\":\"C\"}, \"flight\":\"AZD234\", \"pilot\":[\"Roger\", \"Ernie\"]}`)\n\n\t\tfmt.Printf(\"goObj = '%#v'\\n\", goObj)\n\t\tfmt.Printf(\"goObj2 = '%#v'\\n\", goObj2)\n\n\t\tcv.So(goObj, cv.ShouldResemble, goObj2)\n\n\t\tiface, err := MsgpackToGo(msgpack)\n\t\tpanicOn(err)\n\t\tsexp, err := GoToSexp(iface, env)\n\t\tpanicOn(err)\n\t\t// must get into same order to have sane comparison, so borrow the KeyOrder to be sure.\n\t\thhh := sexp.(*SexpHash)\n\t\thhh.KeyOrder = x.(*SexpHash).KeyOrder\n\t\tsexpStr := sexp.SexpString(nil)\n\t\texpectedSexpr := ` (eventdemo id:123 user: (persondemo first:\"Liz\" last:\"C\") flight:\"AZD234\" pilot:[\"Roger\" \"Ernie\"] cancelled:true)`\n\t\tcv.So(sexpStr, cv.ShouldResemble, expectedSexpr)\n\n\t\tfmt.Printf(\"\\n Unmarshaling from msgpack into pre-defined go struct should succeed.\\n\")\n\n\t\tvar goEvent Event\n\t\tdec := codec.NewDecoderBytes(msgpack, &msgpHelper.mh)\n\t\terr = dec.Decode(&goEvent)\n\t\tpanicOn(err)\n\t\tfmt.Printf(\"from msgpack, goEvent = '%#v'\\n\", goEvent)\n\t\tcv.So(goEvent.Id, cv.ShouldEqual, 123)\n\t\tcv.So(goEvent.Flight, cv.ShouldEqual, \"AZD234\")\n\t\tcv.So(goEvent.Pilot[0], cv.ShouldEqual, \"Roger\")\n\t\tcv.So(goEvent.Pilot[1], cv.ShouldEqual, \"Ernie\")\n\t\tcv.So(goEvent.User.First, cv.ShouldEqual, \"Liz\")\n\t\tcv.So(goEvent.User.Last, cv.ShouldEqual, \"C\")\n\n\t\tgoEvent = Event{}\n\t\tjdec := codec.NewDecoderBytes([]byte(jsonBy), &msgpHelper.jh)\n\t\terr = jdec.Decode(&goEvent)\n\t\tpanicOn(err)\n\t\tfmt.Printf(\"from json, goEvent = '%#v'\\n\", goEvent)\n\t\tcv.So(goEvent.Id, cv.ShouldEqual, 123)\n\t\tcv.So(goEvent.Flight, cv.ShouldEqual, \"AZD234\")\n\t\tcv.So(goEvent.Pilot[0], cv.ShouldEqual, \"Roger\")\n\t\tcv.So(goEvent.Pilot[1], cv.ShouldEqual, \"Ernie\")\n\t\tcv.So(goEvent.User.First, cv.ShouldEqual, \"Liz\")\n\t\tcv.So(goEvent.User.Last, cv.ShouldEqual, \"C\")\n\t\tcv.So(goEvent.Cancelled, cv.ShouldEqual, true)\n\n\t\tfmt.Printf(\"\\n And directly from Go to S-expression via GoToSexp() should work.\\n\")\n\t\tsexp2, err := GoToSexp(goObj2, env)\n\t\tcv.So(sexp2.SexpString(nil), cv.ShouldEqual, expectedSexpr)\n\t\tfmt.Printf(\"\\n Result: directly from Go map[string]interface{} -> sexpr via GoMapToSexp() produced: '%s'\\n\", sexp2.SexpString(nil))\n\n\t\tfmt.Printf(\"\\n And the reverse direction, from S-expression to go map[string]interface{} should work.\\n\")\n\t\tgoMap3 := SexpToGo(sexp2, env, nil).(map[string]interface{})\n\n\t\t// detailed diff\n\t\tgoObj2map := goObj2.(map[string]interface{})\n\n\t\t// looks like goMap3 has an int, whereas goObj2map has an int64\n\n\t\t// compare goMap3 and goObj2\n\t\tfor k3, v3 := range goMap3 {\n\t\t\tv2 := 
goObj2map[k3]\n\t\t\tcv.So(v3, cv.ShouldResemble, v2)\n\t\t}\n\n\t\tfmt.Printf(\"\\n Directly Sexp -> msgpack -> pre-established Go struct Event{} should work.\\n\")\n\n\t\tswitch asHash := sexp2.(type) {\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"value must be a hash or defmap\")\n\t\t\tpanic(err)\n\t\tcase *SexpHash:\n\t\t\ttn := asHash.TypeName\n\t\t\tfactory, hasMaker := GoStructRegistry.Registry[tn]\n\t\t\tif !hasMaker {\n\t\t\t\terr = fmt.Errorf(\"type '%s' not registered in GoStructRegistry\", tn)\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tnewStruct, err := factory.Factory(env, asHash)\n\t\t\tpanicOn(err)\n\n\t\t\t// What didn't work here was going through msgpack, because\n\t\t\t// ugorji msgpack encode, when writing, will turn signed ints into unsigned ints,\n\t\t\t// which is a problem for msgp decoding. Hence cut out the middle men\n\t\t\t// and decode straight from jsonBytes into our newStruct.\n\t\t\tjsonBytes := []byte(SexpToJson(asHash))\n\n\t\t\tjsonDecoder := json.NewDecoder(bytes.NewBuffer(jsonBytes))\n\t\t\terr = jsonDecoder.Decode(newStruct)\n\t\t\tswitch err {\n\t\t\tcase io.EOF:\n\t\t\tcase nil:\n\t\t\tdefault:\n\t\t\t\tpanic(fmt.Errorf(\"error during jsonDecoder.Decode() on type '%s': '%s'\", tn, err))\n\t\t\t}\n\t\t\tasHash.SetGoStructFactory(factory)\n\n\t\t\tfmt.Printf(\"from json via factory.Make(), newStruct = '%#v'\\n\", newStruct)\n\t\t\tcv.So(newStruct, cv.ShouldResemble, &goEvent)\n\t\t}\n\t})\n}", "func TestSend(t *testing.T) {\n\tvar paid [3]PayloadSequence\n\n\trecord := Payload{\n\t\tId: 12,\n\t\tName: \"Robert Murdock\",\n\t\tActive: true,\n\t}\n\n\tpaid[0].Text = \"Hi. I'm Robert. Robert Murdock\"\n\tpaid[0].Type = TEXT\n\tpaid[0].PlainType = \"text/plain\"\n\n\ttmpjson, jsonr := json.Marshal(&record)\n\tpaid[1].Text = string(tmpjson)\n\tpaid[1].Type = JSON\n\tpaid[1].PlainType = \"application/json\"\n\n\ttmpxml, xmlr := xml.Marshal(record)\n\tpaid[2].Text = string(tmpxml)\n\tpaid[2].Type = XML\n\tpaid[2].PlainType = \"application/xml\"\n\n\tt.Logf(\"jsonr: %+v; xmlr: %+v\", jsonr, xmlr)\n\n\tfor _, pay := range paid {\n\t\thts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tSend(w, OK, pay.Type, pay.Text)\n\t\t}))\n\n\t\tres, err := http.Get(hts.URL)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Unknown error: %+v\", err)\n\t\t\tt.FailNow()\n\t\t\treturn\n\t\t}\n\n\t\tbody, err := ioutil.ReadAll(res.Body)\n\t\tres.Body.Close()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"ioutil fail to read body: %+v\", err)\n\t\t\tt.Fail()\n\t\t\treturn\n\t\t}\n\n\t\tif strings.Compare(string(body), pay.Text) != 0 {\n\t\t\tt.Errorf(\"Expected %s, have %s\", pay.Text, string(body))\n\t\t}\n\n\t\tt.Logf(\"headers: %+v\", res.Header)\n\t\tt.Logf(\"Body: %+v\", string(body))\n\t\tif strings.Compare(res.Header.Get(\"Content-Type\"), pay.PlainType) != 0 {\n\t\t\tt.Errorf(\"Expected %s, have %s\", pay.PlainType, (res.Header.Get(\"Content-Type\")))\n\t\t}\n\n\t\thts.Close()\n\t}\n}", "func TestPagarmeWebhooTest(t *testing.T) {\n\n requestBody := []byte(`{\n \"object\": \"postback\",\n \"status\": \"pending_retry\",\n \"model\": \"transaction\",\n \"model_id\": \"1662527\",\n \"headers\": \"{\\\"Content-Type\\\":\\\"application/x-www-form-urlencoded\\\",\\\"X-PagarMe-Event\\\":\\\"transaction_status_changed\\\",\\\"X-Hub-Signature\\\":\\\"sha1=0c62a0b489e1138ef39ae71dece45be1c0e97c1e\\\",\\\"User-Agent\\\":\\\"PagarMe-Hookshot/1.0\\\"}\",\n \"payload\": 
\"id=1662527&fingerprint=a67597c98a493cc8b2c62ab018a553c19747e8a5&event=transaction_status_changed&old_status=waiting_payment&desired_status=paid&current_status=paid&object=transaction&transaction%5Bobject%5D=transaction&transaction%5Bstatus%5D=paid&transaction%5Brefuse_reason%5D=&transaction%5Bstatus_reason%5D=acquirer&transaction%5Bacquirer_response_code%5D=&transaction%5Bacquirer_name%5D=pagarme&transaction%5Bacquirer_id%5D=56f9d019decf72cc70055d58&transaction%5Bauthorization_code%5D=&transaction%5Bsoft_descriptor%5D=&transaction%5Btid%5D=1662527&transaction%5Bnsu%5D=1662527&transaction%5Bdate_created%5D=2017-06-28T17%3A36%3A52.808Z&transaction%5Bdate_updated%5D=2017-06-28T17%3A37%3A25.949Z&transaction%5Bamount%5D=15000&transaction%5Bauthorized_amount%5D=15000&transaction%5Bpaid_amount%5D=0&transaction%5Brefunded_amount%5D=0&transaction%5Binstallments%5D=1&transaction%5Bid%5D=1662527&transaction%5Bcost%5D=380&transaction%5Bcard_holder_name%5D=&transaction%5Bcard_last_digits%5D=&transaction%5Bcard_first_digits%5D=&transaction%5Bcard_brand%5D=&transaction%5Bcard_pin_mode%5D=&transaction%5Bpostback_url%5D=https%3A%2F%2Frequestb.in%2F10m5xva1&transaction%5Bpayment_method%5D=boleto&transaction%5Bcapture_method%5D=ecommerce&transaction%5Bantifraud_score%5D=&transaction%5Bboleto_url%5D=https%3A%2F%2Fpagar.me&transaction%5Bboleto_barcode%5D=1234%205678&transaction%5Bboleto_expiration_date%5D=2017-07-03T03%3A00%3A00.000Z&transaction%5Breferer%5D=api_key&transaction%5Bip%5D=177.63.194.231&transaction%5Bsubscription_id%5D=&transaction%5Bphone%5D=&transaction%5Baddress%5D=&transaction%5Bcustomer%5D=&transaction%5Bbilling%5D=&transaction%5Bshipping%5D=&transaction%5Bcard%5D=&transaction%5Bsplit_rules%5D=\",\n \"request_url\": \"https://requestb.in/10m5xva1\",\n \"retries\": 0,\n \"next_retry\": null,\n \"deliveries\": [\n {\n \"object\": \"postback_delivery\",\n \"status\": \"success\",\n \"status_reason\": \"http_status_code\",\n \"status_code\": \"200\",\n \"response_time\": 228,\n \"response_headers\": \"{\\\"date\\\":\\\"Wed, 28 Jun 2017 17:37:26 GMT\\\",\\\"content-type\\\":\\\"text/html; charset=utf-8\\\",\\\"transfer-encoding\\\":\\\"chunked\\\",\\\"connection\\\":\\\"close\\\",\\\"set-cookie\\\":[\\\"__cfduid=dd5481bfa0252320257fb1d3de05f19961498671446; expires=Thu, 28-Jun-18 17:37:26 GMT; path=/; domain=.requestb.in; HttpOnly\\\"],\\\"sponsored-by\\\":\\\"https://www.runscope.com\\\",\\\"via\\\":\\\"1.1 vegur\\\",\\\"strict-transport-security\\\":\\\"max-age=15552000\\\",\\\"x-content-type-options\\\":\\\"nosniff\\\",\\\"server\\\":\\\"cloudflare-nginx\\\",\\\"cf-ray\\\":\\\"376269f9ef4c0ed9-EWR\\\"}\",\n \"response_body\": \"ok\",\n \"date_created\": \"2017-06-28T17:37:26.033Z\",\n \"date_updated\": \"2017-06-28T17:37:26.266Z\",\n \"id\": \"pd_cj4haa8lt14slps730qybvjij\"\n }\n ],\n \"date_created\": \"2017-06-28T17:37:26.008Z\",\n \"date_updated\": \"2017-06-28T18:11:42.999Z\",\n \"signature\": \"sha1=0c62a0b489e1138ef39ae71dece45be1c0e97c1e\",\n \"id\": \"po_cj4haa8l4131bpi73glgzbnpp\"\n }`) \n\n webkook := pagarme.NewDefaultWebhook()\n\n data, err := webkook.Parse(requestBody)\n\n if err != nil {\n t.Errorf(\"Parse error: %v\", err)\n }\n\n if data.Response.Id != 1662527 {\n t.Errorf(\"Error is not pased: %v\", data.Response.Id)\n }\n\n}", "func TestServiceUpdateToJSON_TwoConfig_UpdateActions(t *testing.T) {\n}", "func TestJsonRpc(t *testing.T) {\n\t_id1 := float64(84363)\n\trpcRequest := server.NewJsonRpcRequest(_id1, \"null\", nil)\n\trpcResult := sendRpcAndParseResponseOrFailNow(t, 
rpcRequest)\n\tassert.Equal(t, _id1, rpcResult.Id)\n\n\t_id2 := \"84363\"\n\trpcRequest2 := server.NewJsonRpcRequest(_id2, \"null\", nil)\n\trpcResult2 := sendRpcAndParseResponseOrFailNow(t, rpcRequest2)\n\tassert.Equal(t, _id2, rpcResult2.Id)\n\tassert.Equal(t, \"2.0\", rpcResult2.Version)\n\n}", "func TestSendMessage(t *testing.T) {\n\tmessage := &api.BaseMessage{\n\t\tBody: fmt.Sprintf(\"%s send 'hello test'\", testHosts[0].ID()),\n\t\tFlag: api.FlagGenericMessage,\n\t}\n\n\tsendData, err := json.Marshal(message)\n\tif err != nil {\n\t\tt.Fatal(\"Error occurred when marshalling message object\")\n\t}\n\n\terr = testPubsubs[0].Publish(serviceTag, sendData)\n\tif err != nil {\n\t\tt.Fatal(\"Error occurred when publishing\")\n\t}\n}", "func TestCompliancetestDemo(t *testing.T) {\n\t// Register new Vendor account\n\tvendor := utils.CreateNewAccount(auth.AccountRoles{auth.Vendor}, testconstants.VID)\n\n\t// Register new TestHouse account\n\ttestHouse := utils.CreateNewAccount(auth.AccountRoles{auth.TestHouse}, 0)\n\n\t// Register new TestHouse account\n\tsecondTestHouse := utils.CreateNewAccount(auth.AccountRoles{auth.TestHouse}, 0)\n\n\t// Publish model info\n\tmodel := utils.NewMsgAddModel(vendor.Address, testconstants.VID)\n\t_, _ = utils.AddModel(model, vendor)\n\t// Publish modelVersion\n\tmodelVersion := utils.NewMsgAddModelVersion(model.VID, model.PID,\n\t\ttestconstants.SoftwareVersion, testconstants.SoftwareVersionString, vendor.Address)\n\t_, _ = utils.AddModelVersion(modelVersion, vendor)\n\n\t// Publish first testing result using Sign and Broadcast AddTestingResult message\n\tfirstTestingResult := utils.NewMsgAddTestingResult(model.VID, model.PID,\n\t\tmodelVersion.SoftwareVersion, modelVersion.SoftwareVersionString, testHouse.Address)\n\tutils.SignAndBroadcastMessage(testHouse, firstTestingResult)\n\n\t// Check testing result is created\n\treceivedTestingResult, _ := utils.GetTestingResult(firstTestingResult.VID,\n\t\tfirstTestingResult.PID, firstTestingResult.SoftwareVersion)\n\trequire.Equal(t, receivedTestingResult.VID, firstTestingResult.VID)\n\trequire.Equal(t, receivedTestingResult.PID, firstTestingResult.PID)\n\trequire.Equal(t, receivedTestingResult.SoftwareVersion, firstTestingResult.SoftwareVersion)\n\trequire.Equal(t, 1, len(receivedTestingResult.Results))\n\trequire.Equal(t, receivedTestingResult.Results[0].TestResult, firstTestingResult.TestResult)\n\trequire.Equal(t, receivedTestingResult.Results[0].TestDate, firstTestingResult.TestDate)\n\trequire.Equal(t, receivedTestingResult.Results[0].Owner, firstTestingResult.Signer)\n\n\t// Publish second model info\n\tsecondModel := utils.NewMsgAddModel(vendor.Address, testconstants.VID)\n\t_, _ = utils.AddModel(secondModel, vendor)\n\t// Publish second modelVersion\n\tsecondModelVersion := utils.NewMsgAddModelVersion(secondModel.VID, secondModel.PID,\n\t\ttestconstants.SoftwareVersion, testconstants.SoftwareVersionString, vendor.Address)\n\t_, _ = utils.AddModelVersion(secondModelVersion, vendor)\n\n\t// Publish second testing result using POST\n\tsecondTestingResult := utils.NewMsgAddTestingResult(secondModel.VID, secondModel.PID,\n\t\tsecondModelVersion.SoftwareVersion, secondModelVersion.SoftwareVersionString, testHouse.Address)\n\t_, _ = utils.PublishTestingResult(secondTestingResult, testHouse)\n\n\t// Check testing result is created\n\treceivedTestingResult, _ = utils.GetTestingResult(secondTestingResult.VID,\n\t\tsecondTestingResult.PID, secondTestingResult.SoftwareVersion)\n\trequire.Equal(t, 
receivedTestingResult.VID, secondTestingResult.VID)\n\trequire.Equal(t, receivedTestingResult.PID, secondTestingResult.PID)\n\trequire.Equal(t, receivedTestingResult.SoftwareVersion, secondTestingResult.SoftwareVersion)\n\trequire.Equal(t, 1, len(receivedTestingResult.Results))\n\trequire.Equal(t, receivedTestingResult.Results[0].TestResult, secondTestingResult.TestResult)\n\trequire.Equal(t, receivedTestingResult.Results[0].TestDate, secondTestingResult.TestDate)\n\trequire.Equal(t, receivedTestingResult.Results[0].Owner, secondTestingResult.Signer)\n\n\t// Publish new testing result for second model\n\tthirdTestingResult := utils.NewMsgAddTestingResult(secondModel.VID, secondModel.PID,\n\t\tsecondModelVersion.SoftwareVersion, secondModelVersion.SoftwareVersionString, secondTestHouse.Address)\n\t_, _ = utils.PublishTestingResult(thirdTestingResult, secondTestHouse)\n\n\t// Check testing result is created\n\treceivedTestingResult, _ = utils.GetTestingResult(secondTestingResult.VID,\n\t\tsecondTestingResult.PID, secondTestingResult.SoftwareVersion)\n\trequire.Equal(t, 2, len(receivedTestingResult.Results))\n\trequire.Equal(t, receivedTestingResult.Results[0].Owner, secondTestingResult.Signer)\n\trequire.Equal(t, receivedTestingResult.Results[0].TestResult, secondTestingResult.TestResult)\n\trequire.Equal(t, receivedTestingResult.Results[1].Owner, thirdTestingResult.Signer)\n\trequire.Equal(t, receivedTestingResult.Results[1].TestResult, thirdTestingResult.TestResult)\n}", "func TestUnmarshalPathologies(t *testing.T) {\n\n}", "func ControlMsgsSerializationTest(t *testing.T, serializerTest func(t *testing.T, msg wire.Msg)) {\n\tt.Helper()\n\n\tserializerTest(t, wire.NewPingMsg())\n\tserializerTest(t, wire.NewPongMsg())\n\tminLen := 16\n\tmaxLenDiff := 16\n\trng := pkgtest.Prng(t)\n\tserializerTest(t, &wire.ShutdownMsg{Reason: newRandomASCIIString(rng, minLen, maxLenDiff)})\n}", "func TestMain_SendReceiveMessage(t *testing.T) {\n\tms := test.MustRunCluster(t, 3)\n\tm0, m1 := ms.GetNode(0), ms.GetNode(1)\n\tdefer ms.Close()\n\n\t// Expected indexes and Fields\n\texpected := map[string][]string{\n\t\t\"i\": {\"f\"},\n\t}\n\n\t// Create a client for each node.\n\tclient0 := m0.Client()\n\tclient1 := m1.Client()\n\n\t// Create indexes and fields on one node.\n\tif err := client0.CreateIndex(context.Background(), \"i\", pilosa.IndexOptions{}); err != nil && err != pilosa.ErrIndexExists {\n\t\tt.Fatal(err)\n\t} else if err := client0.CreateField(context.Background(), \"i\", \"f\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Make sure node0 knows about the index and field created.\n\tschema0, err := client0.Schema(context.Background())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treceived0 := map[string][]string{}\n\tfor _, idx := range schema0 {\n\t\treceived0[idx.Name] = []string{}\n\t\tfor _, field := range idx.Fields {\n\t\t\treceived0[idx.Name] = append(received0[idx.Name], field.Name)\n\t\t}\n\t}\n\tif !reflect.DeepEqual(received0, expected) {\n\t\tt.Fatalf(\"unexpected schema on node0: %s\", received0)\n\t}\n\n\t// Make sure node1 knows about the index and field created.\n\tschema1, err := client1.Schema(context.Background())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treceived1 := map[string][]string{}\n\tfor _, idx := range schema1 {\n\t\treceived1[idx.Name] = []string{}\n\t\tfor _, field := range idx.Fields {\n\t\t\treceived1[idx.Name] = append(received1[idx.Name], field.Name)\n\t\t}\n\t}\n\tif !reflect.DeepEqual(received1, expected) {\n\t\tt.Fatalf(\"unexpected schema on node1: %s\", 
received1)\n\t}\n\n\t// Write data on first node.\n\tif _, err := m0.Query(t, \"i\", \"\", fmt.Sprintf(`\n Set(1, f=1)\n Set(%d, f=1)\n `, 2*pilosa.ShardWidth+1)); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// We have to wait for the broadcast message to be sent before checking state.\n\ttime.Sleep(1 * time.Second)\n\n\t// Make sure node0 knows about the latest MaxShard.\n\tmaxShards0, err := client0.MaxShardByIndex(context.Background())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif maxShards0[\"i\"] != 2 {\n\t\tt.Fatalf(\"unexpected maxShard on node0: %d\", maxShards0[\"i\"])\n\t}\n\n\t// Make sure node1 knows about the latest MaxShard.\n\tmaxShards1, err := client1.MaxShardByIndex(context.Background())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif maxShards1[\"i\"] != 2 {\n\t\tt.Fatalf(\"unexpected maxShard on node1: %d\", maxShards1[\"i\"])\n\t}\n}", "func TestBytesMessage(t *testing.T) {\n\tmessage := &Message{\n\t\tTopic: \"test\",\n\t\tData: []byte(\"test\"),\n\t} // Initialize message\n\n\tencodedMessage, err := message.Bytes() // Encode message\n\n\tif err != nil { // Check for errors\n\t\tt.Fatal(err) // Panic\n\t}\n\n\tif !strings.Contains(string(encodedMessage), \"test\") { // Check invalid encoded message\n\t\tt.Fatal(\"invalid encoded message\") // Panic\n\t}\n}", "func createTestObjects(t *testing.T) (protos.AppServer, chan protos.MessageRequest, []*protos.GetInfoResponse_SignInfo) {\n\t// Make some dummy signs\n\tsigns := []*protos.GetInfoResponse_SignInfo{\n\t\t{\n\t\t\tName: \"test1\", Width: 10, Height: 2,\n\t\t},\n\t\t{\n\t\t\tName: \"test2\", Width: 10, Height: 2,\n\t\t},\n\t}\n\t// Make a channel for sending messages\n\tmessageQueue := make(chan protos.MessageRequest, 10)\n\t// Create object under test\n\tserver := NewServer(\"secret\", \"password\", time.Hour, messageQueue, signs)\n\treturn server, messageQueue, signs\n}", "func TestReadTaskAPI(t *testing.T) {\n\n\ttask, err := NewTask(\"test read task api\")\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error : %v\", err)\n\t}\n\tif err := task.Save(); err != nil {\n\t\tt.Fatalf(\"unexpected error : %v\", err)\n\t}\n\n\tm := mux.NewRouter()\n\tm.HandleFunc(\"/task/{query}\", ReadTaskAPI)\n\n\trr := httptest.NewRecorder()\n\treq, _ := http.NewRequest(http.MethodGet, \"/task/\"+task.SID, strings.NewReader(\"\"))\n\n\tm.ServeHTTP(rr, req)\n\n\t// Test status code.\n\tif rr.Code != http.StatusOK {\n\t\tt.Errorf(\"Code : %v, Error : %v\", rr.Code, rr.Body.String())\n\t}\n\n\t// Test the result data.\n\tfind := new(Task)\n\tif err := jsonapi.UnmarshalPayload(rr.Body, find); err != nil {\n\t\tt.Errorf(\"unexpected error (%v)\", err)\n\t}\n\n\tif find.SID != task.SID {\n\t\tt.Errorf(\"expected task '%v', got '%v'\", task.SID, find.SID)\n\t}\n}", "func (c *Consumer) Test(ctx context.Context, m gosqs.Message) error {\n\tout := map[string]interface{}{}\n\n\tif err := m.Decode(&out); err != nil {\n\t\t// returning an error will cause the message to try again until it is sent to the Dead-Letter-Queue\n\t\treturn err\n\t}\n\n\t// returning nil means the message was successfully processed and it will be deleted from the queue\n\treturn nil\n}", "func TestNewSimpleResponse(t *testing.T) {\n\t// Create without format.\n\tr := NewSimpleResponse(\"TAG\", \"XXX\")\n\tif r.Status != \"TAG\" {\n\t\tt.Fatalf(\"Unexpected status. Expected: %s - Found: %s.\", \"TAG\", r.Status)\n\t}\n\tif r.Message != \"XXX\" {\n\t\tt.Fatalf(\"Unexpected message. 
Expected: %s - Found: %s.\", \"XXX\", r.Message)\n\t}\n\t// Create with format.\n\tr = NewSimpleResponsef(\"TAG2\", \"the%s\", \"message\")\n\tif r.Status != \"TAG2\" {\n\t\tt.Fatalf(\"Unexpected status. Expected: %s - Found: %s.\", \"TAG2\", r.Status)\n\t}\n\tif r.Message != \"themessage\" {\n\t\tt.Fatalf(\"Unexpected message. Expected: %s - Found: %s.\", \"themessage\", r.Message)\n\t}\n}", "func TestMisc(t *testing.T) {\n\tassert := require.New(t)\n\tvideo := VideoV2{}\n\tapi := setupTestApiV2(\"fake\")\n\tassert.Equal(\"Empty Video\\n\", video.Display())\n\tvideo.Id = \"abc123\"\n\tassert.Equal(\"Video abc123\\n\\tAssets : 0\\n\", video.Display())\n\treq := upload.UploadRequest{}\n\t_, err := video.GetUploadParams(req)\n\tassert.NotNil(err)\n\tassert.Equal(\"api is blank\", err.Error())\n\t_, err = video.CreateAssetForUpload(req)\n\tassert.NotNil(err)\n\tassert.Equal(\"api is blank\", err.Error())\n\tvideo.Api = &api\n\terr = video.AddAccount(test_server.ACCOUNT_ID)\n\tassert.NotNil(err)\n\tassert.Equal(\"invalid auth\", err.Error())\n\tr, e := video.Value()\n\tassert.Nil(e)\n\tval := r.([]byte)\n\tbytes, _ := json.Marshal(video)\n\tassert.Equal(bytes, val)\n\tvideo2 := VideoV2{}\n\terr = video2.Scan(\"a\")\n\tassert.NotNil(err)\n\tassert.Equal(\"Type assertion .([]byte) failed.\", err.Error())\n\terr = video2.Scan(val)\n\tassert.Nil(err)\n\tassert.Equal(video.Id, video2.Id)\n}", "func TestPublish(t *testing.T) {\n\tg := getRouter(true)\n\turi := \"api/v1/evaluation/\"\n\tbody := evaluationPublishRequest{\n\t\tCourseId: courseId,\n\t\tCourseName: courseName,\n\t\tRate: 7,\n\t\tAttendanceCheckType: 1,\n\t\tExamCheckType: 2,\n\t\tContent: \"老师讲课很棒\",\n\t\tIsAnonymous: false,\n\t\tTags: []uint8{5, 2, 1},\n\t}\n\n\tjsonByte, err := json.Marshal(body)\n\tif err != nil {\n\t\tt.Errorf(\"Test Error: Json Marshal Error: %s\", err.Error())\n\t}\n\n\tw := util.PerformRequestWithBody(http.MethodPost, g, uri, jsonByte, tokenStr)\n\n\tvar data struct {\n\t\tCode int\n\t\tMessage string\n\t\tData *evaluationPublishResponse\n\t}\n\n\tif err := json.Unmarshal([]byte(w.Body.String()), &data); err != nil {\n\t\tt.Errorf(\"Test Error: Publish New Evaluation Error; Json Unmarshal Error: %s\", err.Error())\n\t}\n\n\tevaluationId = data.Data.EvaluationId\n\tfmt.Printf(\"--- evaluationId = %d\\n\", evaluationId)\n\n\tif w.Code != http.StatusOK {\n\t\tt.Errorf(\"Test Error: StatusCode Error: %d\", w.Code)\n\t}\n}", "func TestDecode(t *testing.T) {\n\tdec := decode.NewDecoder()\n\t// register creation functions for all the slats.\n\tdec.AddDefaultCallbacks(core.Slats)\n\t// read say story data\n\tvar spec map[string]interface{}\n\tif e := json.Unmarshal([]byte(debug.SayHelloGoodbyeData), &spec); e != nil {\n\t\tt.Fatal(e)\n\t} else if prog, e := dec.ReadSpec(spec); e != nil {\n\t\tt.Fatal(e)\n\t} else if diff := pretty.Diff(debug.SayHelloGoodbye, prog); len(diff) > 0 {\n\t\tt.Fatal(pretty.Sprint(prog))\n\t} else {\n\t\tt.Log(\"ok. 
decoded story matches expected story\")\n\t}\n}", "func createTestMessages() {\n\n\tif testMsgsInit {\n\t\treturn\n\t}\n\tdefer func() { testMsgsInit = true }()\n\n\ttestmsgs := make([]*testmsgType, 100)\n\ti := 0\n\n\t// a test message with default initialization\n\ttestmsgs[i] = &testmsgType{msg: Message{TopicPartition: TopicPartition{Topic: &testconf.Topic, Partition: 0}}}\n\ti++\n\n\t// a test message for partition 0 with only Opaque specified\n\ttestmsgs[i] = &testmsgType{msg: Message{TopicPartition: TopicPartition{Topic: &testconf.Topic, Partition: 0},\n\t\tOpaque: fmt.Sprintf(\"Op%d\", i),\n\t}}\n\ti++\n\n\t// a test message for partition 0 with empty Value and Keys\n\ttestmsgs[i] = &testmsgType{msg: Message{TopicPartition: TopicPartition{Topic: &testconf.Topic, Partition: 0},\n\t\tValue: []byte(\"\"),\n\t\tKey: []byte(\"\"),\n\t\tOpaque: fmt.Sprintf(\"Op%d\", i),\n\t}}\n\ti++\n\n\t// a test message for partition 0 with Value, Key, and Opaque\n\ttestmsgs[i] = &testmsgType{msg: Message{TopicPartition: TopicPartition{Topic: &testconf.Topic, Partition: 0},\n\t\tValue: []byte(fmt.Sprintf(\"value%d\", i)),\n\t\tKey: []byte(fmt.Sprintf(\"key%d\", i)),\n\t\tOpaque: fmt.Sprintf(\"Op%d\", i),\n\t}}\n\ti++\n\n\t// a test message for partition 0 without Value\n\ttestmsgs[i] = &testmsgType{msg: Message{TopicPartition: TopicPartition{Topic: &testconf.Topic, Partition: 0},\n\t\tKey: []byte(fmt.Sprintf(\"key%d\", i)),\n\t\tOpaque: fmt.Sprintf(\"Op%d\", i),\n\t}}\n\ti++\n\n\t// a test message for partition 0 without Key\n\ttestmsgs[i] = &testmsgType{msg: Message{TopicPartition: TopicPartition{Topic: &testconf.Topic, Partition: 0},\n\t\tValue: []byte(fmt.Sprintf(\"value%d\", i)),\n\t\tOpaque: fmt.Sprintf(\"Op%d\", i),\n\t}}\n\ti++\n\n\tp0TestMsgs = testmsgs[:i]\n\n\t// a test message for PartitonAny with Value, Key, and Opaque\n\ttestmsgs[i] = &testmsgType{msg: Message{TopicPartition: TopicPartition{Topic: &testconf.Topic, Partition: PartitionAny},\n\t\tValue: []byte(fmt.Sprintf(\"value%d\", i)),\n\t\tKey: []byte(fmt.Sprintf(\"key%d\", i)),\n\t\tOpaque: fmt.Sprintf(\"Op%d\", i),\n\t}}\n\ti++\n\n\t// a test message for a non-existent partition with Value, Key, and Opaque.\n\t// It should generate ErrUnknownPartition\n\ttestmsgs[i] = &testmsgType{expectedError: Error{ErrUnknownPartition, \"\", false},\n\t\tmsg: Message{TopicPartition: TopicPartition{Topic: &testconf.Topic, Partition: int32(10000)},\n\t\t\tValue: []byte(fmt.Sprintf(\"value%d\", i)),\n\t\t\tKey: []byte(fmt.Sprintf(\"key%d\", i)),\n\t\t\tOpaque: fmt.Sprintf(\"Op%d\", i),\n\t\t}}\n\ti++\n\n\tpAllTestMsgs = testmsgs[:i]\n}", "func TestSqlSMSStorage_GetSMSs(t *testing.T) {\n\n}", "func TestMessageFromBytes(t *testing.T) {\n\tmessage := &Message{\n\t\tTopic: \"test\",\n\t\tData: []byte(\"test\"),\n\t} // Initialize message\n\n\tencodedMessage, err := message.Bytes() // Get message bytes\n\n\tif err != nil { // Check for errors\n\t\tt.Fatal(err) // Panic\n\t}\n\n\tdecodedMessage, err := MessageFromBytes(encodedMessage) // Decode message\n\n\tif err != nil { // Check for errors\n\t\tt.Fatal(err) // Panic\n\t}\n\n\tif !bytes.Equal(decodedMessage.Data, message.Data) { // Check invalid message\n\t\tt.Fatalf(\"invalid decoded message; got %s, wanted %s\", string(decodedMessage.Data), string(message.Data)) // Panic\n\t}\n}", "func TestMessage_EncodeAndRecv(t *testing.T) {\n\n\t// create request\n\tname := \"leo\"\n\tage := 18\n\treq := Request{\n\t\tPath: \"Student\",\n\t\tMethod: \"Register\",\n\t\tArgs: []interface{}{name, age},\n\t}\n\treqData, 
err := json.Marshal(req)\n\tif err != nil {\n\t\tt.Fatalf(\"TestMessage_EncodeAndRecv|Marshal|Fail|%v\", err)\n\t\treturn\n\t}\n\t// create message\n\tmsg := Message{\n\t\tHeader: &Header{\n\t\t\tVersion: 0,\n\t\t\tMessageType: MsgTypeRequest,\n\t\t\tCompressType: NoneCompress,\n\t\t\tSeqID: 1,\n\t\t\tExtension: 0,\n\t\t\tBodyLength: uint32(len(reqData)),\n\t\t},\n\t\tData: reqData,\n\t}\n\t// encode message\n\tmsgData := msg.Encode()\n\n\t// test receive message\n\trecvMsg, err := RecvMessage(bytes.NewReader(msgData))\n\tif err != nil {\n\t\tt.Fatalf(\"TestMessage_EncodeAndRecv|RecvMessage|Fail|%v\", err)\n\t\treturn\n\t}\n\n\t// compare\n\tdiff := cmp.Diff(msg, *recvMsg)\n\tif diff != \"\" {\n\t\tt.Fatalf(diff)\n\t}\n}", "func RunJSONSerializationTestForPartnerInfo(subject PartnerInfo) string {\n\t// Serialize to JSON\n\tbin, err := json.Marshal(subject)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\t// Deserialize back into memory\n\tvar actual PartnerInfo\n\terr = json.Unmarshal(bin, &actual)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\t// Check for outcome\n\tmatch := cmp.Equal(subject, actual, cmpopts.EquateEmpty())\n\tif !match {\n\t\tactualFmt := pretty.Sprint(actual)\n\t\tsubjectFmt := pretty.Sprint(subject)\n\t\tresult := diff.Diff(subjectFmt, actualFmt)\n\t\treturn result\n\t}\n\n\treturn \"\"\n}", "func TestEventService(t *testing.T) {\n\tvar result EventService\n\terr := json.NewDecoder(strings.NewReader(eventServiceBody)).Decode(&result)\n\n\tif err != nil {\n\t\tt.Errorf(\"Error decoding JSON: %s\", err)\n\t}\n\n\tif result.ID != \"EventService\" {\n\t\tt.Errorf(\"Received invalid ID: %s\", result.ID)\n\t}\n\n\tif result.Name != \"Event Service\" {\n\t\tt.Errorf(\"Received invalid name: %s\", result.Name)\n\t}\n\n\tif result.DeliveryRetryAttempts != 4 {\n\t\tt.Errorf(\"Expected 4 retry attempts, got: %d\", result.DeliveryRetryAttempts)\n\t}\n\n\tif result.DeliveryRetryIntervalSeconds != 30 {\n\t\tt.Errorf(\"Expected 30 second retry interval, got: %d\", result.DeliveryRetryIntervalSeconds)\n\t}\n\n\tif result.SSEFilterPropertiesSupported.MetricReportDefinition {\n\t\tt.Error(\"MetricReportDefinition filter should be false\")\n\t}\n\n\tif !result.SSEFilterPropertiesSupported.MessageID {\n\t\tt.Error(\"Message ID filter should be true\")\n\t}\n\n\tif result.submitTestEventTarget != \"/redfish/v1/EventService/Actions/EventService.SubmitTestEvent\" {\n\t\tt.Errorf(\"Invalid SubmitTestEvent target: %s\", result.submitTestEventTarget)\n\t}\n\n\tfor _, et := range result.EventTypesForSubscription {\n\t\tif !et.IsValidEventType() {\n\t\t\tt.Errorf(\"invalid event type: %s\", et)\n\t\t}\n\t}\n\n}", "func TestObjectMessage(t *testing.T) {\n\tprefix := \"test\"\n\tlevel := level.Info\n\ttimestamp := time.Now()\n\toutput := &Stdout\n\tstackframe := getStackFrame(2)\n\n\ttests := []struct {\n\t\tname string\n\t\tf string\n\t\ta []any\n\t\te map[string]interface{}\n\t}{\n\t\t{\n\t\t\tname: \"Object message with formatted string\",\n\t\t\tf: \"formatted string %s\",\n\t\t\ta: []any{\"value\"},\n\t\t\te: map[string]interface{}{\n\t\t\t\t\"prefix\": prefix,\n\t\t\t\t\"level\": \"INFO\",\n\t\t\t\t\"timestamp\": timestamp.Format(output.TimestampFormat),\n\t\t\t\t\"message\": \"formatted string value\",\n\t\t\t\t\"filePath\": stackframe.FilePath,\n\t\t\t\t\"funcName\": stackframe.FuncName,\n\t\t\t\t\"funcAddress\": fmt.Sprintf(\"%#x\", stackframe.FuncAddress),\n\t\t\t\t\"lineNumber\": stackframe.FileLine,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Object message with multiple 
formatted values\",\n\t\t\tf: \"formatted string with multiple values %s %d\",\n\t\t\ta: []any{\"value\", 1},\n\t\t\te: map[string]interface{}{\n\t\t\t\t\"prefix\": prefix,\n\t\t\t\t\"level\": \"INFO\",\n\t\t\t\t\"timestamp\": timestamp.Format(output.TimestampFormat),\n\t\t\t\t\"message\": \"formatted string with multiple values value 1\",\n\t\t\t\t\"filePath\": stackframe.FilePath,\n\t\t\t\t\"funcName\": stackframe.FuncName,\n\t\t\t\t\"funcAddress\": fmt.Sprintf(\"%#x\", stackframe.FuncAddress),\n\t\t\t\t\"lineNumber\": stackframe.FileLine,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Object message with no formatting\",\n\t\t\tf: \"\",\n\t\t\ta: []any{\"value\"},\n\t\t\te: map[string]interface{}{\n\t\t\t\t\"prefix\": prefix,\n\t\t\t\t\"level\": \"INFO\",\n\t\t\t\t\"timestamp\": timestamp.Format(output.TimestampFormat),\n\t\t\t\t\"message\": \"value\",\n\t\t\t\t\"filePath\": stackframe.FilePath,\n\t\t\t\t\"funcName\": stackframe.FuncName,\n\t\t\t\t\"funcAddress\": fmt.Sprintf(\"%#x\", stackframe.FuncAddress),\n\t\t\t\t\"lineNumber\": stackframe.FileLine,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tresult := objectMessage(\n\t\t\t\tprefix,\n\t\t\t\tlevel,\n\t\t\t\ttimestamp,\n\t\t\t\toutput,\n\t\t\t\tstackframe,\n\t\t\t\ttest.f,\n\t\t\t\ttest.a...,\n\t\t\t)\n\n\t\t\t// Unmarshal the JSON result into a map\n\t\t\tvar resultObj map[string]interface{}\n\t\t\terr := json.Unmarshal([]byte(result), &resultObj)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Failed to unmarshal JSON: %v\", err)\n\t\t\t}\n\n\t\t\tif resultObj[\"level\"] != test.e[\"level\"] ||\n\t\t\t\tresultObj[\"prefix\"] != test.e[\"prefix\"] ||\n\t\t\t\tresultObj[\"message\"] != test.e[\"message\"] {\n\t\t\t\tt.Errorf(\"Expected '%v', got '%v'\", test.e, resultObj)\n\t\t\t}\n\t\t})\n\t}\n}", "func (a *UtilsApiService) Test(ctx context.Context) (TestResponse, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Get\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue TestResponse\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/utils/test\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json;charset=UTF-8\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil 
{\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode < 300 {\n\t\t// If we succeed, return the data, otherwise pass on to decode error.\n\t\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"));\n\t\tif err == nil {\n\t\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t\t}\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v TestResponse\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"));\n\t\t\t\tif err != nil {\n\t\t\t\t\tnewErr.error = err.Error()\n\t\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t\t}\n\t\t\t\tnewErr.model = v\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 0 {\n\t\t\tvar v ErrorModel\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"));\n\t\t\t\tif err != nil {\n\t\t\t\t\tnewErr.error = err.Error()\n\t\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t\t}\n\t\t\t\tnewErr.model = v\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}", "func testType(urlString string, typ interface{}) error {\n\tslice := reflect.Indirect(reflect.ValueOf(typ)).Kind() == reflect.Slice\n\n\treq, err := client.NewRequest(\"GET\", urlString, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// start with a json.RawMessage so we can decode multiple ways below\n\traw := new(json.RawMessage)\n\t_, err = client.Do(req, raw)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// unmarshal directly to a map\n\tvar m1 map[string]interface{}\n\tif slice {\n\t\tvar s []map[string]interface{}\n\t\terr := json.Unmarshal(*raw, &s)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tm1 = s[0]\n\t} else {\n\t\terr := json.Unmarshal(*raw, &m1)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// unmarshal to typ first, then re-marshal and unmarshal to a map\n\terr = json.Unmarshal(*raw, typ)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar byt []byte\n\tif slice {\n\t\t// use first item in slice\n\t\tv := reflect.Indirect(reflect.ValueOf(typ))\n\t\tbyt, err = json.Marshal(v.Index(0).Interface())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tbyt, err = json.Marshal(typ)\n\t}\n\n\tvar m2 map[string]interface{}\n\terr = json.Unmarshal(byt, &m2)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// now compare the two maps\n\tfor k, v := range m1 {\n\t\tif _, ok := m2[k]; !ok {\n\t\t\tfmt.Printf(\"%v missing field for key: %v (example value: %v)\\n\", reflect.TypeOf(typ), k, v)\n\t\t}\n\t}\n\n\treturn nil\n}", "func TestMessageFunky(t *testing.T) {\n\n\tmsg := \"test @(emoticonnnnnnnn) @man (Zebrahttp://cnn.com\"\n\trespData, err := processMsgHelper(&msg)\n\n\tif err != nil {\n\t\tt.Fatalf(\"Failed with an error\")\n\t}\n\n\tif v, ok := (respData)[MentionType]; ok == false || len(v) != 1 {\n\t\tt.Fatalf(\"Expected to have 1 mention\")\n\t}\n\n\tif v, ok := (respData)[EmoticonType]; ok == false || len(v) != 1 {\n\t\tt.Fatalf(\"Expected to have 1 emoticon\")\n\t}\n\n\tif v, ok := (respData)[UrlType]; ok == false || len(v) != 1 {\n\t\tt.Fatalf(\"Expected to have 
1 url\")\n\t}\n\n\tvar mention *string = respData[MentionType][0].(*string)\n\tif *mention != \"man\" {\n\t\tt.Fatalf(\"Mismatched mention\")\n\t}\n\n\tvar emoticon *string = respData[EmoticonType][0].(*string)\n\tif *emoticon != \"emoticonnnnnnnn\" {\n\t\tt.Fatalf(\"Mismatched emoticon\")\n\t}\n\n\tvar uInfo *urlInfo = respData[UrlType][0].(*urlInfo)\n\tif uInfo.Url != \"http://cnn.com\" {\n\t\tt.Fatalf(\"Expected cnn.com for url\")\n\t}\n\n\tif len(uInfo.Title) == 0 {\n\t\tt.Fatalf(\"Expected title for the url\")\n\t}\n}", "func testPostWebhook(p bytes.Buffer) {\n\tresp, _ := http.PostForm(\n\t\t\"\",\n\t\turl.Values{\"payload_json\": {p.String()}},\n\t)\n\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\tdefer resp.Body.Close()\n\tfmt.Println(string(body))\n}", "func TestComplexTypes(t *testing.T) {\n\n}", "func TestObjectCreated(t *testing.T) {\n\texpectedCard := TeamsMessageCard{\n\t\tType: messageType,\n\t\tContext: context,\n\t\tThemeColor: msTeamsColors[\"Normal\"],\n\t\tSummary: \"kubewatch notification received\",\n\t\tTitle: \"kubewatch\",\n\t\tText: \"\",\n\t\tSections: []TeamsMessageCardSection{\n\t\t\t{\n\t\t\t\tActivityTitle: \"A `pod` in namespace `new` has been `created`:\\n`foo`\",\n\t\t\t\tMarkdown: true,\n\t\t\t},\n\t\t},\n\t}\n\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(http.StatusOK)\n\t\tif r.Method != \"POST\" {\n\t\t\tt.Errorf(\"expected a POST request for ObjectCreated()\")\n\t\t}\n\t\tdecoder := json.NewDecoder(r.Body)\n\t\tvar c TeamsMessageCard\n\t\tif err := decoder.Decode(&c); err != nil {\n\t\t\tt.Errorf(\"%v\", err)\n\t\t}\n\t\tif !reflect.DeepEqual(c, expectedCard) {\n\t\t\tt.Errorf(\"expected %v, got %v\", expectedCard, c)\n\t\t}\n\t}))\n\n\tms := &MSTeams{TeamsWebhookURL: ts.URL}\n\tp := &v1.Pod{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Pod\",\n\t\t\tAPIVersion: \"v1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tUID: \"12345678\",\n\t\t\tName: \"foo\",\n\t\t\tNamespace: \"new\",\n\t\t},\n\t}\n\tms.ObjectCreated(p)\n}", "func TestValidate1(t *testing.T) {\n\tendpoints := make(map[string]map[string]*Endpoint)\n\tendpoints[\"/test\"] = map[string]*Endpoint{\n\t\t\"get\": {\n\t\t\tParams: &Parameters{\n\t\t\t\tQuery: map[string]*ParamEntry{\"test\": {Type: \"string\", Required: true}},\n\t\t\t\tPath: map[string]*ParamEntry{\"test\": {Type: \"boolean\", Required: true}},\n\t\t\t},\n\t\t\tRecieves: &Recieves{\n\t\t\t\tHeaders: map[string]string{\"foo\": \"bar\"},\n\t\t\t\tBody: map[string]string{\"example_array.0.foo\": \"string\"},\n\t\t\t},\n\t\t\tResponses: map[int]*Response{\n\t\t\t\t200: {\n\t\t\t\t\tHeaders: map[string]string{\"foo\": \"bar\"},\n\t\t\t\t\tBody: map[string]interface{}{\"bar\": \"foo\"},\n\t\t\t\t\tWeight: 100,\n\t\t\t\t\tActions: []map[string]interface{}{\n\t\t\t\t\t\t{\"delay\": 10},\n\t\t\t\t\t\t{\"request\": map[interface{}]interface{}{\"target\": \"testService\", \"id\": \"testRequest\"}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tActions: []map[string]interface{}{\n\t\t\t\t{\"delay\": 10},\n\t\t\t\t{\"request\": map[interface{}]interface{}{\"target\": \"testService\", \"id\": \"testRequest\"}},\n\t\t\t},\n\t\t},\n\t}\n\n\tcfg := &Config{\n\t\tVersion: 1.0,\n\t\tServices: map[string]*Service{\n\t\t\t\"testService\": {Hostname: \"localhost\", Port: 8080},\n\t\t},\n\t\tStartupActions: []map[string]interface{}{\n\t\t\t{\"delay\": 10},\n\t\t\t{\"request\": map[interface{}]interface{}{\"target\": \"testService\", \"id\": \"testRequest\"}},\n\t\t},\n\t\tRequests: 
map[string]*Request{\n\t\t\t\"testRequest\": {\n\t\t\t\tURL: \"/test\",\n\t\t\t\tProtocol: \"http\",\n\t\t\t\tMethod: \"get\",\n\t\t\t\tHeaders: map[string]string{\"foo\": \"bar\"},\n\t\t\t\tBody: nil,\n\t\t\t\tExpectedResponse: &Response{\n\t\t\t\t\tStatusCode: 200,\n\t\t\t\t\tBody: map[string]interface{}{\"foo.bar\": \"string\"},\n\t\t\t\t\tHeaders: nil,\n\t\t\t\t\tWeight: 100,\n\t\t\t\t\tActions: []map[string]interface{}{\n\t\t\t\t\t\t{\"delay\": 10},\n\t\t\t\t\t\t{\"request\": map[interface{}]interface{}{\"target\": \"testService\", \"id\": \"testRequest\"}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tEndpoints: endpoints,\n\t}\n\n\tif err := Validate(cfg); err != nil {\n\t\tt.Errorf(\"Validation Failed: %s\", err.Error())\n\t}\n}", "func TestDecode(t *testing.T) {\n\tt.Parallel()\n\n\ttestCases := []struct {\n\t\tname string\n\t\ttweakEnc func(*msgpack.Encoder)\n\t\tinput any // will be encoded verbatim with\n\t\texpect *TestMessage\n\t\texpectUnknown protoreflect.RawFields\n\t\texpectRaw msgpack.RawMessage\n\t\texpectDecoded any\n\t\terr string\n\t}{\n\t\t{\n\t\t\tname: \"int32->int64\",\n\t\t\tinput: map[int32]any{\n\t\t\t\t3: int32(10),\n\t\t\t},\n\t\t\texpect: &TestMessage{Intval: 10},\n\t\t},\n\t\t{\n\t\t\tname: \"int8->int64\",\n\t\t\tinput: map[int32]any{\n\t\t\t\t3: int8(10),\n\t\t\t},\n\t\t\texpect: &TestMessage{Intval: 10},\n\t\t},\n\t\t{\n\t\t\tname: \"int64->int32\",\n\t\t\tinput: map[int32]any{\n\t\t\t\t5: int64(10),\n\t\t\t},\n\t\t\texpect: &TestMessage{ShortIntval: 10},\n\t\t},\n\t\t{\n\t\t\tname: \"int64->int32 (overflow)\",\n\t\t\tinput: map[int32]any{\n\t\t\t\t5: int64(math.MaxInt32 * 2),\n\t\t\t},\n\t\t\texpect: &TestMessage{ShortIntval: -2},\n\t\t},\n\t\t{\n\t\t\tname: \"float64->int32\",\n\t\t\tinput: map[int32]any{\n\t\t\t\t5: float64(217),\n\t\t\t},\n\t\t\terr: \"bad type: expected int32, got float64\",\n\t\t},\n\n\t\t{\n\t\t\tname: \"unknown field\",\n\t\t\tinput: map[int32]any{\n\t\t\t\t777: \"nerds\",\n\t\t\t\t3: 100,\n\t\t\t},\n\t\t\texpect: &TestMessage{\n\t\t\t\tIntval: 100,\n\t\t\t},\n\t\t\texpectUnknown: []byte{\n\t\t\t\t250, 255, 255, 255, 15, // proto: 536870911: LEN\n\t\t\t\t10, // proto: 10 bytes in this field\n\t\t\t\t129, // msgpack: 1 element map\n\t\t\t\t205, 3, 9, // msgpack: 777\n\t\t\t\t165, 110, 101, 114, 100, 115, // msgpack: 5-char string, \"nerds\"\n\t\t\t},\n\t\t\texpectRaw: []byte{\n\t\t\t\t130, // 2 item map\n\t\t\t\t3, 100, // tag 3, 100\n\t\t\t\t205, 3, 9, 165, 110, 101, 114, 100, 115, // tag 777, 5 char string \"nerds\"\n\t\t\t},\n\t\t\texpectDecoded: map[int32]any{\n\t\t\t\t3: int64(100),\n\t\t\t\t777: \"nerds\",\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tname: \"sparse array\",\n\t\t\tinput: map[int32]any{\n\t\t\t\t13: map[int32]string{\n\t\t\t\t\t3: \"hello\",\n\t\t\t\t\t12: \"there\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpect: &TestMessage{\n\t\t\t\tStrings: []string{\n\t\t\t\t\t\"\", \"\", \"\",\n\t\t\t\t\t\"hello\",\n\t\t\t\t\t\"\", \"\", \"\",\n\t\t\t\t\t\"\", \"\", \"\",\n\t\t\t\t\t\"\", \"\",\n\t\t\t\t\t\"there\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tConvey(`TestDecode`, t, func() {\n\t\tfor _, tc := range testCases {\n\t\t\ttc := tc\n\t\t\tConvey(tc.name, func() {\n\t\t\t\tenc := msgpack.GetEncoder()\n\t\t\t\tdefer msgpack.PutEncoder(enc)\n\n\t\t\t\tbuf := bytes.Buffer{}\n\t\t\t\tenc.Reset(&buf)\n\t\t\t\tif tc.tweakEnc != nil {\n\t\t\t\t\ttc.tweakEnc(enc)\n\t\t\t\t}\n\t\t\t\tSo(enc.Encode(tc.input), ShouldBeNil)\n\n\t\t\t\tmsg := &TestMessage{}\n\t\t\t\terr := Unmarshal(buf.Bytes(), msg)\n\t\t\t\tif tc.err == \"\" 
{\n\t\t\t\t\tSo(err, ShouldBeNil)\n\n\t\t\t\t\tknown := proto.Clone(msg).(*TestMessage)\n\t\t\t\t\tknown.ProtoReflect().SetUnknown(nil)\n\t\t\t\t\tSo(known, ShouldResembleProto, tc.expect)\n\n\t\t\t\t\tSo(msg.ProtoReflect().GetUnknown(), ShouldResemble, tc.expectUnknown)\n\n\t\t\t\t\tif tc.expectRaw != nil {\n\t\t\t\t\t\traw, err := Marshal(msg, Deterministic)\n\t\t\t\t\t\tSo(err, ShouldBeNil)\n\n\t\t\t\t\t\tSo(raw, ShouldResemble, tc.expectRaw)\n\n\t\t\t\t\t\tif len(msg.ProtoReflect().GetUnknown()) > 0 {\n\t\t\t\t\t\t\tdec := msgpack.GetDecoder()\n\t\t\t\t\t\t\tdefer msgpack.PutDecoder(dec)\n\t\t\t\t\t\t\tdec.Reset(bytes.NewBuffer(raw))\n\t\t\t\t\t\t\tdec.UseLooseInterfaceDecoding(true)\n\t\t\t\t\t\t\tdec.SetMapDecoder(func(d *msgpack.Decoder) (any, error) {\n\t\t\t\t\t\t\t\treturn d.DecodeUntypedMap()\n\t\t\t\t\t\t\t})\n\n\t\t\t\t\t\t\tdecoded := reflect.MakeMap(reflect.TypeOf(tc.expectDecoded))\n\n\t\t\t\t\t\t\tSo(dec.DecodeValue(decoded), ShouldBeNil)\n\n\t\t\t\t\t\t\tSo(decoded.Interface(), ShouldResemble, tc.expectDecoded)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tSo(err, ShouldErrLike, tc.err)\n\t\t\t\t}\n\n\t\t\t})\n\t\t}\n\t})\n\n}", "func TestSimpleJSONWithMatchers(t *testing.T) {\n\n\tassert := assert.New(t)\n\n\tm := []string{\"mac:112233445566\", \"mac:112233445565\"}\n\n\ttrans := &transport{}\n\tobs, err := simpleSetup(trans, time.Second, m)\n\tassert.Nil(err)\n\n\treq := simpleJSONRequest()\n\n\tobs.QueueJSON(req, \"iot\", \"mac:112233445565\", \"1234\")\n\tobs.QueueJSON(req, \"test\", \"mac:112233445566\", \"1234\")\n\tobs.QueueJSON(req, \"iot\", \"mac:112233445560\", \"1234\")\n\tobs.QueueJSON(req, \"test\", \"mac:112233445560\", \"1234\")\n\n\tobs.Shutdown(true)\n\n\tassert.Equal(int32(2), trans.i)\n}", "func TestCreateAPObject(t *testing.T) {\n\tclient := newPetsClient(t)\n\tresult, err := client.CreateAPObject(context.Background(), PetAPObject{\n\t\tID: to.Ptr[int32](2),\n\t\tName: to.Ptr(\"Hira\"),\n\t\tAdditionalProperties: map[string]interface{}{\n\t\t\t\"siblings\": []interface{}{\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"id\": float64(1),\n\t\t\t\t\t\"name\": \"Puppy\",\n\t\t\t\t\t\"birthdate\": \"2017-12-13T02:29:51Z\",\n\t\t\t\t\t\"complexProperty\": map[string]interface{}{\n\t\t\t\t\t\t\"color\": \"Red\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"picture\": base64.StdEncoding.EncodeToString([]byte{255, 255, 255, 255, 254}),\n\t\t},\n\t}, nil)\n\trequire.NoError(t, err)\n\tif r := cmp.Diff(result.PetAPObject, PetAPObject{\n\t\tID: to.Ptr[int32](2),\n\t\tName: to.Ptr(\"Hira\"),\n\t\tStatus: to.Ptr(true),\n\t\tAdditionalProperties: map[string]interface{}{\n\t\t\t\"siblings\": []interface{}{\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"id\": float64(1),\n\t\t\t\t\t\"name\": \"Puppy\",\n\t\t\t\t\t\"birthdate\": \"2017-12-13T02:29:51Z\",\n\t\t\t\t\t\"complexProperty\": map[string]interface{}{\n\t\t\t\t\t\t\"color\": \"Red\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"picture\": base64.StdEncoding.EncodeToString([]byte{255, 255, 255, 255, 254}),\n\t\t},\n\t}); r != \"\" {\n\t\tt.Fatal(r)\n\t}\n}", "func generateTestSpec(fields []gengo.Field) *gengo.MsgSpec {\n\tmsgSpec := &gengo.MsgSpec{}\n\tmsgSpec.FullName = \"TestMessage\"\n\tmsgSpec.Package = \"Testing\"\n\tmsgSpec.MD5Sum = \"1337beeffeed1337\"\n\tmsgSpec.ShortName = \"Test\"\n\tmsgSpec.Fields = fields\n\treturn msgSpec\n}", "func RunJSONSerializationTestForJsonField(subject JsonField) string {\n\t// Serialize to JSON\n\tbin, err := json.Marshal(subject)\n\tif err != nil {\n\t\treturn 
err.Error()\n\t}\n\n\t// Deserialize back into memory\n\tvar actual JsonField\n\terr = json.Unmarshal(bin, &actual)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\t// Check for outcome\n\tmatch := cmp.Equal(subject, actual, cmpopts.EquateEmpty())\n\tif !match {\n\t\tactualFmt := pretty.Sprint(actual)\n\t\tsubjectFmt := pretty.Sprint(subject)\n\t\tresult := diff.Diff(subjectFmt, actualFmt)\n\t\treturn result\n\t}\n\n\treturn \"\"\n}", "func TestApiPOST(t *testing.T) {\n var body postBody\n body.Name = \"Joe Plum\"\n body.Salary = \"123\"\n body.Age = \"25\"\n\n var response postResponse\n statusCode, err := Post(\"http://dummy.restapiexample.com/api/v1/create\", body, &response)\n if err != nil {\n t.Errorf(\"Got err %v while trying to use api post\", err)\n return\n }\n if statusCode != 200 {\n t.Errorf(\"Got status code 200 while trying to use api post, got %v instead\", statusCode)\n return\n }\n if response.Status != \"success\" {\n t.Errorf(\"expected response.Status to be success, got %s instead\", response.Status)\n }\n if response.Data.Name != \"Joe Plum\" {\n t.Errorf(\"expected response.Data.Name to be Joe Plum, got %s instead\", response.Data.Name)\n }\n if response.Data.Salary != \"123\" {\n t.Errorf(\"expected response.Data.Salary to be 123, got %s instead\", response.Data.Salary)\n }\n if response.Data.Age != \"25\" {\n t.Errorf(\"expected response.Data.Age to be 25, got %s instead\", response.Data.Age)\n }\n return\n}", "func runTestGet(t *testing.T, s *Server, textPbPath string,\n\tdatatype gnmipb.GetRequest_DataType, encoding gnmipb.Encoding,\n\twantRetCode codes.Code, wantRespVal interface{}, useModels []*gnmipb.ModelData) {\n\t// Send request\n\tvar pbPath gnmipb.Path\n\tif err := proto.UnmarshalText(textPbPath, &pbPath); err != nil {\n\t\tt.Fatalf(\"error in unmarshaling path: %v\", err)\n\t}\n\treq := &gnmipb.GetRequest{\n\t\tPath: []*gnmipb.Path{&pbPath},\n\t\tType: datatype,\n\t\tEncoding: encoding,\n\t\tUseModels: useModels,\n\t}\n\tt.Log(\"req:\", req)\n\tresp, err := s.Get(context.Background(), req)\n\tt.Log(\"resp:\", resp)\n\n\t// Check return code\n\tif status.Code(err) != wantRetCode {\n\t\tt.Fatalf(\"got return code %v, want %v\", status.Code(err), wantRetCode)\n\t}\n\n\t// Check response value\n\tvar gotVal interface{}\n\tif resp != nil {\n\t\tnotifs := resp.GetNotification()\n\t\tif len(notifs) != 1 {\n\t\t\tt.Fatalf(\"got %d notifications, want 1\", len(notifs))\n\t\t}\n\t\tupdates := notifs[0].GetUpdate()\n\t\tif len(updates) != 1 {\n\t\t\tt.Fatalf(\"got %d updates in the notification, want 1\", len(updates))\n\t\t}\n\t\tval := updates[0].GetVal()\n\t\tif val == nil {\n\t\t\treturn\n\t\t}\n\n\t\tvar jsonbytes []byte\n\t\tswitch {\n\t\tcase val.GetJsonIetfVal() != nil:\n\t\t\tjsonbytes = val.GetJsonIetfVal()\n\t\tcase val.GetJsonVal() != nil:\n\t\t\tjsonbytes = val.GetJsonVal()\n\t\t}\n\n\t\tif len(jsonbytes) == 0 {\n\t\t\tgotVal, err = value.ToScalar(val)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"got: %v, want a scalar value\", gotVal)\n\t\t\t}\n\t\t} else {\n\t\t\t// Unmarshal json data to gotVal container for comparison\n\t\t\tif err := json.Unmarshal(jsonbytes, &gotVal); err != nil {\n\t\t\t\tt.Fatalf(\"error in unmarshaling JSON data to json val: %v\", err)\n\t\t\t}\n\t\t\tvar wantJSONStruct interface{}\n\t\t\tif err := json.Unmarshal([]byte(wantRespVal.(string)), &wantJSONStruct); err != nil {\n\t\t\t\tt.Fatalf(\"error in unmarshaling JSON data to json val: %v\", err)\n\t\t\t}\n\t\t\twantRespVal = wantJSONStruct\n\t\t}\n\t}\n\n\tif 
!reflect.DeepEqual(gotVal, wantRespVal) {\n\t\tt.Errorf(\"got: %v (%T),\\nwant %v (%T)\", gotVal, gotVal, wantRespVal, wantRespVal)\n\t}\n}", "func TestPagarmeSubscriptionCreate(t *testing.T) {\n \n Pagarme := pagarme.NewPagarme(\"pt-BR\", ApiKey, CryptoKey)\n Pagarme.SetDebug()\n\n planId, _ := client.Get(\"PlanoId\").Int64()\n cardId, _ := client.Get(\"CardId\").Result()\n subscription := pagarme.NewSubscriptionWithCard(planId)\n subscription.CardId = cardId\n subscription.PostbackUrl = \"https://mobilemind.free.beeceptor.com/webhook/pagarme\"\n\n\n pagarmefillCustomer(subscription.Customer)\n\n result, err := Pagarme.SubscriptionCreate(subscription)\n\n if err != nil {\n t.Errorf(\"Erro ao create subscription: %v\", err)\n }else{\n //t.Log(fmt.Sprintf(\"result = %v\", customer.Id)) \n\n if result.Id == 0 {\n t.Errorf(\"Subscription id is expected\")\n return\n }\n\n client.Set(\"SubscriptionId\", result.Id, 0)\n\n }\n\n}", "func TestJsonEncodeSubstruct(t *testing.T) {\n\tt.Parallel()\n\n\t// Set up a new mock substruct\n\ttype TestSubStruct struct {\n\t\tTestSubKey string `json:\"test_sub_key\"`\n\t}\n\t// Set up a mock struct for testing\n\ttype TestStruct struct {\n\t\tTestKey string `json:\"test_key\"`\n\t\tTestKeyTwo TestSubStruct `json:\"test_key_two\"`\n\t\tNotAllowed string `json:\"not_allowed\"`\n\t}\n\n\t// Base model and test model\n\tvar model = new(TestStruct)\n\tvar modelTest = new(TestStruct)\n\tvar allowedFields = []string{\"test_key\", \"test_key_two\"} // notice omitted: notAllowed\n\n\t// Set the testing data\n\tmodel.TestKey = \"TestValue1\"\n\tmodel.TestKeyTwo.TestSubKey = \"TestSubValue\"\n\tmodel.NotAllowed = \"PrivateValue\"\n\n\t// Set the buffer and encoder\n\tvar b bytes.Buffer\n\tenc := json.NewEncoder(&b)\n\n\t// Run the encoder\n\terr := JSONEncode(enc, model, allowedFields)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Now unmarshal and test\n\tif err = json.Unmarshal(b.Bytes(), &modelTest); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Test for our fields and values now\n\tif modelTest.TestKey != \"TestValue1\" {\n\t\tt.Fatal(\"TestKey does not have the right value! Encoding failed.\", modelTest.TestKey)\n\t} else if modelTest.TestKeyTwo.TestSubKey != \"TestSubValue\" {\n\t\tt.Fatal(\"TestKeyTwo does not have the right value! Encoding failed.\", modelTest.TestKeyTwo)\n\t} else if modelTest.NotAllowed == \"PrivateValue\" {\n\t\tt.Fatal(\"Field not removed! notAllowed does not have the right value! 
Encoding failed.\", modelTest.NotAllowed)\n\t}\n\n}", "func newMyTest(name string, req *msg.Request, status string, errors []string) *myTest {\n\treturn &myTest{\n\t\tname: name,\n\t\treq: req,\n\t\tres: &msg.Response{\n\t\t\tStatus: status,\n\t\t\tErrors: errors,\n\t\t\tDriver: driverVersion,\n\t\t\tLanguage: lang,\n\t\t\tLanguageVersion: langVersion,\n\t\t\tAST: getTree(req.Content),\n\t\t},\n\t}\n}", "func testHandler(w http.ResponseWriter, r *http.Request) {\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.Write([]byte(\"Cannot read body\"))\n\t}\n\n\ttestResponse := &TestResponse{r.Method, r.RequestURI[:strings.Index(r.RequestURI, \"?\")], string(body)}\n\n\tresp, err := json.Marshal(testResponse)\n\tif err != nil {\n\t\tw.Write([]byte(\"Cannot encode response data\"))\n\t}\n\n\tw.Write([]byte(resp))\n}", "func TestMyInfoProtocolFunctions(t *testing.T) {\n\toptions := &Options{\n\t\tHasHeader: true,\n\t\tRecordDelimiter: \"\\n\",\n\t\tFieldDelimiter: \",\",\n\t\tComments: \"\",\n\t\tName: \"S3Object\", // Default table name for all objects\n\t\tReadFrom: bytes.NewReader([]byte(\"name1,name2,name3,name4\" + \"\\n\" + \"5,is,a,string\" + \"\\n\" + \"random,random,stuff,stuff\")),\n\t\tCompressed: \"\",\n\t\tExpression: \"\",\n\t\tOutputFieldDelimiter: \",\",\n\t\tStreamSize: 20,\n\t}\n\ts3s, err := NewInput(options)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tmyVal, _ := s3s.createStatXML()\n\tmyOtherVal, _ := s3s.createProgressXML()\n\n\ttables := []struct {\n\t\tpayloadStatMsg string\n\t\tpayloadProgressMsg string\n\t\texpectedStat int\n\t\texpectedProgress int\n\t}{\n\t\t{myVal, myOtherVal, 233, 243},\n\t}\n\tfor _, table := range tables {\n\t\tvar currBuf = &bytes.Buffer{}\n\t\tif len(s3s.writeStatMessage(table.payloadStatMsg, currBuf).Bytes()) != table.expectedStat {\n\t\t\tt.Error()\n\t\t}\n\t\tcurrBuf.Reset()\n\t\tif len(s3s.writeProgressMessage(table.payloadProgressMsg, currBuf).Bytes()) != table.expectedProgress {\n\t\t\tt.Error()\n\t\t}\n\t}\n}", "func TestErrors(t *testing.T) {\n\tvar testMessenger = messaging.NewDummyMessenger(msgConfig)\n\tpub1 := publisher.NewPublisher(test1Config, testMessenger)\n\tpub1.Address()\n\tpub1.CreateInput(\"fakeid\", \"faketype\", types.DefaultInputInstance, nil)\n\tpub1.CreateInputFromFile(\"fakeid\", \"faketype\", types.DefaultInputInstance, \"fakepath\", nil)\n\tpub1.CreateInputFromHTTP(\"fakeid\", \"faketype\", types.DefaultInputInstance, \"fakeurl\", \"\", \"\", 0, nil)\n\tpub1.CreateInputFromOutput(\"fakeid\", \"faketype\", types.DefaultInputInstance, \"fakeaddr\", nil)\n\tpub1.CreateNode(\"fakeid\", types.NodeTypeAlarm)\n\tout1 := pub1.CreateOutput(\"fakeid\", types.OutputTypeAlarm, types.DefaultOutputInstance)\n\tpub1.Domain()\n\tpub1.GetDomainInputs()\n\tpub1.GetDomainNodes()\n\tpub1.GetDomainOutputs()\n\tpub1.GetDomainPublishers()\n\tpub1.GetDomainInput(\"fakeaddr\")\n\tpub1.GetDomainNode(\"fakeaddr\")\n\tpub1.GetDomainOutput(\"fakeaddr\")\n\tpub1.GetIdentity()\n\tpub1.GetIdentityKeys()\n\tpub1.GetInputByNodeHWID(\"fakenode\", \"\", \"\")\n\tpub1.GetInputs()\n\tpub1.GetNodeAttr(\"fakenode\", \"fakeattr\")\n\tpub1.GetNodeByAddress(\"fakeaddr\")\n\tpub1.GetNodeByHWID(\"fakenode\")\n\tpub1.GetNodeByNodeID(\"fakenode\")\n\tpub1.GetNodeConfigBool(\"fakeid\", \"fakeattr\", false)\n\tpub1.GetNodeConfigFloat(\"fakeid\", \"fakeattr\", 42.0)\n\tpub1.GetNodeConfigInt(\"fakeid\", \"fakeattr\", 42)\n\tpub1.GetNodeConfigString(\"fakeid\", \"fakeattr\", \"fake\")\n\tpub1.GetNodes()\n\tpub1.GetNodeStatus(\"fakeid\", 
\"fakeattr\")\n\tpub1.GetNodeStatus(\"doesntexist\", \"\")\n\tpub1.GetOutputByNodeHWID(\"fakenode\", \"\", \"\")\n\tpub1.GetOutputByID(\"fakeid\")\n\tpub1.GetOutputs()\n\tpub1.GetOutputValueByNodeHWID(\"fakedevice\", \"faketype\", \"\")\n\tpub1.GetOutputValueByID(\"fakeid\")\n\tpub1.MakeNodeDiscoveryAddress(\"fakeid\")\n\tpub1.PublishNodeConfigure(\"fakeaddr\", types.NodeAttrMap{})\n\tpub1.PublishRaw(out1, true, \"value\")\n\tpub1.SetNodeConfigHandler(nil)\n\tpub1.SetSigningOnOff(true)\n\tpub1.Subscribe(\"\", \"\")\n\tpub1.Unsubscribe(\"\", \"\")\n\tpub1.UpdateNodeErrorStatus(\"fakeid\", types.NodeRunStateError, \"fake status\")\n\tpub1.UpdateNodeAttr(\"fakeid\", types.NodeAttrMap{})\n\tpub1.UpdateNodeConfig(\"fakeid\", types.NodeAttrName, nil)\n\tpub1.UpdateNodeConfigValues(\"fakeid\", types.NodeAttrMap{})\n\tpub1.UpdateNodeStatus(\"fakeid\", types.NodeStatusMap{})\n\tpub1.UpdateOutput(nil)\n\tpub1.UpdateOutputForecast(\"fakeid\", []types.OutputValue{})\n}", "func TestMyProtocolFunction(t *testing.T) {\n\toptions := &Options{\n\t\tHasHeader: false,\n\t\tRecordDelimiter: \"\\n\",\n\t\tFieldDelimiter: \",\",\n\t\tComments: \"\",\n\t\tName: \"S3Object\", // Default table name for all objects\n\t\tReadFrom: bytes.NewReader([]byte(\"name1,name2,name3,name4\" + \"\\n\" + \"5,is,a,string\" + \"\\n\" + \"random,random,stuff,stuff\")),\n\t\tCompressed: \"\",\n\t\tExpression: \"\",\n\t\tOutputFieldDelimiter: \",\",\n\t\tStreamSize: 20,\n\t\tHeaderOpt: true,\n\t}\n\ts3s, err := NewInput(options)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\ttables := []struct {\n\t\tpayloadMsg string\n\t\texpectedRecord int\n\t\texpectedEnd int\n\t}{\n\t\t{\"random payload\", 115, 56},\n\t}\n\tfor _, table := range tables {\n\t\tvar currentMessage = &bytes.Buffer{}\n\t\tif len(s3s.writeRecordMessage(table.payloadMsg, currentMessage).Bytes()) != table.expectedRecord {\n\t\t\tt.Error()\n\t\t}\n\t\tcurrentMessage.Reset()\n\t\tif len(s3s.writeEndMessage(currentMessage).Bytes()) != table.expectedEnd {\n\t\t\tt.Error()\n\t\t}\n\t\tcurrentMessage.Reset()\n\t\tif len(s3s.writeContinuationMessage(currentMessage).Bytes()) != 57 {\n\t\t\tt.Error()\n\t\t}\n\t\tcurrentMessage.Reset()\n\t}\n}", "func TestCreateUser(t *testing.T) {\n\tt.Log(\"testing user creation\")\n\tvar createUser = func() {\n\t\trec = httptest.NewRecorder()\n\t\treq, err := http.NewRequest(\"POST\", \"/v0/users\", strings.NewReader(`\n\t\t{\n\t\t\t\"data\": {\n\t\t\t\t\"type\": \"users\",\n\t\t\t\t\"attributes\": {\n\t\t\t\t\t\"username\": \"Holygarian\",\n\t\t\t\t\t\"password\": \"pass6\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t`))\n\t\t// Expect(err).ToNot(HaveOccurred())\n\t\tapi.Handler().ServeHTTP(rec, req)\n\t\tExpect(rec.Code).To(Equal(http.StatusCreated))\n\t\tExpect(rec.Body.String()).To(MatchJSON(`\n\t\t{\n\t\t\t\"meta\": {\n\t\t\t\t\"author\": \"The api2go examples crew\",\n\t\t\t\t\"license\": \"wtfpl\",\n\t\t\t\t\"license-url\": \"http://www.wtfpl.net\"\n\t\t\t},\n\t\t\t\"data\": {\n\t\t\t\t\"id\": \"1\",\n\t\t\t\t\"type\": \"users\",\n\t\t\t\t\"attributes\": {\n\t\t\t\t\t\"user-name\": \"marvin\"\n\t\t\t\t},\n\t\t\t\t\"relationships\": {\n\t\t\t\t\t\"sweets\": {\n\t\t\t\t\t\t\"data\": [],\n\t\t\t\t\t\t\"links\": {\n\t\t\t\t\t\t\t\"related\": \"http://localhost:31415/v0/users/1/sweets\",\n\t\t\t\t\t\t\t\"self\": \"http://localhost:31415/v0/users/1/relationships/sweets\"\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t`))\n\t}\n\t\n}", "func TestExample1(t *testing.T) {\n\tinput := append([]byte{0x40, 1, 0x7d, 0x34,\n\t\t(11 << 4) | 11}, 
[]byte(\"temperature\")...)\n\n\tmsg, err := ParseDgramMessage(input)\n\tif err != nil {\n\t\tt.Fatalf(\"Error parsing message: %v\", err)\n\t}\n\n\tif msg.Type() != Confirmable {\n\t\tt.Errorf(\"Expected message type confirmable, got %v\", msg.Type())\n\t}\n\tif msg.Code() != codes.GET {\n\t\tt.Errorf(\"Expected message code codes.GET, got %v\", msg.Code())\n\t}\n\tif msg.MessageID() != 0x7d34 {\n\t\tt.Errorf(\"Expected message ID 0x7d34, got 0x%x\", msg.MessageID())\n\t}\n\n\tif msg.Option(URIPath).(string) != \"temperature\" {\n\t\tt.Errorf(\"Incorrect uri path: %q\", msg.Option(URIPath))\n\t}\n\n\tif len(msg.Token()) > 0 {\n\t\tt.Errorf(\"Incorrect token: %x\", msg.Token())\n\t}\n\tif len(msg.Payload()) > 0 {\n\t\tt.Errorf(\"Incorrect payload: %q\", msg.Payload())\n\t}\n}", "func RunJSONSerializationTestForPrivateEndpoint_Spec(subject PrivateEndpoint_Spec) string {\n\t// Serialize to JSON\n\tbin, err := json.Marshal(subject)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\t// Deserialize back into memory\n\tvar actual PrivateEndpoint_Spec\n\terr = json.Unmarshal(bin, &actual)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\t// Check for outcome\n\tmatch := cmp.Equal(subject, actual, cmpopts.EquateEmpty())\n\tif !match {\n\t\tactualFmt := pretty.Sprint(actual)\n\t\tsubjectFmt := pretty.Sprint(subject)\n\t\tresult := diff.Diff(subjectFmt, actualFmt)\n\t\treturn result\n\t}\n\n\treturn \"\"\n}", "func Test() {\n\t/*\n\t\tTest function to test functionality\n\t*/\n\tcompanySlug := \"\"\n\turl := \"https://api.fiken.no/api/v2/companies/\" + companySlug\n\ttoken := \"\"\n\tvar bearer = \"Bearer \" + token\n\n\tvar sale = CreateSale{\n\t\tSaleNumber: \"10002\",\n\t\tDate: \"2020-11-26\",\n\t\tKind: \"external_invoice\",\n\t\tSettled: true,\n\t\tTotalPaid: 56250,\n\t\tOutstandingBalance: 0,\n\t\tLines: []Line{\n\t\t\tLine{\n\t\t\t\tDescription: \"Nikey Shoes\",\n\t\t\t\tNetPrice: 45000,\n\t\t\t\tVat: 11250,\n\t\t\t\tAccount: \"3000\",\n\t\t\t\tVatType: \"HIGH\",\n\t\t\t},\n\t\t},\n\t\tCustomerID: 1710967200,\n\t\tCurrency: \"NOK\",\n\t\tDueDate: \"2020-11-26\",\n\t\t// Kid: \"5855454756\",\n\t\tPaymentAccount: \"1920:10001\",\n\t\tPaymentDate: \"2020-11-26\",\n\t\t// PaymentFee: 0,\n\t}\n\n\tCreateFikenSale(url, bearer, sale)\n\n\t// getSale(url, bearer, \"1713471449\")\n\n}", "func TestRequest(t *testing.T) {\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\ttransport := doh.(*transport)\n\trt := makeTestRoundTripper()\n\ttransport.client.Transport = rt\n\tgo doh.Query(simpleQueryBytes)\n\treq := <-rt.req\n\tif req.URL.String() != testURL {\n\t\tt.Errorf(\"URL mismatch: %s != %s\", req.URL.String(), testURL)\n\t}\n\treqBody, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif len(reqBody)%PaddingBlockSize != 0 {\n\t\tt.Errorf(\"reqBody has unexpected length: %d\", len(reqBody))\n\t}\n\t// Parse reqBody into a Message.\n\tnewQuery := mustUnpack(reqBody)\n\t// Ensure the converted request has an ID of zero.\n\tif newQuery.Header.ID != 0 {\n\t\tt.Errorf(\"Unexpected request header id: %v\", newQuery.Header.ID)\n\t}\n\t// Check that all fields except for Header.ID and Additionals\n\t// are the same as the original. 
Additionals may differ if\n\t// padding was added.\n\tif !queriesMostlyEqual(simpleQuery, *newQuery) {\n\t\tt.Errorf(\"Unexpected query body:\\n\\t%v\\nExpected:\\n\\t%v\", newQuery, simpleQuery)\n\t}\n\tcontentType := req.Header.Get(\"Content-Type\")\n\tif contentType != \"application/dns-message\" {\n\t\tt.Errorf(\"Wrong content type: %s\", contentType)\n\t}\n\taccept := req.Header.Get(\"Accept\")\n\tif accept != \"application/dns-message\" {\n\t\tt.Errorf(\"Wrong Accept header: %s\", accept)\n\t}\n}", "func TestBuildDeviceTwinResult(t *testing.T) {\n\tbaseMessage := BaseMessage{EventID: uuid.New().String(), Timestamp: time.Now().UnixNano() / 1e6}\n\tmsgTwins := createMessageTwin()\n\tdevTwinResultDealType0 := createDeviceTwinResultDealTypeGet(baseMessage)\n\tbytesDealType0, _ := json.Marshal(devTwinResultDealType0)\n\tdevTwinResult1 := createDeviceTwinResult(baseMessage)\n\tbytesDealType1, _ := json.Marshal(devTwinResult1)\n\ttests := []struct {\n\t\tname string\n\t\tbaseMessage BaseMessage\n\t\ttwins map[string]*MsgTwin\n\t\tdealType int\n\t\twant []byte\n\t\twantErr error\n\t}{\n\t\t{\n\t\t\tname: \"Test1\",\n\t\t\tbaseMessage: baseMessage,\n\t\t\ttwins: msgTwins,\n\t\t\tdealType: 0,\n\t\t\twant: bytesDealType0,\n\t\t\twantErr: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"Test2\",\n\t\t\tbaseMessage: baseMessage,\n\t\t\ttwins: msgTwins,\n\t\t\tdealType: 1,\n\t\t\twant: bytesDealType1,\n\t\t\twantErr: nil,\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tgot, err := BuildDeviceTwinResult(test.baseMessage, test.twins, test.dealType)\n\t\t\tif !reflect.DeepEqual(err, test.wantErr) {\n\t\t\t\tt.Errorf(\"BuildDeviceTwinResult() error = %v, wantErr %v\", err, test.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(got, test.want) {\n\t\t\t\tt.Errorf(\"BuildDeviceTwinResult() = %v, want %v\", got, test.want)\n\t\t\t}\n\t\t})\n\t}\n}", "func TestPostSlidesSaveAs(t *testing.T) {\n request := createPostSlidesSaveAsRequest()\n e := initializeTest(\"PostSlidesSaveAs\", \"\", \"\")\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n c := getTestApiClient()\n r, _, e := c.DocumentApi.PostSlidesSaveAs(request)\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n assertBinaryResponse(r, t)\n}", "func RunUnitTest(t *testing.T, e *echo.Echo, test Test) {\n\tvar req *http.Request\n\tif test.Payload != nil {\n\t\tpayloadBytes, err := test.Payload.MarshalBinary()\n\t\tif !assert.NoError(t, err) {\n\t\t\treturn\n\t\t}\n\t\tif test.MalformedPayload {\n\t\t\tpayloadBytes = append([]byte{'x'}, payloadBytes...)\n\t\t}\n\t\treq = httptest.NewRequest(test.Method, test.URL, bytes.NewReader(payloadBytes))\n\t\treq.Header.Set(echo.HeaderContentType, echo.MIMEApplicationJSON)\n\t} else {\n\t\treq = httptest.NewRequest(test.Method, test.URL, bytes.NewReader([]byte{}))\n\t}\n\n\trecorder := httptest.NewRecorder()\n\tc := e.NewContext(req, recorder)\n\tc.SetParamNames(test.ParamNames...)\n\tc.SetParamValues(test.ParamValues...)\n\thandlerErr := test.Handler(c)\n\tif handlerErr != nil {\n\t\t// In prod, this is handled by CollectStats middleware\n\t\tc.Error(handlerErr)\n\t}\n\tassert.Equal(t, test.ExpectedStatus, recorder.Code)\n\n\tif test.ExpectedError != \"\" {\n\t\t// echo.HTTPError prefixes the error with the status code, so we pop\n\t\t// that off before comparing\n\t\tif httpErr, ok := handlerErr.(*echo.HTTPError); ok {\n\t\t\t// Coerce returned message to string, to avoid type mismatches\n\t\t\ttest_utils.AssertErrorsEqual(t, test.ExpectedError, 
fmt.Sprintf(\"%s\", httpErr.Message))\n\t\t} else {\n\t\t\tassert.EqualError(t, handlerErr, test.ExpectedError)\n\t\t}\n\t} else if test.ExpectedErrorSubstring != \"\" {\n\t\tif handlerErr == nil {\n\t\t\tassert.Fail(t, \"unexpected nil error\", \"error was nil but was expecting %s\", test.ExpectedErrorSubstring)\n\t\t} else {\n\t\t\ttest_utils.AssertErrorsEqual(t, test.ExpectedErrorSubstring, handlerErr.Error())\n\t\t}\n\t} else {\n\t\tif assert.NoError(t, handlerErr) && test.ExpectedResult != nil {\n\t\t\texpectedBytes, err := test.ExpectedResult.MarshalBinary()\n\t\t\tif assert.NoError(t, err) {\n\t\t\t\t// Convert to string for more readable assert failure messages.\n\t\t\t\t//\n\t\t\t\t// json.Marshal returns the serialized value as-is, without\n\t\t\t\t// appending a newline.\n\t\t\t\t//\n\t\t\t\t// The echo.Context's JSON method uses a json.Encoder to encode\n\t\t\t\t// its object. The json.Encoder object always appends a newline\n\t\t\t\t// to the end of the serialized value.\n\t\t\t\t//\n\t\t\t\t// To handle this mismatch, trim a newline from both values.\n\t\t\t\texpected := strings.TrimSuffix(string(expectedBytes), \"\\n\")\n\t\t\t\tactual := strings.TrimSuffix(recorder.Body.String(), \"\\n\")\n\t\t\t\tassert.Equal(t, expected, actual)\n\t\t\t}\n\t\t}\n\t}\n}", "func RunJSONSerializationTestForJsonField_STATUS(subject JsonField_STATUS) string {\n\t// Serialize to JSON\n\tbin, err := json.Marshal(subject)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\t// Deserialize back into memory\n\tvar actual JsonField_STATUS\n\terr = json.Unmarshal(bin, &actual)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\t// Check for outcome\n\tmatch := cmp.Equal(subject, actual, cmpopts.EquateEmpty())\n\tif !match {\n\t\tactualFmt := pretty.Sprint(actual)\n\t\tsubjectFmt := pretty.Sprint(subject)\n\t\tresult := diff.Diff(subjectFmt, actualFmt)\n\t\treturn result\n\t}\n\n\treturn \"\"\n}", "func TestEndpoint(t *testing.T) {\n\t// {\"service\":\"Service\",\"service_id\":\"ServiceId\",\"frontend\":\"Frontend\",\"deploy_path\":\"DeployPath\",\"hostname\":\"Hostname\",\"start_time\":\"StartTime\"}\n\n\t// 1. Normal Marshal & Unmarshal\n\tendpoint := &ServiceEndpoint{\n\t\tService: \"Service\",\n\t\tServiceId: \"ServiceId\",\n\t\tFrontend: \"Frontend\",\n\t\tDeployPath: \"DeployPath\",\n\t\tHostname: \"Hostname\",\n\t\tStartTime: \"StartTime\",\n\t}\n\n\tdata, _ := json.Marshal(endpoint)\n\tfmt.Println(\"Endpoint: \", string(data))\n\n\tassert.True(t, true)\n\n\t// 2. Unmarshal with missing fields (missing fields are left empty)\n\tdata21 := []byte(`{\"service\":\"Service\",\"service_id\":\"ServiceId\",\"frontend\":\"Frontend\"}`)\n\n\tendpoint2 := &ServiceEndpoint{}\n\terr2 := json.Unmarshal(data21, endpoint2)\n\tassert.True(t, err2 == nil)\n\n\tfmt.Println(\"Error2: \", err2)\n\tdata22, _ := json.Marshal(endpoint2)\n\tfmt.Println(\"Endpoint2: \", string(data22))\n\n\t// 3. 
Unmarshal with extra fields (extra fields are simply ignored)\n\tdata31 := []byte(`{\"service\":\"Service\", \"serviceA\":\"AService\",\"service_id\":\"ServiceId\",\"frontend\":\"Frontend\"}`)\n\tendpoint3 := &ServiceEndpoint{}\n\terr3 := json.Unmarshal(data31, endpoint3)\n\tassert.True(t, err3 == nil)\n\tfmt.Println(\"Error3: \", err3)\n\tdata32, _ := json.Marshal(endpoint3)\n\tfmt.Println(\"Endpoint3: \", string(data32))\n\n}", "func testGeneratedType(t *testing.T, name string, parser *JSONParser, data []byte) {\n\terr := parser.FeedBytes(data)\n\trequire.NoError(t, err)\n\n\tparserOutput := parser.String()\n\n\tfilename := makeTypeTestGoFile(t, parserOutput)\n\n\trunCmd := exec.Command(\"go\", \"run\", filename)\n\trunCmd.Stdin = bytes.NewBuffer(data)\n\tout, err := runCmd.CombinedOutput()\n\trequire.NoError(t, err, \"running go code: %v, %s\", err, out)\n\n\t// unmarshal input data and test output data to generic type, then compare\n\tvar valIn, valOut interface{}\n\terr = json.Unmarshal(data, &valIn)\n\trequire.NoError(t, err, \"unmarshaling input data: %v\", err)\n\terr = json.Unmarshal([]byte(out), &valOut)\n\trequire.NoError(t, err, \"unmarshaling output data: %v\", err)\n\tif !compareIgnoringNilKeys(t, valIn, valOut) {\n\t\tt.Logf(\"got different value after marshal/unmarshal:\\n%#+v\\n%#+v\\n\\n%s\", valIn, valOut, parserOutput)\n\t}\n}", "func TestPost(t *testing.T) {\n\tconst case1Empty = \"/\"\n\tconst case2SetHeader = \"/set_header\"\n\tconst case3SendJSON = \"/send_json\"\n\tconst case4SendString = \"/send_string\"\n\tconst case5IntegrationSendJSONString = \"/integration_send_json_string\"\n\tconst case6SetQuery = \"/set_query\"\n\tconst case7IntegrationSendJSONStruct = \"/integration_send_json_struct\"\n\t// Check that long ID numbers are kept as strings rather than converted to float64\n\tconst case8SendJSONWithLongIDNumber = \"/send_json_with_long_id_number\"\n\tconst case9SendJSONStringWithLongIDNumberAsFormResult = \"/send_json_string_with_long_id_number_as_form_result\"\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// check method is POST before going to check other features\n\t\tif r.Method != POST {\n\t\t\tt.Errorf(\"Expected method %q; got %q\", POST, r.Method)\n\t\t}\n\t\tif r.Header == nil {\n\t\t\tt.Errorf(\"Expected non-nil request Header\")\n\t\t}\n\t\tswitch r.URL.Path {\n\t\tdefault:\n\t\t\tt.Errorf(\"No testing for this case yet : %q\", r.URL.Path)\n\t\tcase case1Empty:\n\t\t\tt.Logf(\"case %v \", case1Empty)\n\t\tcase case2SetHeader:\n\t\t\tt.Logf(\"case %v \", case2SetHeader)\n\t\t\tif r.Header.Get(\"API-Key\") != \"fookey\" {\n\t\t\t\tt.Errorf(\"Expected 'API-Key' == %q; got %q\", \"fookey\", r.Header.Get(\"API-Key\"))\n\t\t\t}\n\t\tcase case3SendJSON:\n\t\t\tt.Logf(\"case %v \", case3SendJSON)\n\t\t\tdefer r.Body.Close()\n\t\t\tbody, _ := ioutil.ReadAll(r.Body)\n\t\t\tif string(body) != `{\"query1\":\"test\",\"query2\":\"test\"}` {\n\t\t\t\tt.Error(`Expected Body with {\"query1\":\"test\",\"query2\":\"test\"}`, \"| but got\", string(body))\n\t\t\t}\n\t\tcase case4SendString:\n\t\t\tt.Logf(\"case %v \", case4SendString)\n\t\t\tif r.Header.Get(\"Content-Type\") != \"application/x-www-form-urlencoded\" {\n\t\t\t\tt.Error(\"Expected Header Content-Type -> application/x-www-form-urlencoded\", \"| but got\", r.Header.Get(\"Content-Type\"))\n\t\t\t}\n\t\t\tdefer r.Body.Close()\n\t\t\tbody, _ := ioutil.ReadAll(r.Body)\n\t\t\tif string(body) != \"query1=test&query2=test\" {\n\t\t\t\tt.Error(\"Expected Body with \\\"query1=test&query2=test\\\"\", \"| 
but got\", string(body))\n\t\t\t}\n\t\tcase case5IntegrationSendJSONString:\n\t\t\tt.Logf(\"case %v \", case5IntegrationSendJSONString)\n\t\t\tdefer r.Body.Close()\n\t\t\tbody, _ := ioutil.ReadAll(r.Body)\n\t\t\tif string(body) != \"query1=test&query2=test\" {\n\t\t\t\tt.Error(\"Expected Body with \\\"query1=test&query2=test\\\"\", \"| but got\", string(body))\n\t\t\t}\n\t\tcase case6SetQuery:\n\t\t\tt.Logf(\"case %v \", case6SetQuery)\n\t\t\tv := r.URL.Query()\n\t\t\tif v[\"query1\"][0] != \"test\" {\n\t\t\t\tt.Error(\"Expected query1:test\", \"| but got\", v[\"query1\"][0])\n\t\t\t}\n\t\t\tif v[\"query2\"][0] != \"test\" {\n\t\t\t\tt.Error(\"Expected query2:test\", \"| but got\", v[\"query2\"][0])\n\t\t\t}\n\t\tcase case7IntegrationSendJSONStruct:\n\t\t\tt.Logf(\"case %v \", case7IntegrationSendJSONStruct)\n\t\t\tdefer r.Body.Close()\n\t\t\tbody, _ := ioutil.ReadAll(r.Body)\n\t\t\tcomparedBody := []byte(`{\"Lower\":{\"Color\":\"green\",\"Size\":1.7},\"Upper\":{\"Color\":\"red\",\"Size\":0},\"a\":\"a\",\"name\":\"Cindy\"}`)\n\t\t\tif !bytes.Equal(body, comparedBody) {\n\t\t\t\tt.Errorf(`Expected correct json but got ` + string(body))\n\t\t\t}\n\t\tcase case8SendJSONWithLongIDNumber:\n\t\t\tt.Logf(\"case %v \", case8SendJSONWithLongIDNumber)\n\t\t\tdefer r.Body.Close()\n\t\t\tbody, _ := ioutil.ReadAll(r.Body)\n\t\t\tif string(body) != `{\"id\":123456789,\"name\":\"nemo\"}` {\n\t\t\t\tt.Error(`Expected Body with {\"id\":123456789,\"name\":\"nemo\"}`, \"| but got\", string(body))\n\t\t\t}\n\t\tcase case9SendJSONStringWithLongIDNumberAsFormResult:\n\t\t\tt.Logf(\"case %v \", case9SendJSONStringWithLongIDNumberAsFormResult)\n\t\t\tdefer r.Body.Close()\n\t\t\tbody, _ := ioutil.ReadAll(r.Body)\n\t\t\tif string(body) != `id=123456789&name=nemo` {\n\t\t\t\tt.Error(`Expected Body with \"id=123456789&name=nemo\"`, `| but got`, string(body))\n\t\t\t}\n\t\t}\n\t}))\n\n\tdefer ts.Close()\n\n\tNew().Post(ts.URL + case1Empty).\n\t\tEnd()\n\n\tNew().Post(ts.URL+case2SetHeader).\n\t\tSetHeader(\"API-Key\", \"fookey\").\n\t\tEnd()\n\n\tNew().Post(ts.URL + case3SendJSON).\n\t\tSendMapString(`{\"query1\":\"test\"}`).\n\t\tSendMapString(`{\"query2\":\"test\"}`).\n\t\tEnd()\n\n\tNew().Post(ts.URL + case4SendString).\n\t\tSendMapString(\"query1=test\").\n\t\tSendMapString(\"query2=test\").\n\t\tEnd()\n\n\tNew().Post(ts.URL + case5IntegrationSendJSONString).\n\t\tSendMapString(\"query1=test\").\n\t\tSendMapString(`{\"query2\":\"test\"}`).\n\t\tEnd()\n\n\t/* TODO: More testing post for application/x-www-form-urlencoded\n\t post.query(json), post.query(string), post.send(json), post.send(string), post.query(both).send(both)\n\t*/\n\tNew().Post(ts.URL + case6SetQuery).\n\t\tQuery(\"query1=test\").\n\t\tQuery(\"query2=test\").\n\t\tEnd()\n\t// TODO:\n\t// 1. test normal struct\n\t// 2. test 2nd layer nested struct\n\t// 3. test struct pointer\n\t// 4. test lowercase won't be export to json\n\t// 5. 
test field tag change to json field name\n\ttype Upper struct {\n\t\tColor string\n\t\tSize int\n\t\tnote string\n\t}\n\ttype Lower struct {\n\t\tColor string\n\t\tSize float64\n\t\tnote string\n\t}\n\ttype Style struct {\n\t\tUpper Upper\n\t\tLower Lower\n\t\tName string `json:\"name\"`\n\t}\n\tmyStyle := Style{Upper: Upper{Color: \"red\"}, Name: \"Cindy\", Lower: Lower{Color: \"green\", Size: 1.7}}\n\tNew().Post(ts.URL + case7IntegrationSendJSONStruct).\n\t\tSendMapString(`{\"a\":\"a\"}`).\n\t\tSendStruct(myStyle).\n\t\tEnd()\n\n\tNew().Post(ts.URL + case8SendJSONWithLongIDNumber).\n\t\tSendMapString(`{\"id\":123456789, \"name\":\"nemo\"}`).\n\t\tEnd()\n\n\tNew().Post(ts.URL + case9SendJSONStringWithLongIDNumberAsFormResult).\n\t\tContentType(\"form\").\n\t\tSendMapString(`{\"id\":123456789, \"name\":\"nemo\"}`).\n\t\tEnd()\n}", "func (myst *MysteriousClient) InvokeAPI(rawMessage string) error {\n\trequestBody := fmt.Sprintf(\"ReplyAddress=%s&ReplyMessage=%s&MessageId=%s&Guid=%s\",\n\t\turl.QueryEscape(myst.Addr2), url.QueryEscape(rawMessage), myst.ID1, myst.ID2)\n\n\trequest, err := http.NewRequest(\"POST\", myst.URL, bytes.NewReader([]byte(requestBody)))\n\tif err != nil {\n\t\treturn err\n\t}\n\trequest.Header.Set(\"X-Requested-With\", \"XMLHttpRequest\")\n\trequest.Header.Set(\"User-Agent\", \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36\")\n\trequest.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded; charset=UTF-8\")\n\n\tclient := &http.Client{Timeout: 25 * time.Second}\n\tresponse, err := client.Do(request)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbody, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\n\tlog.Printf(\"Mysterious ding responded to '%s': error %v, status %d, output %s\", rawMessage, err, response.StatusCode, string(body))\n\tif response.StatusCode/200 != 1 {\n\t\treturn fmt.Errorf(\"HTTP status code %d\", response.StatusCode)\n\t}\n\treturn nil\n}", "func TestCreateTaskWebhook(t *testing.T) {\n\tdata := []byte(`{\n \"eventType\": \"taskCreated\",\n \"userId\": \"ec1b92fb1868c44aa9a041583c000e2a\",\n \"userFullName\": \"John Doe\",\n \"timestamp\": \"2015-10-20T14:45:06.331Z\",\n \"task\": {\n \"_id\": \"60e8b629fc8d6d28b513807d7d86b133\",\n \"name\": \"Write report\",\n \"description\": \"For school\",\n \"color\": \"green\",\n \"columnId\": \"ff31c6b2374911e49d115f7064763810\",\n \"totalSecondsSpent\": 0,\n \"totalSecondsEstimate\": 0,\n \"swimlaneId\": \"e037a6400e8911e5bdc9053860f3e5c0\",\n \"dates\": [\n {\n \"targetColumnId\": \"ff31c6b4374911e49d115f7064763810\",\n \"status\": \"active\",\n \"dateType\": \"dueDate\",\n \"dueTimestamp\": \"2015-10-20T15:00:00Z\",\n \"dueTimestampLocal\": \"2015-10-20T17:00:00+02:00\"\n }\n ],\n \"subTasks\": [\n {\n \"name\": \"Proofread\",\n \"finished\": false\n }\n ],\n \"labels\": [\n {\n \"name\": \"Writing\",\n \"pinned\": false\n }\n ]\n }\n }`)\n\n\ts := &CreateTaskWebhook{}\n\tdecodeAndEncodeData(t, data, s)\n}", "func TestGetApi(t *testing.T) {\n\tm := NewMarkHub()\n\tm.content = \"## teste 1\\n>blockotes1\\n>blockotes2\\n\\nmade with :heart:\"\n\t_, err := m.getApi()\n\tif err != nil {\n\t\tt.Errorf(\"TestGetApi(): got -> %v, want: nil\", err)\n\t}\n}", "func TestTokenPayload(t *testing.T) {\n\tdb.InitDB()\n\tvar router *gin.Engine = routes.SetupRouter()\n\n\tvar user models.UserCreate = utils.CreateUser(\"Tom\", \"qwerty1234\", t, router)\n\tuser.Token = 
utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\n\tsplittedToken := strings.Split(user.Token, \".\")\n\tif len(splittedToken) != 3 {\n\t\tlog.Fatal(\"Bad token.\")\n\t\tt.Fail()\n\t}\n\n\theader := splittedToken[0]\n\tpayload := splittedToken[1]\n\tsignature := splittedToken[2]\n\n\tdecPayloadByte, err := base64.RawURLEncoding.DecodeString(payload)\n\n\tvar payloadObj models.JwtPayload\n\terr = json.Unmarshal(decPayloadByte, &payloadObj)\n\tif err != nil {\n\t\tlog.Fatal(\"Bad output: \", err.Error())\n\t\tt.Fail()\n\t}\n\tpayloadObj.Iat = 123456\n\tpayloadObj.Exp = 123456\n\tjsonPayload, _ := json.Marshal(payloadObj)\n\tnewPayload := base64.RawURLEncoding.EncodeToString([]byte(string(jsonPayload)))\n\n\tmodifiedToken := header + \".\" + newPayload + \".\" + signature\n\n\tvar url string = \"/v1/user/\" + strconv.Itoa(user.ID)\n\tvar bearer = \"Bearer \" + modifiedToken\n\trecord := httptest.NewRecorder()\n\trequest, _ := http.NewRequest(\"GET\", url, nil)\n\trequest.Header.Add(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", bearer)\n\n\trouter.ServeHTTP(record, request)\n\n\tvar message Message\n\terr = json.Unmarshal([]byte(record.Body.String()), &message)\n\tif err != nil {\n\t\tlog.Fatal(\"Bad output: \", err.Error())\n\t\tt.Fail()\n\t}\n\n\tassert.Equal(t, record.Code, 403)\n\tassert.Equal(t, message.Message, \"Bad signature\")\n\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\tutils.CleanUser(user.ID, user.Token, t, router)\n\tdb.CloseDB()\n}", "func TestJsonEncode(t *testing.T) {\n\tt.Parallel()\n\n\t// Set up a mock struct for testing\n\ttype TestStruct struct {\n\t\tTestKey string `json:\"test_key\"`\n\t\tTestKeyTwo string `json:\"test_key_two\"`\n\t\tnotAllowed string\n\t}\n\n\t// Base model and test model\n\tvar model = new(TestStruct)\n\tvar modelTest = new(TestStruct)\n\tvar allowedFields = []string{\"test_key\", \"test_key_two\"} // notice omitted: notAllowed\n\n\t// Set the testing data\n\tmodel.TestKey = \"TestValue1\"\n\tmodel.TestKeyTwo = \"TestValue2\"\n\tmodel.notAllowed = \"PrivateValue\"\n\n\t// Set the buffer and encoder\n\tvar b bytes.Buffer\n\tenc := json.NewEncoder(&b)\n\n\t// Run the encoder\n\terr := JSONEncode(enc, model, allowedFields)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Now unmarshal and test\n\tif err = json.Unmarshal(b.Bytes(), &modelTest); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Test for our fields and values now\n\tif modelTest.TestKey != \"TestValue1\" {\n\t\tt.Fatal(\"TestKey does not have the right value! Encoding failed.\", modelTest.TestKey)\n\t} else if modelTest.TestKeyTwo != \"TestValue2\" {\n\t\tt.Fatal(\"TestKeyTwo does not have the right value! Encoding failed.\", modelTest.TestKeyTwo)\n\t} else if modelTest.notAllowed == \"PrivateValue\" {\n\t\tt.Fatal(\"Field not removed! notAllowed does not have the right value! 
Encoding failed.\", modelTest.notAllowed)\n\t}\n}", "func RunJSONSerializationTestForPrivateEndpointIPConfiguration(subject PrivateEndpointIPConfiguration) string {\n\t// Serialize to JSON\n\tbin, err := json.Marshal(subject)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\t// Deserialize back into memory\n\tvar actual PrivateEndpointIPConfiguration\n\terr = json.Unmarshal(bin, &actual)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\t// Check for outcome\n\tmatch := cmp.Equal(subject, actual, cmpopts.EquateEmpty())\n\tif !match {\n\t\tactualFmt := pretty.Sprint(actual)\n\t\tsubjectFmt := pretty.Sprint(subject)\n\t\tresult := diff.Diff(subjectFmt, actualFmt)\n\t\treturn result\n\t}\n\n\treturn \"\"\n}", "func TestSimplestTransform(t *testing.T) {\n\tassert := assert.New(t)\n\tmessage := map[string]interface{}{\n\t\t\"a\": \"b\",\n\t}\n\ttransform := map[string]interface{}{\n\t\t\"x\": \"y\",\n\t}\n\ttransformErr := Transform(&message, transform)\n\tassert.Nil(transformErr)\n\tassert.Equal(message[\"a\"], \"b\")\n\tassert.NotNil(message[\"x\"])\n\tassert.Equal(message[\"x\"], \"y\")\n}", "func TestGetSlidesDocumentWithFormat(t *testing.T) {\n request := createGetSlidesDocumentWithFormatRequest()\n e := initializeTest(\"GetSlidesDocumentWithFormat\", \"\", \"\")\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n c := getTestApiClient()\n r, _, e := c.DocumentApi.GetSlidesDocumentWithFormat(request)\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n assertBinaryResponse(r, t)\n}", "func TestPagarmePaymentCreateWithBoleto(t *testing.T) {\n \n payment := pagarme.NewPaymentWithBoleto(1)\n payment.PostbackUrl = \"https://mobilemind.free.beeceptor.com/webhook/pagarme\"\n\n Pagarme := pagarme.NewPagarme(\"pt-BR\", ApiKey, CryptoKey)\n Pagarme.SetDebug()\n\n pagarmeFillPayments(payment)\n\n result, err := Pagarme.PaymentCreate(payment)\n\n if err != nil {\n t.Errorf(\"Erro ao create card payment: %v\", err)\n }else{\n //t.Log(fmt.Sprintf(\"result = %v\", customer.Id)) \n\n\n if result.Id == 0 {\n t.Errorf(\"Id cant be empty\")\n return\n }\n\n if result.Status == api.PagarmeAuthorized {\n\n captureData := pagarme.NewCaptureData(fmt.Sprintf(\"%v\", result.Id), 1)\n\n result, err := Pagarme.PaymentCapture(captureData)\n\n if err != nil {\n t.Errorf(\"Erro ao capture data: %v\", err)\n return\n }\n\n if result.Status != api.PagarmeWaitingPayment {\n t.Errorf(\"status expected %v, returned %v\", result.Status, api.PagarmeWaitingPayment)\n return \n }\n\n }\n\n client.Set(\"TransactionId\", result.Id, 0)\n\n }\n\n}", "func TestPagarmeSubscriptionUpdate(t *testing.T) {\n \n Pagarme := pagarme.NewPagarme(\"pt-BR\", ApiKey, CryptoKey)\n Pagarme.SetDebug()\n\n planId, _ := client.Get(\"PlanoId\").Int64()\n cardId, _ := client.Get(\"CardId\").Result()\n subscriptionId, _ := client.Get(\"SubscriptionId\").Int64()\n subscription := pagarme.NewSubscriptionWithCard(planId)\n subscription.CardId = cardId\n subscription.Id = subscriptionId\n subscription.PostbackUrl = \"https://mobilemind.free.beeceptor.com/webhook/pagarme\"\n\n\n pagarmefillCustomer(subscription.Customer)\n\n result, err := Pagarme.SubscriptionUpdate(subscription)\n\n if err != nil {\n t.Errorf(\"Erro ao create subscription: %v\", err)\n }else{\n //t.Log(fmt.Sprintf(\"result = %v\", customer.Id)) \n\n if result.Id == 0 {\n t.Errorf(\"Subscription id is expected\")\n return\n }\n\n\n }\n\n}", "func TestExample(t *testing.T) {\n\tinstance := &RestExample{\n\t\tpost: make(map[string]string),\n\t\twatch: 
make(map[string]chan string),\n\t}\n\n\tinstance.HandleCreateHello(HelloArg{\n\t\tTo: \"rest\",\n\t\tPost: \"rest is powerful\",\n\t})\n\n\tresp, err := rest.SetTest(instance, map[string]string{\"to\": \"rest\"}, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\targ := instance.HandleHello()\n\tif resp.Code != http.StatusOK {\n\t\tt.Error(\"should return ok\")\n\t}\n\tif arg.To != \"rest\" {\n\t\tt.Error(\"arg.To should be rest\")\n\t}\n\tif arg.Post != \"rest is powerful\" {\n\t\tt.Error(\"arg.Post should be 'rest is powerful'\")\n\t}\n\n\tresp, err = rest.SetTest(instance, map[string]string{\"to\": \"123\"}, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\targ = instance.HandleHello()\n\tif resp.Code != http.StatusNotFound {\n\t\tt.Error(\"should return not found\")\n\t}\n}", "func TestGetDataAndParseResponse(t *testing.T) {\n\telectricCount, boogalooCount := getDataAndParseResponse()\n\tif electricCount < 1 {\n\t\tt.Errorf(\"expected more than one name 'Electric', recieved: %d\", electricCount)\n\t}\n\tif boogalooCount < 1 {\n\t\tt.Errorf(\"expected more than one name 'Boogaloo', recieved: %d\", boogalooCount)\n\t}\n}", "func (suite *PouchAPIHelpSuite) TestExample(c *check.C) {\n}", "func TestSimpleParse(t *testing.T) {\n\tjson := `{\"action\":\"jump\", \"time\":100}`\n\tparsed, _ := ParseEventJSON(json)\n\texpected := ActionEvent{\"jump\", 100}\n\tif parsed != expected {\n\t\tt.Errorf(\"JSON parsing of %v resulted in invalid action: %v\", json, parsed)\n\t}\n}", "func TestGenerateProto(t *testing.T) {\n\tgenerateProto()\n}", "func TestPutSlidesConvert(t *testing.T) {\n request := createPutSlidesConvertRequest()\n e := initializeTest(\"PutSlidesConvert\", \"\", \"\")\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n c := getTestApiClient()\n r, _, e := c.DocumentApi.PutSlidesConvert(request)\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n assertBinaryResponse(r, t)\n}", "func Test_betaflight_message_can_be_parsed__with_HAMCREST(t *testing.T) {\n\t//info := ParseFirmwareInformation(\"# Betaflight / SPRACINGF3EVO (SPEV) 3.4.0 Apr 17 2018 / 14:00:13 (b2c247d34) MSP API: 1.39\")\n\t//\n\t//we := asserter.Using(t)\n\t//we.AssertThat(info.FirmwareName, core.EqualTo(\"Betaflight\").Comment(\"FirmwareName field\"))\n\t//we.AssertThat(info.TargetName, core.EqualTo(\"SPRACINGF3EVO\"))\n\t//we.AssertThat(info.TargetDetail, core.EqualTo(\"SPEV\"))\n\t//we.AssertThat(info.Version, core.GreaterThanOrEqualTo(int64(3)))\n\t//we.AssertThat(info.ReleaseDateStr, core.EqualTo(\"Apr 17 2018\"))\n\t//we.AssertThat(info.ReleaseTime, core.EqualTo(\"14:00:13\"))\n\t//we.AssertThat(info.GitHash, core.EqualTo(\"b2c247d34\"))\n}", "func RunJSONSerializationTestForIPTag(subject IPTag) string {\n\t// Serialize to JSON\n\tbin, err := json.Marshal(subject)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\t// Deserialize back into memory\n\tvar actual IPTag\n\terr = json.Unmarshal(bin, &actual)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\t// Check for outcome\n\tmatch := cmp.Equal(subject, actual, cmpopts.EquateEmpty())\n\tif !match {\n\t\tactualFmt := pretty.Sprint(actual)\n\t\tsubjectFmt := pretty.Sprint(subject)\n\t\tresult := diff.Diff(subjectFmt, actualFmt)\n\t\treturn result\n\t}\n\n\treturn \"\"\n}", "func TestEventMarshaling(t *testing.T) {\n\tassert := asserts.NewTesting(t, asserts.FailStop)\n\n\tevtIn, err := mesh.NewEvent(\"test\")\n\tassert.NoError(err)\n\tdata, err := json.Marshal(evtIn)\n\tassert.NoError(err)\n\n\tevtOut := mesh.Event{}\n\terr = json.Unmarshal(data, 
&evtOut)\n\tassert.NoError(err)\n\tassert.Equal(evtOut, evtIn)\n\n\tplEvtA, err := mesh.NewEvent(\"payload-a\")\n\tassert.NoError(err)\n\tplEvtB, err := mesh.NewEvent(\"payload-b\")\n\tassert.NoError(err)\n\tplEvtC, err := mesh.NewEvent(\"payload-c\")\n\tassert.NoError(err)\n\n\tevtIn, err = mesh.NewEvent(\"test\", plEvtA, plEvtB, plEvtC)\n\tassert.NoError(err)\n\tdata, err = json.Marshal(evtIn)\n\tassert.NoError(err)\n\n\tevtOut = mesh.Event{}\n\terr = json.Unmarshal(data, &evtOut)\n\tassert.NoError(err)\n\tassert.Equal(evtOut, evtIn)\n\tpl := []mesh.Event{}\n\terr = evtOut.Payload(&pl)\n\tassert.NoError(err)\n\tassert.Equal(pl[0], plEvtA)\n\tassert.Equal(pl[1], plEvtB)\n\tassert.Equal(pl[2], plEvtC)\n}", "func TestCreateAPInPropertiesWithAPString(t *testing.T) {\n\tclient := newPetsClient(t)\n\tresult, err := client.CreateAPInPropertiesWithAPString(context.Background(), PetAPInPropertiesWithAPString{\n\t\tID: to.Ptr[int32](5),\n\t\tName: to.Ptr(\"Funny\"),\n\t\tODataLocation: to.Ptr(\"westus\"),\n\t\tAdditionalProperties: map[string]*string{\n\t\t\t\"color\": to.Ptr(\"red\"),\n\t\t\t\"city\": to.Ptr(\"Seattle\"),\n\t\t\t\"food\": to.Ptr(\"tikka masala\"),\n\t\t},\n\t\tAdditionalProperties1: map[string]*float32{\n\t\t\t\"height\": to.Ptr[float32](5.61),\n\t\t\t\"weight\": to.Ptr[float32](599),\n\t\t\t\"footsize\": to.Ptr[float32](11.5),\n\t\t},\n\t}, nil)\n\trequire.NoError(t, err)\n\tif r := cmp.Diff(result.PetAPInPropertiesWithAPString, PetAPInPropertiesWithAPString{\n\t\tID: to.Ptr[int32](5),\n\t\tName: to.Ptr(\"Funny\"),\n\t\tODataLocation: to.Ptr(\"westus\"),\n\t\tStatus: to.Ptr(true),\n\t\tAdditionalProperties: map[string]*string{\n\t\t\t\"color\": to.Ptr(\"red\"),\n\t\t\t\"city\": to.Ptr(\"Seattle\"),\n\t\t\t\"food\": to.Ptr(\"tikka masala\"),\n\t\t},\n\t\tAdditionalProperties1: map[string]*float32{\n\t\t\t\"height\": to.Ptr[float32](5.61),\n\t\t\t\"weight\": to.Ptr[float32](599),\n\t\t\t\"footsize\": to.Ptr[float32](11.5),\n\t\t},\n\t}); r != \"\" {\n\t\tt.Fatal(r)\n\t}\n}" ]
[ "0.65098625", "0.65059495", "0.6022184", "0.6014352", "0.5968889", "0.5923985", "0.59105635", "0.5836621", "0.5785979", "0.57433534", "0.5712812", "0.56993455", "0.5694657", "0.56586385", "0.5621893", "0.56027323", "0.5597822", "0.55354494", "0.55333185", "0.55289763", "0.55137277", "0.55134314", "0.54888105", "0.5478587", "0.54482126", "0.5433169", "0.5417903", "0.54163766", "0.541198", "0.539915", "0.53973716", "0.53880966", "0.53858197", "0.53805476", "0.53779334", "0.5372821", "0.5363429", "0.5359127", "0.5353948", "0.535207", "0.5347213", "0.5338928", "0.5338712", "0.5331781", "0.53210956", "0.5320719", "0.53134644", "0.5310956", "0.5289535", "0.5282576", "0.52811366", "0.5279016", "0.5262519", "0.5259343", "0.5258391", "0.525528", "0.52551913", "0.5253814", "0.52500284", "0.5245456", "0.5242759", "0.5240769", "0.5239206", "0.5234966", "0.5234109", "0.52292454", "0.5227438", "0.5218751", "0.52170753", "0.5216282", "0.5204054", "0.5203645", "0.52007043", "0.5194801", "0.5180419", "0.5173842", "0.51705194", "0.51687336", "0.5167553", "0.5164803", "0.5163163", "0.51609725", "0.5155855", "0.51507914", "0.5149745", "0.51496357", "0.51476556", "0.514664", "0.51412827", "0.51396656", "0.51363647", "0.5134204", "0.5130844", "0.5122785", "0.5119846", "0.5115896", "0.51153654", "0.51118046", "0.5104524", "0.50960517" ]
0.59140664
6
NewLogstash creates an instance of the plugin with default settings
func NewLogstash() *Logstash { return &Logstash{ URL: "http://127.0.0.1:9600", SinglePipeline: false, Collect: []string{"pipelines", "process", "jvm"}, Headers: make(map[string]string), Timeout: config.Duration(time.Second * 5), } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func init() {\n\tinputs.Add(\"logstash\", func() telegraf.Input {\n\t\treturn NewLogstash()\n\t})\n}", "func New(host string, timeout int) (ls *Logstash, err error) {\n\n\tlshost, lsportstring, err := net.SplitHostPort(host)\n\tif err != nil {\n\t\treturn ls, errors.Wrap(err, \"net-splithost\")\n\t}\n\tlsport, err := strconv.Atoi(lsportstring)\n\tif err != nil {\n\t\treturn ls, errors.Wrap(err, \"logstash port isn't numeric\")\n\t}\n\n\t// temporary at 3 minues. Or I can build the connection after I get the first row back\n\tls = NewHostPort(lshost, lsport, 180000)\n\n\treturn ls, nil\n}", "func (logger *logger) newLogrus() {\n\tlogger.logrus = &logrus.Logger{\n\t\tHooks: make(logrus.LevelHooks),\n\t}\n\n\tlogLevel, err := logrus.ParseLevel(logger.cfg.LogLevel)\n\tif err != nil {\n\t\tlogLevel = defaultLogLevel\n\t}\n\tlogger.logrus.Level = logLevel\n\n\tswitch logger.cfg.LogFormat {\n\tcase jsonLogFormat:\n\t\tlogger.logrus.SetFormatter(&logrus.JSONFormatter{})\n\tdefault:\n\t\tlogger.logrus.SetFormatter(&logrus.TextFormatter{})\n\t}\n\n\tif logger.cfg.LogFilePath == \"\" {\n\t\tlogger.logrus.Out = os.Stdout\n\t\tlogger.logrus.Errorf(\"[%s]:: empty log file. Set 'Stdout' as default \\n\", PackageName)\n\t\tlogger.logrus.Infof(\"[%s]:: initialized logx successfully \\n\", PackageName)\n\t\treturn\n\t}\n\n\tlogfile, err := os.OpenFile(logger.cfg.LogFilePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0755)\n\tif err != nil {\n\t\tlogger.logrus.Errorln(\"[%s]:: failed to set log file. Error : '%v'. Set 'Stdout' as default\", PackageName, err)\n\t\treturn\n\t}\n\n\tlogger.logfile = logfile\n\tlogger.logrus.Out = logger.logfile\n\n\tlogger.logrus.Infof(\"[%s]:: initialized logx successfully\", PackageName)\n}", "func LogNew(level, event, msg string) {\n\tvar thelog StructuredLog\n\tthelog.Timestamp = time.Now().Format(time.RFC3339)\n\thostname, _ := os.Hostname()\n\tthelog.Server = hostname\n\tthelog.Level = level\n\tthelog.Event = event\n\tthelog.Message = msg\n\tthelog.Service = \"search-api\"\n\tlogJSON, err := json.Marshal(thelog)\n\tif err != nil {\n\t\tlog.Println(\"Structured logger: Logger JSON Marshal failed !\")\n\t}\n\tlog.Println(string(logJSON))\n}", "func New(serviceName string, environment string) LoggerWrapper {\n\tlogStore = &loggerWrapper{logrus.New().WithField(\"service\", serviceName).WithField(\"environment\", environment)}\n\tif environment == \"production\" {\n\t\tlogStore.SetFormat(&logrus.JSONFormatter{})\n\t}\n\n\t// fmt.Println(\"Adding hook\")\n\t// hook := logrusly.NewLogglyHook(\"71000042-f956-4c7e-987d-8694a20695a8\", \"https://logs-01.loggly.com/bulk/\", logrus.InfoLevel, serviceName)\n\t// logStore.Logger.Hooks.Add(hook)\n\treturn logStore\n}", "func DefaultNew(f func() SLogConfig) error {\n\n\tcfg := f()\n\tlogger := new(LoggerS)\n\tlogger.cfg = &cfg\n\tlogger.SetSliceType(cfg.SplitType)\n\n\tlogger.SetDebug(cfg.Debug)\n\n\twriter := new(logWriter)\n\n\tif cfg.FileNameHandler == nil {\n\t\tcfg.FileNameHandler = cfg.name_handler\n\t}\n\tfilename := cfg.FileNameHandler(0)\n\n\tfile := &os.File{}\n\tfile_info, err := os.Stat(filename)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tos.MkdirAll(path.Dir(filename), os.ModePerm)\n\t\t\tfile, err = os.Create(filename)\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tfile, err = os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, os.ModeAppend)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tswitch cfg.SplitType {\n\tcase SPLIT_TYPE_FILE_SIZE:\n\t\tlogger.SetMaxSize(cfg.Condition)\n\t\tif file_info != 
nil {\n\t\t\tlogger.size = file_info.Size()\n\t\t}\n\tcase SPLIT_TYPE_TIME_CYCLE:\n\t\tlogger.SetIntervalsTime(cfg.Condition)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\twriter.file = file\n\tif cfg.Debug {\n\t\twriter.stdout = os.Stdout\n\t}\n\tlogger.writer = writer\n\tlogger.Logger = log.New(logger.writer, cfg.Prefix, cfg.LogFlag)\n\n\tLogger = logger\n\n\treturn nil\n}", "func New(_ context.Context, next http.Handler, config *TestConfiguration, name string) (http.Handler, error) {\n\ts := &SouinTraefikPlugin{\n\t\tname: name,\n\t\tnext: next,\n\t}\n\tc := parseConfiguration(*config)\n\n\ts.Retriever = DefaultSouinPluginInitializerFromConfiguration(&c)\n\treturn s, nil\n}", "func newPlugin() (p *slackscot.Plugin) {\n\tp = new(slackscot.Plugin)\n\tp.Name = \"tester\"\n\tp.Commands = []slackscot.ActionDefinition{{\n\t\tMatch: func(m *slackscot.IncomingMessage) bool {\n\t\t\treturn strings.HasPrefix(m.NormalizedText, \"make\")\n\t\t},\n\t\tUsage: \"make `<something>`\",\n\t\tDescription: \"Have the test bot make something for you\",\n\t\tAnswer: func(m *slackscot.IncomingMessage) *slackscot.Answer {\n\t\t\treturn &slackscot.Answer{Text: \"Ready\"}\n\t\t},\n\t}}\n\n\treturn p\n}", "func New() *zap.SugaredLogger {\n\tvar config zap.Config\n\tif os.Getenv(\"LOG_LEVEL\") == \"prod\" {\n\t\tconfig = zap.NewProductionConfig()\n\t} else {\n\t\tconfig = zap.NewDevelopmentConfig()\n\t\tconfig.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder\n\t}\n\tconfig.EncoderConfig.TimeKey = \"timestamp\"\n\tconfig.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder\n\tlogger, _ := config.Build()\n\treturn logger.Sugar()\n}", "func New(output *os.File, NoColors ...bool) *Tiny {\n\tnocolors := false\n\tif len(NoColors) > 0 {\n\t\tnocolors = NoColors[0]\n\t}\n\tl := logrus.New()\n\tl.SetLevel(logrus.DebugLevel)\n\tlog.SetOutput(output)\n\tl.SetFormatter(&f.Formatter{\n\t\tNoColors: nocolors,\n\t\tHideKeys: true,\n\t\tFieldsOrder: []string{\"component\", \"category\"},\n\t})\n\treturn &Tiny{l}\n}", "func New(label string) *zap.SugaredLogger {\n\tcfg := zap.NewProductionConfig()\n\tcfg.DisableStacktrace = true\n\n\tlogger, _ := cfg.Build()\n\tdefer logger.Sync() // flushes buffer, if any\n\treturn logger.Sugar().With(\"service\", service, \"event\", label)\n}", "func new() exampleInterface {\n\treturn config{}\n}", "func NewLogger(cfg configFile) *log.Logger {\n\t//This creates new logger\n\tLog = log.New()\n\tLog.Formatter = new(log.JSONFormatter)\n\tLog.Hooks.Add(lfshook.NewHook(lfshook.PathMap{\n\t\tlog.InfoLevel: cfg.Log.Path,\n\t\tlog.ErrorLevel: cfg.Log.Path,\n\t\tlog.DebugLevel: cfg.Log.Path,\n\t}))\n\tLog.Hooks.Add(&metricsHook{\n\t\tmetrics,\n\t})\n\tclient, err := elastic.NewClient(\n\t\telastic.SetURL(cfg.Elastic.URL),\n\t\telastic.SetBasicAuth(cfg.Elastic.Username, cfg.Elastic.Password))\n\tif err != nil {\n\t\tlog.Error(err)\n\t} else {\n\t\thook, err := elogrus.NewElasticHook(client, cfg.Server.Host, log.DebugLevel, \"goplag\")\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t} else {\n\t\t\tLog.Hooks.Add(hook)\n\t\t}\n\n\t}\n\n\treturn Log\n}", "func newLogPusher(pusherKey PusherKey,\n\tsvcStructuredLog Client, logger *zap.Logger) *logPusher {\n\tpusher := &logPusher{\n\t\tlogGroupName: aws.String(pusherKey.LogGroupName),\n\t\tlogStreamName: aws.String(pusherKey.LogStreamName),\n\t\tsvcStructuredLog: svcStructuredLog,\n\t\tlogger: logger,\n\t}\n\tpusher.logEventBatch = newEventBatch(pusherKey)\n\n\treturn pusher\n}", "func newLog(storage Storage) *RaftLog {\n\t// Your Code Here 
(2A).\n\thardState, _, err := storage.InitialState()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfirstIndex, err := storage.FirstIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tlastIndex, err := storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tentries, err := storage.Entries(firstIndex, lastIndex+1)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tsnapIndex := firstIndex - 1\n\tsnapTerm, err := storage.Term(snapIndex)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tlog := &RaftLog{\n\t\tstorage: storage,\n\t\tcommitted: hardState.Commit,\n\t\tapplied: snapIndex,\n\t\tstabled: lastIndex,\n\t\tentries: entries,\n\t\tpendingEntries: make([]pb.Entry, 0),\n\t\tsnapIndex: snapIndex,\n\t\tsnapTerm: snapTerm,\n\t}\n\treturn log\n}", "func New(pluginName string) *Logger {\n\tlg := log.New(os.Stdout, \"\", log.Ldate|log.Ltime)\n\tif len(pluginName) == 0 {\n\t\treturn &Logger{logger: lg}\n\t}\n\treturn &Logger{\n\t\tprefix: fmt.Sprintf(\"[%s] \", pluginName),\n\t\tlogger: lg,\n\t}\n}", "func New(config *Configuration) (Logger, error) {\n\tlogger, err := newZapLogger(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog = logger\n\treturn log, nil\n}", "func New() *zap.Logger {\n\t// Get config\n\tconfig := zap.NewProductionConfig()\n\tconfig.EncoderConfig.TimeKey = \"timestamp\"\n\tconfig.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder\n\tlogger, _ := config.Build()\n\t// Replace global zap logger\n\tzap.ReplaceGlobals(logger)\n\treturn logger\n}", "func New(info logger.Info) (logger.Logger, error) {\n\tlogDir := removeLogDirOption(info.Config)\n\tif logDir == \"\" {\n\t\tlogDir = defaultLogDir\n\t}\n\tinfo.LogPath = filepath.Join(logDir, info.ContainerID)\n\n\tif err := os.MkdirAll(filepath.Dir(info.LogPath), 0755); err != nil {\n\t\treturn nil, fmt.Errorf(\"error setting up logger dir: %v\", err)\n\t}\n\n\treturn jsonfilelog.New(info)\n}", "func newGossipLogWrapper() *log.Logger {\n\treturn log.New(&gossipLogWriter{\n\t\tlogger: logger.GetLogger(\"gossip\"),\n\t}, \"\", 0)\n}", "func New(level string, writer string, prettyprint string) Logger {\n\tvar lg Logger\n\tlg.level = stringToLevel()[level]\n\tlg.logger = json.NewEncoder(stringToWriter(writer))\n\tif prettyprint == \"true\" {\n\t\tlg.logger.SetIndent(\"\", \" \")\n\t}\n\n\tvar process = strings.Split(os.Args[0], \"/\")\n\tlg.json.Process = process[len(process)-1]\n\n\treturn lg\n}", "func New(config config.Config) Logconfigurations {\n\tclient := http.NewClient(config)\n\n\tpkg := Logconfigurations{\n\t\tclient: client,\n\t\tlogger: config.GetLogger(),\n\t}\n\n\treturn pkg\n}", "func New(config *Config) (Logger, error) {\n\troot = logrus.New()\n\tif err := SetOutput(config.Output); err != nil {\n\t\treturn nil, err\n\t}\n\t// Set level\n\tif err := SetLevel(config.Level); err != nil {\n\t\treturn nil, err\n\t}\n\tconsole := false\n\tswitch config.Output {\n\tcase \"stdout\":\n\t\tconsole = true\n\tcase \"stderr\":\n\t\tconsole = true\n\tcase \"split\":\n\t\tconsole = true\n\t}\n\tif console {\n\t\tSetTextFormatter(config.ConsoleFormat)\n\t} else {\n\t\tSetJSONFormatter()\n\t}\n\t// Add global fields\n\tSetFields(config.Fields)\n\tlogg = &logger{\n\t\tentry: logrus.NewEntry(root),\n\t\tconfig: config,\n\t}\n\treturn logg, nil\n}", "func CreateInstallLogstashSystemPluginRequest() (request *InstallLogstashSystemPluginRequest) {\n\trequest = &InstallLogstashSystemPluginRequest{\n\t\tRoaRequest: &requests.RoaRequest{},\n\t}\n\trequest.InitWithApiInfo(\"elasticsearch\", \"2017-06-13\", \"InstallLogstashSystemPlugin\", 
\"/openapi/logstashes/[InstanceId]/plugins/system/actions/install\", \"elasticsearch\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func newLog(prefix string) *logging.Logger {\n\tfdFmt := logging.MustStringFormatter(\n\t\t`%{level:.4s}[%{id:03x}]%{time:2006-01-02 15:04:05.000}: %{message}`,\n\t)\n\t// nolint\n\tfd, err := os.OpenFile(logFile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)\n\tif err != nil {\n\t\tfmt.Fprint(os.Stderr, err.Error())\n\t}\n\n\tfdLog := logging.NewLogBackend(fd, \"\", 0)\n\tfdFmttr = logging.NewBackendFormatter(fdLog, fdFmt)\n\n\tsysFmttr, err := logging.NewSyslogBackend(prog + \": \")\n\tif err != nil {\n\t\tfmt.Fprint(os.Stderr, err.Error())\n\t}\n\n\tlogging.SetBackend(fdFmttr, sysFmttr)\n\n\treturn logging.MustGetLogger(prog)\n}", "func New(config *Config, log *zap.Logger) (exporter.TraceExporter, error) {\n\thttpClient := &http.Client{}\n\toptions := []elastic.ClientOptionFunc{\n\t\telastic.SetURL(config.Servers...),\n\t\telastic.SetBasicAuth(config.Username, config.Password),\n\t\telastic.SetSniff(config.Sniffer),\n\t\telastic.SetHttpClient(httpClient),\n\t}\n\tif config.TokenFile != \"\" {\n\t\ttoken, err := loadToken(config.TokenFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thttpClient.Transport = &tokenAuthTransport{\n\t\t\ttoken: token,\n\t\t\twrapped: &http.Transport{},\n\t\t}\n\t}\n\n\tesRawClient, err := elastic.NewClient(options...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create Elasticsearch client for %s, %v\", config.Servers, err)\n\t}\n\tbulk, err := esRawClient.BulkProcessor().\n\t\tBulkActions(config.bulkActions).\n\t\tBulkSize(config.bulkSize).\n\t\tWorkers(config.bulkWorkers).\n\t\tFlushInterval(config.bulkFlushInterval).\n\t\tDo(context.Background())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tversion := config.Version\n\tif version == 0 {\n\t\tversion, err = getVersion(esRawClient, config.Servers[0])\n\t}\n\tvar tags []string\n\tif config.TagsAsFields.AllAsFields && config.TagsAsFields.File != \"\" {\n\t\ttags, err = loadTagsFromFile(config.TagsAsFields.File)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to load tags file: %v\", err)\n\t\t}\n\t}\n\n\tw := esSpanStore.NewSpanWriter(esSpanStore.SpanWriterParams{\n\t\tLogger: log,\n\t\tMetricsFactory: metrics.NullFactory,\n\t\tClient: eswrapper.WrapESClient(esRawClient, bulk, version),\n\t\tIndexPrefix: config.IndexPrefix,\n\t\tUseReadWriteAliases: config.UseWriteAlias,\n\t\tAllTagsAsFields: config.TagsAsFields.AllAsFields,\n\t\tTagKeysAsFields: tags,\n\t\tTagDotReplacement: config.TagsAsFields.DotReplacement,\n\t})\n\n\tif config.CreateTemplates {\n\t\tspanMapping, serviceMapping := es.GetMappings(int64(config.Shards), int64(config.Shards), version)\n\t\terr := w.CreateTemplates(spanMapping, serviceMapping)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tstorage := jexporter.Storage{\n\t\tWriter: w,\n\t}\n\treturn exporterhelper.NewTraceExporter(\n\t\tconfig,\n\t\tstorage.Store,\n\t\texporterhelper.WithShutdown(func() error {\n\t\t\treturn w.Close()\n\t\t}))\n}", "func New(opts ...NewFuncOption) {\n\tlogLevel := zerolog.InfoLevel\n\tzerolog.SetGlobalLevel(logLevel)\n\tlogger := zerolog.New(os.Stdout).With().Timestamp().Logger()\n\tfor _, o := range opts {\n\t\tlogger = o(&logger)\n\t}\n\n\tLogger = logger\n}", "func newInfluxDB(config *Config) (hook *InfluxDBHook, err error) {\n if config == nil {\n config = &Config{}\n }\n\n config.defaults()\n\n var client = newInfluxDBClient(config)\n\n // Make sure that 
we can connect to InfluxDB\n isReady, err := client.Ready(context.Background()) // if this takes more than 5 seconds then influxdb is probably down\n if err != nil || !isReady {\n return nil, fmt.Errorf(\"NewInfluxDB: Error connecting to InfluxDB, %v\", err)\n }\n\n hook = &InfluxDBHook{\n client: client,\n database: config.Database,\n measurement: config.Measurement,\n tagList: config.Tags,\n precision: config.Precision,\n syslog: config.Syslog,\n facility: config.Facility,\n facilityCode: config.FacilityCode,\n appName: config.AppName,\n version: config.Version,\n minLevel: config.MinLevel,\n org: config.Org,\n bucket: config.Bucket,\n ch: ringchan.NewRingChan(10, config.MaxBufferLog),\n }\n go hook.process()\n return hook, nil\n}", "func init() {\n\t// TODO: set logger\n\t// TODO: register storage plugin to plugin manager\n}", "func newLogger() *ServiceLogger {\n\tLogger := log.New()\n\tvar serviceLogger ServiceLogger\n\t// Log as JSON instead of the default ASCII formatter.\n\tLogger.SetFormatter(&log.JSONFormatter{})\n\n\t// Output to stdout instead of the default stderr\n\tLogger.SetOutput(os.Stdout)\n\n\t// Only log the warning severity or above.\n\tLogger.SetLevel(log.InfoLevel)\n\n\tserviceLogger.Logger = Logger\n\n\treturn &serviceLogger\n}", "func newCMLogger(name string, chainId string, logger *zap.SugaredLogger, logLevel log.LOG_LEVEL) *CMLogger {\n\treturn &CMLogger{name: name, chainId: chainId, SugaredLogger: logger, logLevel: logLevel}\n}", "func New(config Config) Logger {\n\tlg := &logger{\n\t\tencoder: config.Encoder,\n\t\tfilters: config.Filters,\n\t\tthreshold: config.Threshold,\n\t}\n\tif len(lg.filters) == 0 {\n\t\tlg.filters = []Filter{DefaultFilter}\n\t}\n\tif config.Encoder == nil {\n\t\tlg.encoder = DefaultEncoder\n\t}\n\treturn lg\n}", "func New(cfg *Config, logger logger.Logger, registerer prometheus.Registerer) (*Plugin, error) {\n\tservice := &Plugin{\n\t\tcfg: cfg,\n\t\tregisterer: registerer,\n\t\tLogger: logger.NewLogger(\"simplePlugin\"),\n\t}\n\treturn service, nil\n}", "func newConfig(appName string, pathToKeybase string, log Log, ignoreSnooze bool) (*config, error) {\n\tcfg := newDefaultConfig(appName, pathToKeybase, log, ignoreSnooze)\n\terr := cfg.load()\n\treturn &cfg, err\n}", "func New(conf *viper.Viper) plugins.Plugin {\n\tconf = setDefaults(conf)\n\ttask := func() string {\n\t\tformat := conf.GetString(\"timeFormat\")\n\t\treturn time.Now().Format(format)\n\t}\n\treturn core.NewTimerFunc(conf, task)\n}", "func New() *Log {\n\tc := loadConfs()\n\treturn &Log{\n\t\tactiveSegment: c.newSegment(\"testfile\"),\n\t}\n}", "func New(out io.Writer) Logger {\n\tl := log.NewJSONLogger(log.NewSyncWriter(out))\n\tl = log.With(l, \"ts\", log.DefaultTimestampUTC)\n\treturn &logger{l}\n}", "func init() {\n\tregistry.Add(\"tapo\", NewTapoFromConfig)\n}", "func New(t testing.TB) *slog.Logger {\n\ttw := &testWriter{t: t}\n\tslh := slog.NewTextHandler(tw)\n\treturn slog.New(slh)\n}", "func newLog() {\n\tlogBuffer = log.NewLogBuffer(DefaultHttpLogBuffers)\n\tsrvLog = NewSrvLog()\n\n\ttrace.DebugLogger = srvLog\n\ttrace.ErrorLogger = srvLog\n}", "func New() {\n\tzapLogger, _ := zap.NewProduction()\n\tLog = Logger{\n\t\tinternalLogger: zapLogger.Sugar(),\n\t}\n}", "func New(tag, remoteHost string) (logger *Slog, err error) {\n\tlogger = &Slog{}\n\tl, err := syslog.Dial(\"udp\", remoteHost, syslog.LOG_LOCAL0, tag)\n\tlogger.log = l\n\treturn logger, err\n}", "func New(info logger.Info) (logger.Logger, error) {\n\tif info.LogPath == \"\" {\n\t\treturn nil, 
errdefs.System(errors.New(\"log path is missing -- this is a bug and should not happen\"))\n\t}\n\n\tcfg := newDefaultConfig()\n\tif capacity, ok := info.Config[\"max-size\"]; ok {\n\t\tvar err error\n\t\tcfg.MaxFileSize, err = units.FromHumanSize(capacity)\n\t\tif err != nil {\n\t\t\treturn nil, errdefs.InvalidParameter(errors.Wrapf(err, \"invalid value for max-size: %s\", capacity))\n\t\t}\n\t}\n\n\tif userMaxFileCount, ok := info.Config[\"max-file\"]; ok {\n\t\tvar err error\n\t\tcfg.MaxFileCount, err = strconv.Atoi(userMaxFileCount)\n\t\tif err != nil {\n\t\t\treturn nil, errdefs.InvalidParameter(errors.Wrapf(err, \"invalid value for max-file: %s\", userMaxFileCount))\n\t\t}\n\t}\n\n\tif userCompress, ok := info.Config[\"compress\"]; ok {\n\t\tcompressLogs, err := strconv.ParseBool(userCompress)\n\t\tif err != nil {\n\t\t\treturn nil, errdefs.InvalidParameter(errors.Wrap(err, \"error reading compress log option\"))\n\t\t}\n\t\tcfg.DisableCompression = !compressLogs\n\t}\n\treturn newDriver(info.LogPath, cfg)\n}", "func init() {\n\tlogger = &log.Logger{\n\t\tOut: os.Stdout,\n\t\tLevel: log.DebugLevel,\n\t\tFormatter: &logStashFormatter{log.TextFormatter{\n\t\t\tTimestampFormat: \"2006-01-02 15:04:05\",\n\t\t\tFullTimestamp: true},\n\t\t},\n\t}\n}", "func New(ctx ...interface{}) log15.Logger {\n\tl := log15.New(ctx...)\n\tl.SetHandler(defaultHandler)\n\treturn l\n}", "func New(c *configs.Config) (s *Service) {\n\t//var ac = new(paladin.TOML)\n\t//if err := paladin.Watch(\"application.toml\", ac); err != nil {\n\t//\tpanic(err)\n\t//}\n\ts = &Service{\n\t\tc: c,\n\t\tdao: dao.New(c),\n\t}\n\treturn s\n}", "func newSLogAsZap(log *zap.Logger, sugar *zap.SugaredLogger) {\n\tif zapLog == nil {\n\t\tzapLog = &zapLogger{\n\t\t\tLog: log,\n\t\t\tSugar: sugar,\n\t\t}\n\t\tsugar.Info(\"zap log init success as sugar\")\n\t}\n}", "func (out *elasticsearchOutput) Init(beat string, config outputs.MothershipConfig, topology_expire int) error {\n\n\tif len(config.Protocol) == 0 {\n\t\tconfig.Protocol = \"http\"\n\t}\n\n\tvar urls []string\n\n\tif len(config.Hosts) > 0 {\n\t\t// use hosts setting\n\t\tfor _, host := range config.Hosts {\n\t\t\turl := fmt.Sprintf(\"%s://%s%s\", config.Protocol, host, config.Path)\n\t\t\turls = append(urls, url)\n\t\t}\n\t} else {\n\t\t// use host and port settings\n\t\turl := fmt.Sprintf(\"%s://%s:%d%s\", config.Protocol, config.Host, config.Port, config.Path)\n\t\turls = append(urls, url)\n\t}\n\n\tes := NewElasticsearch(urls, config.Username, config.Password)\n\tout.Conn = es\n\n\tif config.Index != \"\" {\n\t\tout.Index = config.Index\n\t} else {\n\t\tout.Index = beat\n\t}\n\n\tout.TopologyExpire = 15000\n\tif topology_expire != 0 {\n\t\tout.TopologyExpire = topology_expire /*sec*/ * 1000 // millisec\n\t}\n\n\tout.FlushInterval = 1000 * time.Millisecond\n\tif config.Flush_interval != nil {\n\t\tout.FlushInterval = time.Duration(*config.Flush_interval) * time.Millisecond\n\t}\n\tout.BulkMaxSize = 10000\n\tif config.Bulk_size != nil {\n\t\tout.BulkMaxSize = *config.Bulk_size\n\t}\n\n\tif config.Max_retries != nil {\n\t\tout.Conn.SetMaxRetries(*config.Max_retries)\n\t}\n\n\tlogp.Info(\"[ElasticsearchOutput] Using Elasticsearch %s\", urls)\n\tlogp.Info(\"[ElasticsearchOutput] Using index pattern [%s-]YYYY.MM.DD\", out.Index)\n\tlogp.Info(\"[ElasticsearchOutput] Topology expires after %ds\", out.TopologyExpire/1000)\n\tif out.FlushInterval > 0 {\n\t\tlogp.Info(\"[ElasticsearchOutput] Insert events in batches. Flush interval is %s. 
Bulk size is %d.\", out.FlushInterval, out.BulkMaxSize)\n\t} else {\n\t\tlogp.Info(\"[ElasticsearchOutput] Insert events one by one. This might affect the performance of the shipper.\")\n\t}\n\n\tif config.Save_topology {\n\t\terr := out.EnableTTL()\n\t\tif err != nil {\n\t\t\tlogp.Err(\"Fail to set _ttl mapping: %s\", err)\n\t\t\t// keep trying in the background\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\terr := out.EnableTTL()\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tlogp.Err(\"Fail to set _ttl mapping: %s\", err)\n\t\t\t\t\ttime.Sleep(5 * time.Second)\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}\n\n\tout.sendingQueue = make(chan EventMsg, 1000)\n\tgo out.SendMessagesGoroutine()\n\n\treturn nil\n}", "func (l *Logger) createNew() {\n\tpath := filepath.Join(l.dir, fmt.Sprintf(\"%s-current.log\", l.filePrefix))\n\tif err := os.MkdirAll(l.dir, 0744); err != nil {\n\t\tglog.WithError(err).Fatal(\"Unable to create directory.\")\n\t}\n\tf, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC,\n\t\tos.FileMode(0644))\n\tif err != nil {\n\t\tglog.WithError(err).Fatal(\"Unable to create a new file.\")\n\t}\n\tl.cf = new(CurFile)\n\tl.cf.f = f\n\tcache := new(Cache)\n\tcreateAndUpdateBloomFilter(cache)\n\tatomic.StorePointer(&l.cf.cch, unsafe.Pointer(cache))\n}", "func New() *Config {\n\treturn &Config{DataFormat: \"influx\"}\n}", "func New(t testing.TB) lg.Log {\n\treturn NewWith(t, FactoryFn)\n}", "func new(level string) *logrus.Logger {\n\tlog := logrus.New()\n\n\tlog.SetOutput(os.Stdout)\n\tlog.SetFormatter(&logrus.JSONFormatter{})\n\n\tif l, err := logrus.ParseLevel(level); err != nil {\n\t\tlog.WithField(\"invalidLevel\", level).\n\t\t\tError(\"invalid log level, defaulting to 'info'\")\n\t} else {\n\t\tlog.SetLevel(l)\n\t\tlog.WithField(\"to\", level).\n\t\t\tInfo(\"log level set\")\n\t}\n\treturn log\n}", "func newLogrus(level string, formatter string, output io.Writer) *logrus.Logger {\n\tl, err := logrus.ParseLevel(level)\n\tif err != nil {\n\t\tfmt.Printf(\"Bad level: %v, set it to 'debug'\", level)\n\t\tl = logrus.DebugLevel\n\t}\n\tlogger := &logrus.Logger{\n\t\tOut: output,\n\t\tHooks: make(logrus.LevelHooks),\n\t\tLevel: l,\n\t}\n\tswitch formatter {\n\tcase \"json\":\n\t\tlogger.Formatter = &logrus.JSONFormatter{TimestampFormat: TimestampFormat}\n\tcase \"text\":\n\t\tfallthrough\n\tdefault:\n\t\tlogger.Formatter = &logrus.TextFormatter{DisableColors: true,\n\t\t\tDisableSorting: false, TimestampFormat: TimestampFormat}\n\t}\n\treturn logger\n}", "func init() {\n file, err := os.Create(\"node.log\")\n if err != nil {\n panic(err.Error())\n }\n nodeLogger.Logger = log.New(file, \"\", 0)\n l = lg.New(os.Stdout, \"[utl] \", 0, 0, false)\n}", "func newSSHDefaultConfig(userName, identity string) (*sshClientConfig, error) {\n\tconfig, err := sshDefaultConfig(userName, identity)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &sshClientConfig{ClientConfig: config}, nil\n}", "func New(logLevel uint, logDir string, format logrus.Formatter) (*logrus.Logger, error) {\n\tlog := logrus.New()\n\tlog.Level = logrus.Level(logLevel)\n\n\tvar logFName string\n\tts := time.Now().Format(\"tccbot-2006-01-02-15-04-05\")\n\tif logDir != \"\" {\n\t\tlogFName = filepath.Join(logDir, ts+\".log\")\n\n\t\tlogFile, err := os.OpenFile(logFName, os.O_CREATE|os.O_WRONLY, 0644)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tlog.SetOutput(logFile)\n\t\tlogrus.RegisterExitHandler(func() {\n\t\t\tif logFile == nil 
{\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlogFile.Close()\n\t\t})\n\t}\n\n\tlog.SetFormatter(format)\n\n\tlog.WithFields(logrus.Fields{\"application\": \"tccbot\", \"location\": logFName})\n\n\treturn log, nil\n}", "func init() {\n\tlogger.Info(\"Initialising Parse JWT Go Plugin\")\n}", "func (e Engine) newLogs(config dvid.StoreConfig) (*fileLogs, bool, error) {\n\tpath, _, err := parseConfig(config)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tvar created bool\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\tdvid.Infof(\"Log not already at path (%s). Creating ...\\n\", path)\n\t\tif err := os.MkdirAll(path, 0755); err != nil {\n\t\t\treturn nil, false, err\n\t\t}\n\t\tcreated = true\n\t} else {\n\t\tdvid.Infof(\"Found log at %s (err = %v)\\n\", path, err)\n\t}\n\n\t// opt, err := getOptions(config.Config)\n\t// if err != nil {\n\t// \treturn nil, false, err\n\t// }\n\n\tlog := &fileLogs{\n\t\tpath: path,\n\t\tconfig: config,\n\t\tfiles: make(map[string]*fileLog),\n\t}\n\treturn log, created, nil\n}", "func New(logEnable bool, logServiceType int, config *Fluent) ILog {\n\tif !logEnable {\n\t\tlogServiceType = DEFAULT\n\t}\n\n\tswitch logServiceType {\n\tcase FLUENT:\n\t\treturn NewFluent(config)\n\tdefault:\n\t\treturn Default()\n\t}\n\n\treturn nil\n}", "func newConfig(path string) *Config {\n\tfile, e := ioutil.ReadFile(path)\n\tif e != nil {\n\t\tlog.Fatalf(\"config error: %v\", e)\n\t}\n\tvar cfg Config\n\te = json.Unmarshal(file, &cfg)\n\tif e != nil {\n\t\tlog.Fatalf(\"config error: %v\", e)\n\t}\n\n\t// redirect logging to a file\n\twriter, err := os.OpenFile(\"gorobot.log\", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to open file log: %v\", err)\n\t}\n\tlog.SetOutput(writer)\n\treturn &cfg\n}", "func newSyslog(encoder zapcore.Encoder, enab zapcore.LevelEnabler) (zapcore.Core, error) {\n\t// Initialize a syslog writer.\n\twriter, err := syslog.New(syslog.LOG_ERR|syslog.LOG_LOCAL0, filepath.Base(os.Args[0]))\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to get a syslog writer\")\n\t}\n\n\treturn &syslogCore{\n\t\tLevelEnabler: enab,\n\t\tencoder: encoder,\n\t\twriter: writer,\n\t}, nil\n}", "func newLogger() logger {\n\tleKey := os.Getenv(\"LOG_ENTRIES_KEY\")\n\tfmtFallback := &fmtLogger{}\n\n\tif leKey == \"\" {\n\t\treturn fmtFallback\n\t}\n\n\tle, err := le_go.Connect(leKey)\n\n\tif err != nil {\n\t\treturn fmtFallback\n\t}\n\n\tdefer le.Close()\n\n\treturn le\n}", "func TestNew_noMetaOnInit(t *testing.T) {\n\tt.Parallel()\n\n\ttmpDir := t.TempDir()\n\tbucket, err := fileblob.OpenBucket(tmpDir, nil)\n\trequire.NoError(t, err)\n\trequire.NoError(t,\n\t\tbucket.WriteAll(context.Background(), \".pulumi/stacks/dev.json\", []byte(\"bar\"), nil))\n\n\tctx := context.Background()\n\t_, err = New(ctx, diagtest.LogSink(t), \"file://\"+filepath.ToSlash(tmpDir), nil)\n\trequire.NoError(t, err)\n\n\tassert.NoFileExists(t, filepath.Join(tmpDir, \".pulumi\", \"meta.yaml\"))\n}", "func New(l *logging.Logger) transport.Transport {\n\treturn &DummyLogs{\n\t\tlogger: l,\n\t}\n}", "func New(env string, fields ...zap.Field) (logger *zap.Logger) {\n\tswitch env {\n\tcase \"production\":\n\t\tlogger, _ = zap.NewProduction()\n\tcase \"development\":\n\t\tlogger, _ = zap.NewDevelopment()\n\tdefault:\n\t\tlogger = zap.NewNop()\n\t}\n\tlogger = logger.With(fields...)\n\treturn\n}", "func run(c *cli.Context) error {\n\t// set the log level for the plugin\n\tswitch c.String(\"log.level\") {\n\tcase \"t\", \"trace\", \"Trace\", 
\"TRACE\":\n\t\tlogrus.SetLevel(logrus.TraceLevel)\n\tcase \"d\", \"debug\", \"Debug\", \"DEBUG\":\n\t\tlogrus.SetLevel(logrus.DebugLevel)\n\tcase \"w\", \"warn\", \"Warn\", \"WARN\":\n\t\tlogrus.SetLevel(logrus.WarnLevel)\n\tcase \"e\", \"error\", \"Error\", \"ERROR\":\n\t\tlogrus.SetLevel(logrus.ErrorLevel)\n\tcase \"f\", \"fatal\", \"Fatal\", \"FATAL\":\n\t\tlogrus.SetLevel(logrus.FatalLevel)\n\tcase \"p\", \"panic\", \"Panic\", \"PANIC\":\n\t\tlogrus.SetLevel(logrus.PanicLevel)\n\tcase \"i\", \"info\", \"Info\", \"INFO\":\n\t\tfallthrough\n\tdefault:\n\t\tlogrus.SetLevel(logrus.InfoLevel)\n\t}\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"code\": \"https://github.com/go-vela/vela-kaniko\",\n\t\t\"docs\": \"https://go-vela.github.io/docs/plugins/registry/pipeline/kaniko\",\n\t\t\"registry\": \"https://hub.docker.com/r/target/vela-kaniko\",\n\t}).Info(\"Vela Kaniko Plugin\")\n\n\t// create the plugin\n\tp := &Plugin{\n\t\t// build configuration\n\t\tBuild: &Build{\n\t\t\tEvent: c.String(\"build.event\"),\n\t\t\tSha: c.String(\"build.sha\"),\n\t\t\tSnapshotMode: c.String(\"build.snapshot_mode\"),\n\t\t\tTag: c.String(\"build.tag\"),\n\t\t},\n\t\t// image configuration\n\t\tImage: &Image{\n\t\t\tArgs: c.StringSlice(\"image.build_args\"),\n\t\t\tContext: c.String(\"image.context\"),\n\t\t\tDockerfile: c.String(\"image.dockerfile\"),\n\t\t\tTarget: c.String(\"image.target\"),\n\t\t},\n\t\t// registry configuration\n\t\tRegistry: &Registry{\n\t\t\tDryRun: c.Bool(\"registry.dry_run\"),\n\t\t\tName: c.String(\"registry.name\"),\n\t\t\tMirror: c.String(\"registry.mirror\"),\n\t\t\tUsername: c.String(\"registry.username\"),\n\t\t\tPassword: c.String(\"registry.password\"),\n\t\t\tPushRetry: c.Int(\"registry.push_retry\"),\n\t\t},\n\t\t// repo configuration\n\t\tRepo: &Repo{\n\t\t\tAutoTag: c.Bool(\"repo.auto_tag\"),\n\t\t\tCache: c.Bool(\"repo.cache\"),\n\t\t\tCacheName: c.String(\"repo.cache_name\"),\n\t\t\tName: c.String(\"repo.name\"),\n\t\t\tTags: c.StringSlice(\"repo.tags\"),\n\t\t\tLabel: &Label{\n\t\t\t\tAuthorEmail: c.String(\"label.author_email\"),\n\t\t\t\tCommit: c.String(\"label.commit\"),\n\t\t\t\tCreated: time.Now().Format(time.RFC3339),\n\t\t\t\tFullName: c.String(\"label.full_name\"),\n\t\t\t\tNumber: c.Int(\"label.number\"),\n\t\t\t\tURL: c.String(\"label.url\"),\n\t\t\t},\n\t\t\tLabels: c.StringSlice(\"repo.labels\"),\n\t\t},\n\t}\n\n\t// validate the plugin\n\terr := p.Validate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// execute the plugin\n\treturn p.Exec()\n}", "func init() {\n\tlog = config.Logger()\n}", "func init() {\n\tlog = config.Logger()\n}", "func New(cfg *Config) (*Agent, error) {\n\n\tlogger := cfg.Logger\n\tif logger == nil {\n\t\tlogLevel := hlog.LevelFromString(cfg.LogLevel)\n\t\tif logLevel == hlog.NoLevel {\n\t\t\tlogLevel = hlog.Info\n\t\t}\n\t\tlogFormat := cfg.LogTimeFormat\n\t\tif logFormat == \"\" {\n\t\t\tlogFormat = rkvApi.DefaultTimeFormat\n\t\t}\n\t\tlogOpts := &hlog.LoggerOptions{\n\t\t\tName: fmt.Sprintf(\"rkv-%s\", cfg.NodeName),\n\t\t\tLevel: logLevel,\n\t\t\tIncludeLocation: cfg.LogIncludeLocation,\n\t\t\tOutput: cfg.LogOutput,\n\t\t\tTimeFormat: logFormat,\n\t\t}\n\t\tif logLevel > hlog.Debug {\n\t\t\t// to skip serf and memberlist debug logs\n\t\t\tlogOpts.Exclude = func(\n\t\t\t\tlevel hlog.Level, msg string, args ...interface{}) bool {\n\n\t\t\t\treturn strings.Index(msg, \"[DEBUG]\") > -1\n\t\t\t}\n\t\t}\n\t\tlogger = hlog.New(logOpts)\n\t}\n\tcfg.Logger = logger\n\n\thostname, _ := os.Hostname()\n\trpcAddr, _ := 
cfg.RPCAddr()\n\tlogger.Info(\"os\", \"hostname\", hostname)\n\tlogger.Info(\"config\", \"log-level\", cfg.LogLevel)\n\tlogger.Info(\"config\", \"node-name\", cfg.NodeName)\n\tlogger.Info(\"config\", \"data-dir\", cfg.DataDir)\n\tlogger.Info(\"config\", \"db\", cfg.Backend.DSN())\n\tlogger.Info(\"config\", \"discovery-join-address\", cfg.StartJoinAddrs)\n\tlogger.Info(\"config\", \"gRPC address\", rpcAddr)\n\tlogger.Info(\"config\", \"Raft.Heartbeat timeout\", cfg.Raft.HeartbeatTimeout)\n\tlogger.Info(\"config\", \"Raft.Election timeout\", cfg.Raft.ElectionTimeout)\n\n\ta := &Agent{\n\t\tConfig: cfg,\n\t\tlogger: logger,\n\t\tregistry: cfg.Registry,\n\t}\n\n\tif a.registry == nil {\n\t\ta.registry = registry.NewApplyRegistrator()\n\t}\n\n\tvar setup = []struct {\n\t\tname string\n\t\tfn func() error\n\t}{\n\t\t{\"setupRoute\", a.setupRoute},\n\t\t{\"setupRaft\", a.setupRaft},\n\t\t{\"setupMembership\", a.setupMembership},\n\t\t{\"setupGrpcServer\", a.setupGrpcServer},\n\t\t{\"setupHTTPServer\", a.setupHTTPServer},\n\t}\n\tfor _, s := range setup {\n\t\tif err := s.fn(); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"%v: %v\", s.name, err)\n\t\t}\n\t}\n\treturn a, nil\n}", "func New(debug bool) *Logs {\n\tlogrus.SetOutput(os.Stdout)\n\tprimary := logrus.New()\n\n\tif debug {\n\t\tlogrus.SetLevel(logrus.DebugLevel)\n\t\tprimary.SetFormatter(&logrus.TextFormatter{\n\t\t\tForceColors: true,\n\t\t})\n\t} else {\n\t\tlogrus.SetLevel(logrus.InfoLevel)\n\t\tprimary.SetFormatter(&logrus.JSONFormatter{})\n\n\t}\n\n\treturn &Logs{\n\t\tPrimary: primary,\n\t\tCommand: primary.WithField(\"type\", \"command\"),\n\t\tMultiplexer: primary.WithField(\"type\", \"multiplexer\"),\n\t\tdebug: debug,\n\t}\n}", "func New(level zapcore.Level, serviceName, environment string) (*zap.Logger, error) {\n\tconfig := zap.NewProductionConfig()\n\tconfig.Level = zap.NewAtomicLevelAt(level)\n\tconfig.DisableStacktrace = false\n\tconfig.Sampling = nil\n\tconfig.OutputPaths = []string{\"stdout\"}\n\tconfig.ErrorOutputPaths = []string{\"stderr\"}\n\tconfig.InitialFields = map[string]interface{}{\n\t\t\"service\": serviceName,\n\t\t\"env\": environment,\n\t}\n\treturn config.Build()\n}", "func newStorer(pluginName string, storagePath string, gcpProjectID string, gcpCredentialsFile string) (storer store.GlobalSiloStringStorer, err error) {\n\tif gcpProjectID != \"\" {\n\t\treturn newDatastoreStorerWithInMemoryCache(pluginName, gcpProjectID, gcpCredentialsFile)\n\t}\n\n\treturn store.NewLevelDB(pluginName, storagePath)\n}", "func New(config Config) *zap.Logger {\n\t// If we are running in production, stdin will not be a terminal. 
Otherwise, we should use a\n\t// more friendly looking output style.\n\tencoder := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())\n\tif isTerminal() {\n\t\tencoderConf := zap.NewDevelopmentEncoderConfig()\n\t\tencoderConf.EncodeLevel = zapcore.CapitalColorLevelEncoder\n\t\tencoderConf.MessageKey = \"message\"\n\n\t\tencoder = zapcore.NewConsoleEncoder(encoderConf)\n\t}\n\n\twriter := os.Stdout\n\n\tminLevel, ok := levelMap[config.Level]\n\tif !ok {\n\t\tminLevel = zapcore.InfoLevel\n\t}\n\n\tenabler := zap.LevelEnablerFunc(func(level zapcore.Level) bool {\n\t\treturn level >= minLevel\n\t})\n\n\treturn zap.New(zapcore.NewCore(encoder, writer, enabler))\n}", "func newLog(jobId string) Log {\n\treturn Log{\n\t\tId: uniuri.New(),\n\t\tJobId: jobId,\n\t\tStatus: \"New\",\n\t}\n}", "func InitLog() {\n // TODO: implement json logger\n\n /*log.SetFormatter(&log.TextFormatter{\n DisableTimestamp: true,\n })\n if logJson {\n log.SetFormatter(&log.JSONFormatter{})\n }\n log.SetOutput(os.Stdout)\n\n level, err := log.ParseLevel(logLevel)\n if err != nil {\n log.Error(errors.Wrap(err, fmt.Sprintf(\"Invalid log level %s, defaulting to INFO\", logLevel)))\n level = log.InfoLevel\n }\n log.SetLevel(level)*/\n\n\n log = standard.New(logLevel)\n\n}", "func New(options ...Option) *Logger {\n\n\tres := Logger{\n\t\tnow: time.Now,\n\t\tfatal: func() { os.Exit(1) },\n\t\tstdout: os.Stdout,\n\t\tstderr: os.Stderr,\n\t\tcallerDepth: 0,\n\t\tmapper: nopMapper,\n\t\treTrace: reTraceDefault,\n\t}\n\tfor _, opt := range options {\n\t\topt(&res)\n\t}\n\n\tif res.format != \"\" {\n\t\t// formatter defined\n\t\tvar err error\n\t\tres.templ, err = template.New(\"lgr\").Parse(res.format)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"invalid template %s, error %v. switched to %s\\n\", res.format, err, Short)\n\t\t\tres.format = Short\n\t\t\tres.templ = template.Must(template.New(\"lgrDefault\").Parse(Short))\n\t\t}\n\n\t\tbuf := bytes.Buffer{}\n\t\tif err = res.templ.Execute(&buf, layout{}); err != nil {\n\t\t\tfmt.Printf(\"failed to execute template %s, error %v. 
switched to %s\\n\", res.format, err, Short)\n\t\t\tres.format = Short\n\t\t\tres.templ = template.Must(template.New(\"lgrDefault\").Parse(Short))\n\t\t}\n\t}\n\n\t// set *On flags once for optimization on multiple Logf calls\n\tres.callerOn = strings.Contains(res.format, \"{{.Caller\") || res.callerFile || res.callerFunc || res.callerPkg\n\tres.levelBracesOn = strings.Contains(res.format, \"[{{.Level}}]\") || res.levelBraces\n\n\tres.sameStream = isStreamsSame(res.stdout, res.stderr)\n\n\treturn &res\n}", "func New(debug bool, errorChannel string) *Logs {\n\tlogrus.SetOutput(os.Stdout)\n\tprimary := logrus.New()\n\n\tif debug {\n\t\tlogrus.SetLevel(logrus.DebugLevel)\n\t\tprimary.SetFormatter(&logrus.TextFormatter{\n\t\t\tForceColors: true,\n\t\t})\n\t} else {\n\t\tlogrus.SetLevel(logrus.InfoLevel)\n\t\tprimary.SetFormatter(&logrus.JSONFormatter{})\n\n\t}\n\n\treturn &Logs{\n\t\tPrimary: primary,\n\t\tCommand: primary.WithField(\"type\", \"command\"),\n\t\tMultiplexer: primary.WithField(\"type\", \"multiplexer\"),\n\t\tdebug: debug,\n\t\terrorChannel: errorChannel,\n\t}\n}", "func New() *Logger {\n\tif log == nil {\n\t\tlog = new(Logger)\n\t\tlog.Logger = logrus.New()\n\n\t\tlog.Formatter = &MyFormatter{}\n\n\t\tswitch strings.ToUpper(strings.TrimSpace(configure.GetString(\"log.level\"))) {\n\t\tcase \"PANIC\":\n\t\t\tlog.Level = logrus.PanicLevel\n\t\tcase \"FATAL\":\n\t\t\tlog.Level = logrus.FatalLevel\n\t\tcase \"ERROR\":\n\t\t\tlog.Level = logrus.ErrorLevel\n\t\tcase \"WARN\", \"WARNING\":\n\t\t\tlog.Level = logrus.WarnLevel\n\t\tcase \"INFO\":\n\t\t\tlog.Level = logrus.InfoLevel\n\t\tcase \"DEBUG\":\n\t\t\tlog.Level = logrus.DebugLevel\n\t\tdefault:\n\t\t\tlog.Level = logrus.DebugLevel\n\t\t}\n\n\t\tlogFile := getLogFile(strings.TrimSpace(configure.GetString(\"log.file\")))\n\t\tlog.Out = logFile\n\n\t}\n\treturn log\n}", "func NewDefault(handler Handler) *Logger {\n\treturn New(handler, Ltime|Lfile|Llevel)\n}", "func NewLogger(output *os.File, component string) (Logger, error) {\n\tlog := zerolog.New(output).With().\n\t\tStr(\"component\", component).\n\t\tLogger()\n\n\tswitch viper.GetString(\"log-level\") {\n\tcase \"debug\":\n\t\tzerolog.SetGlobalLevel(zerolog.DebugLevel)\n\tcase \"warning\":\n\t\tzerolog.SetGlobalLevel(zerolog.WarnLevel)\n\tcase \"fatal\":\n\t\tzerolog.SetGlobalLevel(zerolog.FatalLevel)\n\tcase \"info\":\n\t\tzerolog.SetGlobalLevel(zerolog.InfoLevel)\n\tdefault:\n\t\tzerolog.SetGlobalLevel(zerolog.InfoLevel)\n\t\tlog.Info().Msgf(\"Unknown log-level %s, using info.\", viper.GetString(\"log-level\"))\n\t}\n\n\treturn logger{\n\t\tLogger: log,\n\t}, nil\n}", "func init() {\n initialiseLogger()\n}", "func initLoggerParams() error {\n\tvar (\n\t\terr error\n\t\ttimeFormat string\n\t\tformatter *logrus.TextFormatter\n\t\tlevel logrus.Level\n\t\twriter *rotatelogs.RotateLogs\n\t)\n\n\t// timeFormat\n\ttimeFormat = viper.GetString(\"logging.timeFormat\")\n\tif timeFormat == \"\" {\n\t\ttimeFormat = defaultTimeFormat\n\t}\n\n\t// formatter\n\tformatter = &logrus.TextFormatter{\n\t\tDisableColors: true,\n\t\tFullTimestamp: true,\n\t\tTimestampFormat: timeFormat,\n\t\tFieldMap: logrus.FieldMap{\n\t\t\tlogrus.FieldKeyTime: \"timeStamp\",\n\t\t\tlogrus.FieldKeyMsg: \"message\",\n\t\t},\n\t}\n\n\t// logLevel\n\tlogLevel := viper.GetString(\"logging.level\")\n\tif logLevel == \"\" {\n\t\tlogLevel = defaultLogLevel\n\t}\n\tlevel, err = logrus.ParseLevel(logLevel)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// fileFlag\n\tfileFlag := 
viper.GetBool(\"logging.file.enable\")\n\n\t// theLoggerParams\n\ttheLoggerParams.level = level\n\ttheLoggerParams.formatter = formatter\n\ttheLoggerParams.fileFlag = fileFlag\n\ttheLoggerParams.init = true\n\n\tif fileFlag {\n\t\tfilePath := viper.GetString(\"logging.file.path\")\n\t\tif filePath != \"\" {\n\t\t\terr = util.CheckFilePath(filePath)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tfileName := path.Join(filePath, serviceName)\n\t\twriter, err = rotatelogs.New(\n\t\t\t// 分割后的文件名称\n\t\t\tfileName+\".%Y%m%d.log\",\n\n\t\t\t// WithLinkName为最新的日志建立软连接,以方便随着找到当前日志文件\n\t\t\trotatelogs.WithLinkName(fileName),\n\n\t\t\t// WithRotationTime设置日志分割的时间,这里设置为一天分割一次\n\t\t\t// WithRotationSize设置日志分割的大小\n\t\t\trotatelogs.WithRotationTime(24*time.Hour),\n\n\t\t\t// WithMaxAge和WithRotationCount二者只能设置一个,\n\t\t\t// WithMaxAge设置文件清理前的最长保存时间,\n\t\t\t// WithRotationCount设置文件清理前最多保存的个数。\n\t\t\trotatelogs.WithMaxAge(7*24*time.Hour),\n\t\t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ttheLoggerParams.writer = writer\n\t}\n\n\treturn nil\n}", "func newConfig() *Config {\n\n\tc := &Config{}\n\tvar logLevel, bServers, dServers string\n\n\tflag.StringVar(&c.ControlAddress, \"controlAddress\", \"localhost:4000\",\n\t\t\"Control process IP address, default localhost:4000\")\n\n\tflag.BoolVar(&c.Broadcast, \"broadcast\", true,\n\t\t\"Set to false to squash actual broadcast.\")\n\n\tflag.IntVar(&c.Bclients, \"bClients\", 1,\n\t\t\"The number of broadcast clients; Default 1\")\n\n\tflag.IntVar(&c.Dclients, \"dClients\", 1,\n\t\t\"The number of deliver clients; Default 1\")\n\n\tflag.IntVar(&c.Channels, \"channels\", 1,\n\t\t\"The number of channels; Default 1\")\n\n\tflag.StringVar(&bServers, \"bServers\", \"\",\n\t\t\"A comma-separated list of IP:PORT of broadcast servers to target; Required\")\n\n\tflag.StringVar(&dServers, \"dServers\", \"\",\n\t\t\"A comma-separated list of IP:PORT of deliver servers to target; Defaults to broadcast szervers\")\n\n\tflag.IntVar(&c.Transactions, \"transactions\", 1,\n\t\t\"The number of transactions broadcast to each client's servers; Default 1\")\n\n\tflag.IntVar(&c.Payload, \"payload\", TxHeaderSize,\n\t\t\"Payload size in bytes; Minimum/default is the performance header size (56 bytes)\")\n\n\tflag.IntVar(&c.Burst, \"burst\", 1,\n\t\t\"The number of transactions burst to each server during broadcast; Dafault 1\")\n\n\tflag.DurationVar(&c.Delay, \"delay\", 0,\n\t\t\"The delay between bursts, in the form required by time.ParseDuration(); Default is no delay\")\n\n\tflag.IntVar(&c.Window, \"window\", 100,\n\t\t\"The number of blocks allowed to be delivered without an ACK; Default 100\")\n\n\tflag.IntVar(&c.AckEvery, \"ackEvery\", 70,\n\t\t\"The deliver client will ACK every (this many) blocks; Default 70\")\n\n\tflag.DurationVar(&c.Timeout, \"timeout\", 30*time.Second,\n\t\t\"The initialization timeout, in the form required by time.ParseDuration(); Default 30s\")\n\n\tflag.BoolVar(&c.LatencyAll, \"latencyAll\", false,\n\t\t\"By default, only block latencies are reported. 
Set -latencyAll=true to report all transaction latencies\")\n\n\tflag.StringVar(&c.LatencyDir, \"latencyDir\", \"\",\n\t\t\"The directory to contain latency files; These files are only created if -latencyDir is specified\")\n\n\tflag.StringVar(&c.LatencyPrefix, \"latencyPrefix\", \"client\",\n\t\t\"Prefix for latency file names\")\n\n\tflag.StringVar(&logLevel, \"logLevel\", \"info\",\n\t\t\"The global logging level; Default 'info'\")\n\n\tflag.StringVar(&c.ControlLogging, \"controlLogging\", \"\",\n\t\t\"Override logging level for the 'control' process\")\n\n\tflag.StringVar(&c.BroadcastLogging, \"broadcastLogging\", \"\",\n\t\t\"Override logging level for the 'broadcast' processes\")\n\n\tflag.StringVar(&c.DeliverLogging, \"deliverLogging\", \"\",\n\t\t\"Override logging level for the 'deliver' processes\")\n\n\tflag.Parse()\n\n\tif c.ControlLogging == \"\" {\n\t\tc.ControlLogging = logLevel\n\t}\n\tif c.BroadcastLogging == \"\" {\n\t\tc.BroadcastLogging = logLevel\n\t}\n\tif c.DeliverLogging == \"\" {\n\t\tc.DeliverLogging = logLevel\n\t}\n\n\tinitLogging(c.ControlLogging)\n\n\trequireUint16(\"bclients\", c.Bclients)\n\trequireUint16(\"dclients\", c.Dclients)\n\trequireUint16(\"channels\", c.Channels)\n\trequireNonEmpty(\"bServers\", bServers)\n\tif dServers == \"\" {\n\t\tdServers = bServers\n\t}\n\trequireUint32(\"transactions\", c.Transactions)\n\trequirePosInt(\"payload\", c.Payload)\n\tif c.Payload < TxHeaderSize {\n\t\tlogger.Infof(\"Payload size will be set to the default (%d bytes)\\n\",\n\t\t\tTxHeaderSize)\n\t\tc.Payload = TxHeaderSize\n\t}\n\trequirePosInt(\"burst\", c.Burst)\n\trequirePosDuration(\"delay\", c.Delay)\n\trequirePosInt(\"window\", c.Window)\n\trequirePosInt(\"ackevery\", c.AckEvery)\n\trequireLE(\"ackevery\", \"window\", c.AckEvery, c.Window)\n\trequirePosDuration(\"timeout\", c.Timeout)\n\n\tc.Bservers = strings.Split(bServers, \",\")\n\tc.NumBservers = len(c.Bservers)\n\n\tc.Dservers = strings.Split(dServers, \",\")\n\tc.NumDservers = len(c.Dservers)\n\n\tlogger.Infof(\"Configuration\")\n\tlogger.Infof(\" Broadcast Servers: %d: %v\", c.NumBservers, c.Bservers)\n\tlogger.Infof(\" Broadcast Clients: %d\", c.Bclients)\n\tlogger.Infof(\" Deliver Servers : %d: %v\", c.NumDservers, c.Dservers)\n\tlogger.Infof(\" Deliver Clients : %d\", c.Dclients)\n\tlogger.Infof(\" Channels : %d\", c.Channels)\n\tlogger.Infof(\" Transactions : %d\", c.Transactions)\n\tlogger.Infof(\" Payload : %d\", c.Payload)\n\tlogger.Infof(\" Burst : %d\", c.Burst)\n\tlogger.Infof(\" Delay : %s\", c.Delay.String())\n\tlogger.Infof(\" Window : %d\", c.Window)\n\tlogger.Infof(\" AckEvery : %d\", c.AckEvery)\n\tlogger.Infof(\" Broadcast? 
: %v\", c.Broadcast)\n\n\tc.TotalBroadcastClients =\n\t\tuint64(c.NumBservers) * uint64(c.Channels) * uint64(c.Bclients)\n\tc.TxBroadcastPerClient = uint64(c.Transactions)\n\tc.BytesBroadcastPerClient = c.TxBroadcastPerClient * uint64(c.Payload)\n\tc.TotalTxBroadcast = uint64(c.TotalBroadcastClients) * c.TxBroadcastPerClient\n\tc.TotalBytesBroadcast = c.TotalTxBroadcast * uint64(c.Payload)\n\n\tc.TotalDeliverClients =\n\t\tuint64(c.NumDservers) * uint64(c.Channels) * uint64(c.Dclients)\n\tc.TxDeliveredPerClient =\n\t\tuint64(c.NumBservers) * uint64(c.Bclients) * uint64(c.Transactions)\n\tc.BytesDeliveredPerClient = c.TxDeliveredPerClient * uint64(c.Payload)\n\tc.TotalTxDelivered = c.TxDeliveredPerClient * c.TotalDeliverClients\n\tc.TotalBytesDelivered = c.TotalTxDelivered * uint64(c.Payload)\n\n\treturn c\n}", "func New() echo.MiddlewareFunc {\n\treturn NewWithLogger(logrus.StandardLogger())\n}", "func New() echo.MiddlewareFunc {\n\treturn NewWithLogger(logrus.StandardLogger())\n}", "func init() {\n\tvar logger = log.Get()\n\tlogger.Info(\"Processing Golang plugin init function!!\" )\n\t//Here you write the code for db connection\n}", "func (p *MockDeployPlugin) Create(cluster *lang.Cluster, deployName string, params util.NestedParameterMap, eventLog *event.Log) error {\n\ttime.Sleep(p.SleepTime)\n\treturn nil\n}", "func New(serviceName, env, source string) *Datadog {\n\tdatadog, err := statsd.New(source)\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\n\t// Get hostname\n\thost, err := os.Hostname()\n\tif err != nil {\n\t\thost = \"undefined\"\n\t}\n\n\t// Get service name\n\tif len(serviceName) < 1 {\n\t\tlog.Fatal(errors.New(\"Datadog service name should be provided\"))\n\t}\n\n\tdatadog.Namespace = fmt.Sprintf(\"enterprise_%s.\", serviceName)\n\tdatadog.Tags = append(datadog.Tags, \"env:\"+env, \"host:\"+host)\n\n\tlog.Println(\"Datadog initialized...\")\n\n\treturn &Datadog{\n\t\tclient: datadog,\n\t}\n}", "func New(s shorter.Store, c interface{}) (shorter.Shorter, error) {\n\t// load extends configuration\n\t// TODO: implement\n\treturn nil, errors.New(\"unimplemented\")\n}", "func New(logger model.Logger) API {\n\treturn API{logger: logger}\n}", "func init() {\n\t// Log as logstash instead of the default ASCII formatter.\n\tLog.Logger.Formatter = new(logrus.TextFormatter)\n\n\t// Output to stdout, could also be a file.\n\tLog.Logger.Out = os.Stdout\n\n\t// Only log the warning severity or above.\n\tLog.Logger.Level = logrus.InfoLevel\n}", "func New(format, level string, atomic zap.AtomicLevel) (*Logger, error) {\n\tlogFormat, err := logger.MapFormat(format)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlogLevel, err := logger.MapLevel(level)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog, err := logger.NewWithAtomicLevel(logFormat, atomic)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = logger.InitKlog(log, logLevel); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Redirects logs those are being written using standard logging mechanism to klog\n\t// to avoid logs from controller-runtime being pushed to the standard logs.\n\tklog.CopyStandardLogTo(\"ERROR\")\n\n\treturn &Logger{Logger: log}, nil\n}", "func New(target io.Writer, prefix string) Structured {\n\t// wrapedy wrap!\n\treturn &basicStructured{&defaultLogger{log.New(target, prefix, log.LstdFlags)}}\n}", "func newTemplateHelper(cr *v1alpha1.Installation, extraParams map[string]string, config *config.Monitoring) *TemplateHelper {\n\tparam := Parameters{\n\t\tPlaceholder: Placeholder,\n\t\tNamespace: 
config.GetNamespace(),\n\t\tMonitoringKey: config.GetLabelSelector(),\n\t\tExtraParams: extraParams,\n\t}\n\n\ttemplatePath, exists := os.LookupEnv(\"TEMPLATE_PATH\")\n\tif !exists {\n\t\ttemplatePath = \"./templates/monitoring\"\n\t}\n\n\tmonitoringKey, exists := os.LookupEnv(\"MONITORING_KEY\")\n\tif exists {\n\t\tparam.MonitoringKey = monitoringKey\n\t}\n\n\treturn &TemplateHelper{\n\t\tParameters: param,\n\t\tTemplatePath: templatePath,\n\t}\n}", "func New() *Logger {\n\n\tbaseLogrus := logrus.New()\n\n\tvar logger = &Logger{baseLogrus}\n\n\tf, err := os.OpenFile(\"dummy-api.log\", os.O_CREATE|os.O_WRONLY, 0666)\n\tif err != nil {\n\t\tlog.Fatalf(\"unable to interact with log file: %s\", err)\n\t}\n\n\tlogger.SetFormatter(&logrus.JSONFormatter{\n\t\tTimestampFormat: \"02-01-2006 15:04:05\", // DD-MM-YYYY HH:MM:SS\n\n\t})\n\n\toutputs := io.MultiWriter(os.Stderr, f) // Write to both standard error and the log file.\n\tlogger.Out = outputs\n\n\treturn logger\n\n}", "func init() {\n\tlogger = log.New(os.Stderr, \"\", 0)\n\tLogKeys = make(map[string]bool)\n\tlogNoTime = false\n}", "func New() *AttestorPlugin {\n\treturn &AttestorPlugin{}\n}", "func New(category string) (*zap.Logger, error) {\n\tvar config zap.Config\n\tif os.Getenv(\"IDBRO_ENV\") == \"production\" {\n\t\tconfig = zap.NewProductionConfig()\n\t} else {\n\t\tconfig = zap.NewDevelopmentConfig()\n\t}\n\n\t// Remove caller info\n\tconfig.DisableCaller = true\n\n\t// Set the initial category\n\tconfig.InitialFields = map[string]interface{}{\n\t\t\"category\": category,\n\t}\n\n\treturn config.Build()\n}", "func New() Logger {\n\n\tzerolog.TimeFieldFormat = zerolog.TimeFormatUnix\n\n\tlogLevel := zerolog.InfoLevel\n\n\tzerolog.SetGlobalLevel(logLevel)\n\n\tlogger := zerolog.New(os.Stdout).With().Timestamp().Logger()\n\n\treturn Logger{logger: &logger}\n}", "func newConfig() *Config {\n\treturn &Config{\n\t\tgeneral{\n\t\t\tVerbose: false,\n\t\t},\n\t\tserver{\n\t\t\tType: \"http\",\n\t\t\tHost: \"0.0.0.0\",\n\t\t},\n\t\tmongo{\n\t\t\tHost: \"0.0.0.0:27017\",\n\t\t\tDatabase: \"etlog\",\n\t\t\tCollection: \"logs\",\n\t\t},\n\t}\n}" ]
[ "0.6625022", "0.61418605", "0.57407326", "0.56890637", "0.5624623", "0.5587943", "0.54636955", "0.5444958", "0.54293525", "0.5407665", "0.5355695", "0.5343652", "0.5317818", "0.52984", "0.5295035", "0.52913904", "0.529004", "0.52157074", "0.5208905", "0.52056134", "0.5202887", "0.5188493", "0.518456", "0.5179337", "0.5150803", "0.5147694", "0.5142975", "0.5138523", "0.513502", "0.51213384", "0.5114248", "0.5107911", "0.51074535", "0.51050514", "0.5099767", "0.5092991", "0.5091093", "0.5089603", "0.5087416", "0.50847596", "0.5082557", "0.5069874", "0.50618374", "0.5059837", "0.50594395", "0.50593007", "0.50561357", "0.50485015", "0.5038444", "0.5022783", "0.50226414", "0.5021674", "0.5018822", "0.50156283", "0.5009829", "0.5005423", "0.49927488", "0.49921897", "0.49903297", "0.49865046", "0.49802306", "0.4974369", "0.49703285", "0.49638224", "0.49610865", "0.49541366", "0.49471182", "0.49471182", "0.4943241", "0.49397978", "0.49307665", "0.49295858", "0.4924703", "0.49185064", "0.49168155", "0.49114636", "0.49107662", "0.49098474", "0.49048358", "0.49045983", "0.4888071", "0.488652", "0.4882476", "0.4877083", "0.4877083", "0.48760766", "0.48740608", "0.48628855", "0.48628646", "0.48555672", "0.48537543", "0.48533756", "0.48531994", "0.48529837", "0.48515928", "0.48483855", "0.48456967", "0.4843608", "0.48383507", "0.48368517" ]
0.64876527
1
createHTTPClient creates a client to access the API
func (logstash *Logstash) createHTTPClient() (*http.Client, error) {
	tlsConfig, err := logstash.ClientConfig.TLSConfig()
	if err != nil {
		return nil, err
	}

	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: tlsConfig,
		},
		Timeout: time.Duration(logstash.Timeout),
	}

	return client, nil
}
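A hedged sketch of a caller that builds this client lazily and reuses it across collections; the client field and the Gather signature are assumptions for illustration, not taken from the source:

// Gather is a hypothetical entry point: it creates the HTTP client on first
// use and caches it on the (assumed) client field for subsequent runs.
func (logstash *Logstash) Gather(acc telegraf.Accumulator) error {
	if logstash.client == nil {
		client, err := logstash.createHTTPClient()
		if err != nil {
			return err
		}
		logstash.client = client
	}
	// ... issue requests against logstash.URL using logstash.client ...
	return nil
}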
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func createHTTPClient() *http.Client {\n\tclient := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tMaxIdleConnsPerHost: MaxIdleConnections,\n\t\t},\n\t\tTimeout: time.Duration(RequestTimeout) * time.Second,\n\t}\n\n\treturn client\n}", "func createHTTPClient() *http.Client {\n\tclient := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tMaxIdleConnsPerHost: 15,\n\t\t},\n\t\tTimeout: time.Duration(10) * time.Second,\n\t}\n\n\treturn client\n}", "func createHTTPClient() *http.Client {\n\tclient := &http.Client{}\n\tif insecure {\n\t\thttp.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true}\n\t}\n\treturn client\n}", "func createHTTPClient() *http.Client {\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\tMaxIdleConnsPerHost: 1,\n\t\tDisableKeepAlives: true,\n\t}\n\n\treturn &http.Client{\n\t\tTransport: tr,\n\t\tTimeout: time.Second * 60,\n\t}\n}", "func CreateHTTPClient(requestURL string) (*Client, error) {\n\t_, err := url.ParseRequestURI(requestURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Client{\n\t\tHTTPClient: &http.Client{\n\t\t\tTimeout: time.Duration(requestTimeout) * time.Second,\n\t\t},\n\t\tbaseURL: requestURL,\n\t}, nil\n}", "func CreateHTTPClient(roundTripper func(*http.Request) (*http.Response, error)) *http.Client {\n\treturn &http.Client{\n\t\tTransport: roundTripperFunc(roundTripper),\n\t}\n}", "func NewHTTPClient() *http.Client {\n\n\ttr := &http.Transport{\n\t\t//TLSClientConfig: &tls.Config{\n\t\t//\tInsecureSkipVerify: conf.InsecureSkipVerify,\n\t\t//},\n\t\tMaxIdleConnsPerHost: DefaultMaxIdleConnsPerHost,\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDial: (&net.Dialer{\n\t\t\tTimeout: DefaultTimeout,\n\t\t\tKeepAlive: DefaultKeepAlive,\n\t\t}).Dial,\n\t\tTLSHandshakeTimeout: DefaultTimeout,\n\t}\n\n\treturn &http.Client{\n\t\tTimeout: DefaultTimeout,\n\t\tTransport: tr,\n\t}\n}", "func newHTTPClient(cfg *OutboundCommConfig) (*http.Client, error) {\n\tvar err error\n\tvar caCertPool tlsCertPool.CertPool\n\tif cfg.CACertsPaths != \"\" {\n\t\tcaCertPool, err = tlsCertPool.NewCertPool(false)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"Failed to create new Cert Pool\")\n\t\t}\n\n\t\tcaCertsPaths := strings.Split(cfg.CACertsPaths, \",\")\n\t\tvar caCerts []string\n\t\tfor _, path := range caCertsPaths {\n\t\t\tif path == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Create a pool with server certificates\n\t\t\tcaCert, e := ioutil.ReadFile(filepath.Clean(path))\n\t\t\tif e != nil {\n\t\t\t\treturn nil, errors.Wrap(e, \"Failed Reading server certificate\")\n\t\t\t}\n\t\t\tcaCerts = append(caCerts, string(caCert))\n\t\t}\n\n\t\tcaCertPool.Add(tlsCertPool.DecodeCerts(caCerts)...)\n\t} else {\n\t\tcaCertPool, err = tlsCertPool.NewCertPool(true)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// update the config's caCertPool\n\tcfg.caCertPool = caCertPool\n\n\ttlsConfig, err := buildNewCertPool(cfg.caCertPool)\n\tif err != nil {\n\t\tlog.Printf(\"HTTP Transport - Failed to build/get Cert Pool: %s\", err)\n\t\treturn nil, err\n\t}\n\n\treturn &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tTLSClientConfig: tlsConfig,\n\t\t},\n\t\tTimeout: cfg.Timeout,\n\t}, nil\n}", "func NewHTTPClient(apiEndpoint string, pageSize int64, setAuth func(r *http.Request)) *APIClient {\n\treturn &APIClient{\n\t\tconn: connector.NewHTTPConnector(apiEndpoint, pageSize, setAuth),\n\t}\n}", "func NewHTTPClient(slog slog.Logger, filer sio.Filer) (clt Client, err 
error) {\n\thttpClt := &HTTPClient{logger: slog}\n\thttpClt.client = httpClt\n\thttpClt.filer = filer\n\treturn httpClt.client, nil\n}", "func NewClient(httpClient *http.Client) *Client {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\tbaseURL, _ := url.Parse(baseURL)\n\n\tc := &Client{client: httpClient, BaseURL: baseURL, UserAgent: userAgent}\n\tc.common.client = c\n\tc.RRSet = (*RRSetService)(&c.common)\n\tc.RData = (*RDataService)(&c.common)\n\n\treturn c\n}", "func (c *Config) CreateHTTPClient() *http.Client {\n\tif c.HTTPTransport != nil {\n\t\treturn &http.Client{\n\t\t\tTransport: c.HTTPTransport,\n\t\t}\n\t}\n\treturn http.DefaultClient\n}", "func NewClient(httpClient *http.Client, URL string, Token string, Source string, SourceType string, Index string) (*Client) {\n\t// Create a new client\n\tif httpClient == nil {\n\t\ttr := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}} // turn off certificate checking\n\t\thttpClient = &http.Client{Timeout: time.Second * 20, Transport: tr}\n\t}\n\n\tc := &Client{HTTPClient: httpClient, URL: URL, Token: Token, Source: Source, SourceType: SourceType, Index: Index}\n\n\treturn c\n}", "func newHTTPClient(cfg *Config) (*http.Client, error) {\n\t// Configure proxy if needed.\n\tvar dial func(network, addr string) (net.Conn, error)\n\tif cfg.Proxy != \"\" {\n\t\tproxy := &socks.Proxy{\n\t\t\tAddr: cfg.Proxy,\n\t\t\tUsername: cfg.ProxyUser,\n\t\t\tPassword: cfg.ProxyPass,\n\t\t}\n\t\tdial = func(network, addr string) (net.Conn, error) {\n\t\t\tc, err := proxy.Dial(network, addr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn c, nil\n\t\t}\n\t}\n\n\t// Configure TLS if needed.\n\tvar tlsConfig *tls.Config\n\tif !cfg.NoTLS {\n\t\ttlsConfig = &tls.Config{\n\t\t\tInsecureSkipVerify: cfg.TLSSkipVerify,\n\t\t}\n\t\tif !cfg.TLSSkipVerify && cfg.RPCCert != \"\" {\n\t\t\tpem, err := ioutil.ReadFile(cfg.RPCCert)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tpool := x509.NewCertPool()\n\t\t\tif ok := pool.AppendCertsFromPEM(pem); !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid certificate file: %v\",\n\t\t\t\t\tcfg.RPCCert)\n\t\t\t}\n\t\t\ttlsConfig.RootCAs = pool\n\t\t}\n\t}\n\n\ttimeout, _ := time.ParseDuration(\"30s\")\n\n\t// Create and return the new HTTP client potentially configured with a\n\t// proxy and TLS.\n\tclient := http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDial: dial,\n\t\t\tTLSClientConfig: tlsConfig,\n\t\t},\n\t\tTimeout: timeout,\n\t}\n\treturn &client, nil\n}", "func newHTTPClient() *http.Client {\n\tclient := &http.Client{\n\t\tTimeout: defaultTimeout,\n\t}\n\treturn client\n}", "func newHTTPClient(count int) *client {\n\treturn &client{\n\t\tcli: &http.Client{\n\t\t\tTimeout: time.Second * 5,\n\t\t},\n\t\tworkers: count,\n\t\t//can be different size\n\t\terrChan: make(chan error, count),\n\t\tseen: make(map[int]struct{}),\n\t\tpath: \"http://host.docker.internal:9010/objects/\",\n\t}\n}", "func createClient(options *Options) (c *Client) {\n\n\t// Create a client\n\tc = new(Client)\n\n\t// Set options (either default or user modified)\n\tif options == nil {\n\t\toptions = ClientDefaultOptions()\n\t}\n\n\t// dial is the net dialer for clientDefaultTransport\n\tdial := &net.Dialer{KeepAlive: options.DialerKeepAlive, Timeout: options.DialerTimeout}\n\n\t// clientDefaultTransport is the default transport struct for the HTTP client\n\tclientDefaultTransport := &http.Transport{\n\t\tDialContext: dial.DialContext,\n\t\tExpectContinueTimeout: 
options.TransportExpectContinueTimeout,\n\t\tIdleConnTimeout: options.TransportIdleTimeout,\n\t\tMaxIdleConns: options.TransportMaxIdleConnections,\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tTLSHandshakeTimeout: options.TransportTLSHandshakeTimeout,\n\t}\n\n\t// Determine the strategy for the http client (no retry enabled)\n\tif options.RequestRetryCount <= 0 {\n\t\tc.httpClient = httpclient.NewClient(\n\t\t\thttpclient.WithHTTPTimeout(options.RequestTimeout),\n\t\t\thttpclient.WithHTTPClient(&http.Client{\n\t\t\t\tTransport: clientDefaultTransport,\n\t\t\t\tTimeout: options.RequestTimeout,\n\t\t\t}),\n\t\t)\n\t} else { // Retry enabled\n\t\t// Create exponential back-off\n\t\tbackOff := heimdall.NewExponentialBackoff(\n\t\t\toptions.BackOffInitialTimeout,\n\t\t\toptions.BackOffMaxTimeout,\n\t\t\toptions.BackOffExponentFactor,\n\t\t\toptions.BackOffMaximumJitterInterval,\n\t\t)\n\n\t\tc.httpClient = httpclient.NewClient(\n\t\t\thttpclient.WithHTTPTimeout(options.RequestTimeout),\n\t\t\thttpclient.WithRetrier(heimdall.NewRetrier(backOff)),\n\t\t\thttpclient.WithRetryCount(options.RequestRetryCount),\n\t\t\thttpclient.WithHTTPClient(&http.Client{\n\t\t\t\tTransport: clientDefaultTransport,\n\t\t\t\tTimeout: options.RequestTimeout,\n\t\t\t}),\n\t\t)\n\t}\n\n\t// Create a last Request and parameters struct\n\tc.LastRequest = new(LastRequest)\n\tc.LastRequest.Error = new(Error)\n\tc.Parameters = &Parameters{\n\t\tUserAgent: options.UserAgent,\n\t}\n\treturn\n}", "func MakeHTTPClient(request *http.Request) (*http.Client, error) {\n\tcertFile := viper.GetString(\"AUTH.client_cert\")\n\tkeyFile := viper.GetString(\"AUTH.client_key\")\n\tif certFile != \"\" && keyFile != \"\" {\n\t\tcert, err := tls.LoadX509KeyPair(certFile, keyFile)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Failed to load client key and certificate\")\n\t\t\treturn nil, err\n\t\t}\n\n\t\ttlsConfig := &tls.Config{\n\t\t\tCertificates: []tls.Certificate{cert},\n\t\t}\n\n\t\tcaFile := viper.GetString(\"AUTH.root_ca\")\n\t\tif caFile != \"\" {\n\t\t\tcaCerts := x509.NewCertPool()\n\n\t\t\tcaData, err := ioutil.ReadFile(caFile)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"Failed to load root CA\")\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tcaCerts.AppendCertsFromPEM(caData)\n\t\t\ttlsConfig.RootCAs = caCerts\n\t\t}\n\n\t\ttlsConfig.BuildNameToCertificate()\n\t\ttransport := &http.Transport{TLSClientConfig: tlsConfig, Proxy: http.ProxyFromEnvironment}\n\t\treturn &http.Client{Transport: transport}, nil\n\t}\n\n\t//dev only\n\tif rhIdentity := viper.GetString(\"AUTH.x_rh_identity\"); rhIdentity != \"\" {\n\t\trequest.Header.Set(\"x-rh-identity\", rhIdentity)\n\t}\n\tuser := viper.GetString(\"AUTH.user\")\n\tpassword := viper.GetString(\"AUTH.password\")\n\tif user != \"\" && password != \"\" {\n\t\trequest.SetBasicAuth(user, password)\n\t}\n\treturn &http.Client{}, nil\n}", "func NewClient(httpClient *http.Client) *Client {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\tbaseURL, err := url.Parse(baseURL)\n\tc := &Client{client: httpClient, BaseURL: baseURL, err: err}\n\tc.common.client = c\n\tc.Teams = (*TeamsService)(&c.common)\n\tc.Invitations = (*InvitationsService)(&c.common)\n\treturn c\n}", "func CreateHTTPClient(handler http.Handler) (*http.Client, func()) {\n\ts := httptest.NewServer(handler)\n\n\tcli := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDialContext: func(_ context.Context, network, _ string) (net.Conn, error) {\n\t\t\t\treturn net.Dial(network, 
s.Listener.Addr().String())\n\t\t\t},\n\t\t},\n\t}\n\n\treturn cli, s.Close\n}", "func (rpc *RpcClient) newHTTPClient() (*http.Client, error) {\n\t// Configure proxy if needed.\n\tvar dial func(network, addr string) (net.Conn, error)\n\n\t// Configure TLS if needed.\n\tvar tlsConfig *tls.Config\n\n\t// Create and return the new HTTP client potentially configured with a\n\t// proxy and TLS.\n\tclient := http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDial: dial,\n\t\t\tTLSClientConfig: tlsConfig,\n\t\t\tDialContext: (&net.Dialer{\n\t\t\t\tTimeout: 5 * time.Second,\n\t\t\t\tKeepAlive: 5 * time.Second,\n\t\t\t\tDualStack: true,\n\t\t\t}).DialContext,\n\t\t},\n\t}\n\treturn &client, nil\n}", "func NewHTTPClient(retries int) HTTPClient {\n\tif retries <= 0 {\n\t\tpanic(\"retries should be greater than 0\")\n\t}\n\treturn &httpClient{\n\t\tretries: retries,\n\t}\n}", "func NewHTTPClient(url string, backend Backend) (*HTTPClient, error) {\n b := backend\n if b == nil {\n b = newDefaultBackend()\n }\n return &HTTPClient{url: url, backend: b}, nil\n}", "func NewHTTPClient() *HTTPClient {\n\treturn &HTTPClient{\n\t\tfasthttpClient: fasthttp.Client{},\n\t}\n}", "func (rpc *RpcClient) newHTTPClient() (*http.Client, error) {\n\t// Configure proxy if needed.\n\tvar dial func(network, addr string) (net.Conn, error)\n\tif rpc.Cfg.OptionConfig.Proxy != \"\" {\n\t\tproxy := &socks.Proxy{\n\t\t\tAddr: rpc.Cfg.OptionConfig.Proxy,\n\t\t\tUsername: rpc.Cfg.OptionConfig.ProxyUser,\n\t\t\tPassword: rpc.Cfg.OptionConfig.ProxyPass,\n\t\t}\n\t\tdial = func(network, addr string) (net.Conn, error) {\n\t\t\tc, err := proxy.Dial(network, addr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn c, nil\n\t\t}\n\t}\n\n\t// Configure TLS if needed.\n\tvar tlsConfig *tls.Config\n\tif !rpc.Cfg.SoloConfig.NoTLS && rpc.Cfg.SoloConfig.RPCCert != \"\" {\n\t\tpem, err := ioutil.ReadFile(rpc.Cfg.SoloConfig.RPCCert)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpool := x509.NewCertPool()\n\t\tpool.AppendCertsFromPEM(pem)\n\t\ttlsConfig = &tls.Config{\n\t\t\tRootCAs: pool,\n\t\t\tInsecureSkipVerify: rpc.Cfg.SoloConfig.NoTLS,\n\t\t}\n\t} else {\n\t\ttlsConfig = &tls.Config{\n\t\t\tInsecureSkipVerify: rpc.Cfg.SoloConfig.NoTLS,\n\t\t}\n\t}\n\n\t// Create and return the new HTTP client potentially configured with a\n\t// proxy and TLS.\n\tclient := http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDial: dial,\n\t\t\tTLSClientConfig: tlsConfig,\n\t\t\tDialContext: (&net.Dialer{\n\t\t\t\tTimeout: time.Duration(rpc.Cfg.OptionConfig.Timeout) * time.Second,\n\t\t\t\tKeepAlive: time.Duration(rpc.Cfg.OptionConfig.Timeout) * time.Second,\n\t\t\t\tDualStack: true,\n\t\t\t}).DialContext,\n\t\t},\n\t}\n\treturn &client, nil\n}", "func NewHTTPClient() *HTTPClient {\n\treturn &HTTPClient{\n\t\tClient: http.DefaultClient,\n\t\tCacheDir: viper.GetString(\"http_cache_dir\"),\n\t}\n}", "func newHTTPClient() *http.Client {\n\treturn &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tProxy: http.ProxyFromEnvironment,\n\t\t\tDialContext: (&net.Dialer{\n\t\t\t\tTimeout: timeout,\n\t\t\t\tKeepAlive: 30 * time.Second,\n\t\t\t\tDualStack: true,\n\t\t\t}).DialContext,\n\n\t\t\tTLSHandshakeTimeout: timeout,\n\t\t\tResponseHeaderTimeout: timeout,\n\t\t\tExpectContinueTimeout: 1 * time.Second,\n\t\t\tMaxIdleConns: 5,\n\t\t\tIdleConnTimeout: 90 * time.Second,\n\t\t},\n\t}\n}", "func NewClient(apiURL string, logger lager.Logger) Client {\n\treturn &client{\n\t\trequestGenerator: rata.NewRequestGenerator(apiURL, 
api.Routes),\n\t\tgivenHTTPClient: &http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tDisableKeepAlives: false,\n\t\t\t\tResponseHeaderTimeout: 20 * time.Second,\n\t\t\t\tMaxIdleConns: 200,\n\t\t\t},\n\t\t},\n\t\tlogger: logger,\n\t}\n}", "func NewClient(httpClient *http.Client) *Client {\n\tif httpClient == nil {\n\t\thttpClient = &http.Client{}\n\t}\n\tbaseURL, _ := url.Parse(baseURL)\n\tc := &Client{\n\t\tclient: httpClient,\n\t\tBaseURL: baseURL,\n\t}\n\tc.common.client = c\n\tc.Tags = (*TagsService)(&c.common)\n\tc.Manifests = (*ManifestsService)(&c.common)\n\treturn c\n}", "func NewClient(baseURL *url.URL, token string, httpClient *http.Client) *Client {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\n\tc := &Client{client: httpClient, BaseURL: baseURL, token: token}\n\tc.Dashboards = NewDashboardsService(c)\n\tc.Datasources = NewDatasourcesService(c)\n\n\treturn c\n}", "func GetHTTPClient() *http.Client {\n tlsConfig := &tls.Config {\n InsecureSkipVerify: true, //for this test, ignore ssl certificate\n }\n\n tr := &http.Transport{TLSClientConfig: tlsConfig}\n client := &http.Client{Transport: tr}\n\n return client\n}", "func NewHTTPClient(conf Config, mgr interop.Manager, log log.Modular, stats metrics.Type) (output.Streamed, error) {\n\th, err := writer.NewHTTPClient(conf.HTTPClient, mgr, log, stats)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tw, err := NewAsyncWriter(TypeHTTPClient, conf.HTTPClient.MaxInFlight, h, log, stats)\n\tif err != nil {\n\t\treturn w, err\n\t}\n\tif !conf.HTTPClient.BatchAsMultipart {\n\t\tw = OnlySinglePayloads(w)\n\t}\n\treturn NewBatcherFromConfig(conf.HTTPClient.Batching, w, mgr, log, stats)\n}", "func GetHTTPClient() *http.Client {\r\n tlsConfig := &tls.Config {\r\n InsecureSkipVerify: true, //for this test, ignore ssl certificate\r\n }\r\n\r\n tr := &http.Transport{TLSClientConfig: tlsConfig}\r\n client := &http.Client{Transport: tr}\r\n\r\n return client\r\n}", "func CreateClientProvidingHTTPClient(httpClient HTTPClient, dockerConfig *configfile.ConfigFile) Client {\n\treturn Client{\n\t\tclient: httpClient,\n\t\tdockerConfig: dockerConfig,\n\t}\n}", "func InitHTTPClient(tracer opentracing.Tracer) {\n\thttpClient = http.NewRequests(tracer)\n}", "func NewHTTPClient(timeout time.Duration) *http.Client {\n\treturn &http.Client{\n\t\tTimeout: timeout,\n\t}\n}", "func NewClient(httpClient *http.Client) (*Client, error) {\n\tc := &Client{\n\t\thttpClient: httpClient,\n\t}\n\tu, err := url.Parse(APIEndpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.endpoint = u\n\treturn c, nil\n}", "func NewHTTPClient(ctx context.Context, clientSecretKeyFile []byte, tokenFilepath string) (*http.Client, error) {\n\tconfig, err := google.ConfigFromJSON(clientSecretKeyFile, builderAPIScope)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttokenCacheFilename := \"\"\n\tif tokenFilepath == \"\" {\n\t\ttokenCacheFilename, err = tokenCacheFile()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\ttokenCacheFilename = tokenFilepath\n\t}\n\tif !exists(tokenCacheFilename) {\n\t\tlog.Infoln(\"Could not locate OAuth2 token\")\n\t\treturn nil, errors.New(`command requires authentication. 
try to run \"gactions login\" first`)\n\t}\n\ttok, err := tokenFromFile(tokenCacheFilename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn config.Client(ctx, tok), nil\n}", "func NewHTTPClient(uri string) HTTPClient {\n\treturn HTTPClient{\n\t\tBackendURI: uri,\n\t\tclient: &http.Client{},\n\t}\n}", "func newClient(httpClient *http.Client) (c *Client) {\n\tc = &Client{httpClient: httpClient}\n\tc.service.client = c\n\tc.Auth = (*AuthService)(&c.service)\n\tc.Providers = (*ProvidersService)(&c.service)\n\tc.Projects = (*ProjectsService)(&c.service)\n\tc.Releases = (*ReleasesService)(&c.service)\n\tc.SlackChannels = (*SlackChannelsService)(&c.service)\n\tc.TelegramChats = (*TelegramChatsService)(&c.service)\n\tc.DiscordChannels = (*DiscordChannelsService)(&c.service)\n\tc.HangoutsChatWebhooks = (*HangoutsChatWebhooksService)(&c.service)\n\tc.MicrosoftTeamsWebhooks = (*MicrosoftTeamsWebhooksService)(&c.service)\n\tc.MattermostWebhooks = (*MattermostWebhooksService)(&c.service)\n\tc.RocketchatWebhooks = (*RocketchatWebhooksService)(&c.service)\n\tc.MatrixRooms = (*MatrixRoomsService)(&c.service)\n\tc.Webhooks = (*WebhooksService)(&c.service)\n\tc.Tags = (*TagsService)(&c.service)\n\treturn c\n}", "func NewClient(baseURL string, apiKey string) Client {\n\treturn &httpClient{\n\t\tapiKey: apiKey,\n\t\tbaseURL: baseURL,\n\t\tinst: &http.Client{},\n\t}\n}", "func InitHttpClient(options CallFireRequestOptions) (*http.Client, *http.Request) {\n\t// Initialise a new http client\n\tclient := &http.Client{}\n\n\t// Setup the request object\n\trequest, err := http.NewRequest(\n\t\toptions.ReqType, options.Url+options.RequestOptions, nil,\n\t)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Set the authentication credentials\n\trequest.SetBasicAuth(options.Login, options.Secret)\n\n\treturn client, request\n}", "func newHTTPClient(\n\tapiKey string,\n\tdebug bool,\n\tomitRetry bool,\n\ttimeout time.Duration,\n\ttransport http.RoundTripper,\n) httpC {\n\tif transport == nil {\n\t\ttransport = http.DefaultTransport\n\t}\n\treturn &gcmHTTP{\n\t\tGCMURL: httpAddress,\n\t\tapiKey: apiKey,\n\t\thttpClient: &http.Client{\n\t\t\tTransport: transport,\n\t\t\tTimeout: timeout,\n\t\t},\n\t\tdebug: debug,\n\t\tomitRetry: omitRetry,\n\t}\n}", "func NewClient(httpClient *http.Client, apiID, apiKey string) *Client {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\n\tclient := &Client{}\n\n\tclient.SetHTTPClient(httpClient)\n\tclient.SetAPIID(apiID)\n\tclient.SetAPIKey(apiKey)\n\tclient.SetBaseURL(BaseURL)\n\tclient.SetDebug(false)\n\tclient.SetUserAgent(userAgent)\n\tclient.SetMediaType(mediaType)\n\tclient.SetCharset(charset)\n\n\treturn client\n}", "func NewClient(httpClient *http.Client) *Client {\n\tif httpClient == nil {\n\t\thttpClient = &http.Client{}\n\t}\n\tbaseURL, _ := url.Parse(defaultBaseURL)\n\n\tc := &Client{client: httpClient, BaseURL: baseURL, UserAgent: userAgent}\n\tc.common.client = c\n\tc.Datasets = (*DatasetsService)(&c.common)\n\tc.Streams = (*StreamsService)(&c.common)\n\tc.Users = (*UsersService)(&c.common)\n\tc.Groups = (*GroupsService)(&c.common)\n\tc.Pages = (*PagesService)(&c.common)\n\tc.Logs = (*ActivityLogsService)(&c.common)\n\tc.Accounts = (*AccountsService)(&c.common)\n\n\treturn c\n}", "func NewHTTPClient() *http.Client {\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tInsecureSkipVerify: true, //nolint:gosec // Needs to be enabled in suites. 
Not used in production.\n\t\t},\n\t}\n\n\treturn &http.Client{\n\t\tTransport: tr,\n\t\tCheckRedirect: func(req *http.Request, via []*http.Request) error {\n\t\t\treturn http.ErrUseLastResponse\n\t\t},\n\t}\n}", "func NewHTTPClient() (client.Client, error) {\n\taddr := Settings.Config.URL.String()\n\tc, err := client.NewHTTPClient(client.HTTPConfig{\n\t\tAddr: addr,\n\t\tUsername: Settings.Config.Username,\n\t\tPassword: Settings.Config.Password,\n\t\tTimeout: Settings.Config.Timeout,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.Debugf(\"action=NewHTTPClient addr=%s username=%s\", addr, Settings.Config.Username)\n\treturn c, nil\n}", "func NewClient() *Client {\n\tclient := &Client{\n\t\turl: baseURL,\n\t\thttpClient: &http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tMaxConnsPerHost: maxConnsPerHost,\n\t\t\t},\n\t\t},\n\t}\n\n\treturn client\n}", "func NewClient(httpClient *http.Client, username string, password string, atlantisURL string) *Client {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\treturn &Client{\n\t\tHTTPClient: httpClient,\n\t\tUsername: username,\n\t\tPassword: password,\n\t\tBaseURL: BaseURL,\n\t\tAtlantisURL: atlantisURL,\n\t}\n}", "func NewHTTPClient(proxyNetwork, proxyAddress string, serviceNetwork, service string) http.Client {\n\tproxyClient := Client{proxyNetwork: proxyNetwork, proxyAddress: proxyAddress, serviceNetwork: serviceNetwork, service: service}\n\ttrans := &http.Transport{\n\t\tDial: proxyClient.proxyDial,\n\t\tDisableKeepAlives: false,\n\t}\n\treturn http.Client{Transport: trans}\n}", "func NewHTTPClientFromConfig(cfg *config.Config) (*HTTPClient, error) {\n\t// get clients\n\tordererClients, err := getOrdererHTTPClients(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpeerClients, err := getPeerHTTPClients(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &HTTPClient{\n\t\tordererHTTPClients: ordererClients,\n\t\tpeerHTTPClients: peerClients,\n\t\tprivKey: cfg.KeyStore.Privs[0],\n\t}, nil\n}", "func NewClient(creds Authenticator, apiBaseURL string, httpClient httpClient) *Client {\n\tif apiBaseURL == \"\" {\n\t\tapiBaseURL = fmt.Sprintf(\"https://%s.api.dragonchain.com\", creds.GetDragonchainID())\n\t}\n\tif httpClient == nil {\n\t\thttpClient = &http.Client{}\n\t}\n\tclient := &Client{\n\t\tcreds: creds,\n\t\tapiBaseURL: apiBaseURL,\n\t\thttpClient: httpClient,\n\t}\n\n\treturn client\n}", "func CreateClient() *http.Client {\n\tproxy := os.Getenv(\"http_proxy\")\n\tvar proxyURL *url.URL\n\tvar transport *http.Transport\n\tif proxy != \"\" {\n\t\tproxyURL, _ = url.Parse(proxy)\n\t}\n\tif proxyURL != nil {\n\t\ttransport = &http.Transport{\n\t\t\tProxy: http.ProxyURL(proxyURL),\n\t\t}\n\t}\n\tvar client http.Client\n\tif transport != nil {\n\t\tclient = http.Client{\n\t\t\tTransport: transport,\n\t\t}\n\t}\n\n\t//client.Timeout = time.Second * 5\n\treturn &client\n}", "func NewClient(baseURL string, defaultHeaders map[string]string) *Client {\n\turl, _ := url.Parse(baseURL)\n\tif defaultHeaders == nil {\n\t\tdefaultHeaders = make(map[string]string)\n\t}\n\treturn &Client{httpClient: &http.Client{}, baseURL: url, defaultHeaders: defaultHeaders}\n}", "func GetHTTPClient() *http.Client { return httpClientPool.Get().(*http.Client) }", "func NewHTTPClientAPI(client http.Client, endpoint, from, accessKeyID, secretAccessKey string) *Option {\n\tSetDefaultHTTPClient(client)\n\n\treturn NewAPI(endpoint, from, accessKeyID, secretAccessKey)\n}", "func CreateClient() *Client {\n\tclient := 
&http.Client{}\n\tbaseURL, _ := url.Parse(defaultBaseURL)\n\n\tc := &Client{HTTPClient: client, BaseURL: baseURL, RequestEncoding: defaultRequestEncoding}\n\tc.common.client = c\n\tc.Health = (*HealthService)(&c.common)\n\treturn c\n}", "func initClient() *http.Client {\n\treturn &http.Client{\n\t\tTimeout: time.Duration(timeout) * time.Second,\n\t\tTransport: &http.Transport{\n\t\t\tMaxIdleConns: 10,\n\t\t\tMaxIdleConnsPerHost: 10,\n\t\t\tMaxConnsPerHost: 10,\n\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\tInsecureSkipVerify: true,\n\t\t\t},\n\t\t},\n\t}\n}", "func NewHTTPClient(source Source) (*http.Client, error) {\n\tcerts, err := x509.SystemCertPool()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(source.CACerts) > 0 {\n\t\tfor i := range source.CACerts {\n\t\t\tcerts.AddCert(source.CACerts[i])\n\t\t}\n\t}\n\n\treturn &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\tInsecureSkipVerify: true,\n\t\t\t\tRootCAs: certs,\n\t\t\t},\n\t\t\tProxy: func(req *http.Request) (*url.URL, error) {\n\t\t\t\tif strings.TrimSpace(source.HTTPProxy) != \"\" {\n\t\t\t\t\tos.Setenv(\"HTTP_PROXY\", source.HTTPProxy)\n\t\t\t\t}\n\n\t\t\t\tif strings.TrimSpace(source.HTTPSProxy) != \"\" {\n\t\t\t\t\tos.Setenv(\"HTTPS_PROXY\", source.HTTPSProxy)\n\t\t\t\t}\n\n\t\t\t\tif strings.TrimSpace(source.NoProxy) != \"\" {\n\t\t\t\t\tos.Setenv(\"NO_PROXY\", source.NoProxy)\n\t\t\t\t}\n\n\t\t\t\treturn http.ProxyFromEnvironment(req)\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func NewClient(baseClient *httpclient.Client) *Client {\n\treturn &Client{\n\t\thttp: baseClient,\n\t}\n}", "func NewHTTPClient(url, endpoint string, timeout time.Duration) *HTTPClient {\n\treturn &HTTPClient{\n\t\turl: url,\n\t\thttpClient: &http.Client{Timeout: timeout},\n\t\tendPoint: endpoint,\n\t}\n}", "func (fmp *provider) createClient() (*http.Client, error) {\n\tswitch fmp.scheme {\n\tcase HTTPScheme:\n\t\treturn &http.Client{}, nil\n\tcase HTTPSScheme:\n\t\tpool, err := x509.SystemCertPool()\n\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to create a cert pool: %w\", err)\n\t\t}\n\n\t\tif fmp.caCertPath != \"\" {\n\t\t\tcert, err := os.ReadFile(filepath.Clean(fmp.caCertPath))\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"unable to read CA from %q URI: %w\", fmp.caCertPath, err)\n\t\t\t}\n\n\t\t\tif ok := pool.AppendCertsFromPEM(cert); !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"unable to add CA from uri: %s into the cert pool\", fmp.caCertPath)\n\t\t\t}\n\t\t}\n\n\t\treturn &http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\t\tInsecureSkipVerify: fmp.insecureSkipVerify,\n\t\t\t\t\tRootCAs: pool,\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"invalid scheme type: %s\", fmp.scheme)\n\t}\n}", "func newAPI(cfg *ClientConfig, options ...ClientOption) *Client {\n\tclient := &Client{\n\t\tConfig: cfg,\n\t\thttpClient: &http.Client{},\n\t}\n\n\tfor _, option := range options {\n\t\toption(client)\n\t}\n\n\treturn client\n}", "func NewHTTPClient() (*HTTPClient, error) {\n\tresp, err := http.Get(\"https://raw.githubusercontent.com/cvandeplas/pystemon/master/user-agents.txt\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create the client and attach a cookie jar\n\tclient := &http.Client{}\n\tclient.Jar, _ = cookiejar.New(nil)\n\n\t// Splits the user-agents into a slice and returns an HTTPClient 
with a random\n\t// user-agent on the header\n\tua := strings.Split(string(b), \"\\n\")\n\trand.Seed(time.Now().UnixNano())\n\treturn &HTTPClient{\n\t\tClient: client,\n\t\tUserAgent: ua[rand.Intn(len(ua))],\n\t}, nil\n}", "func NewClient(httpClient *http.Client) *Client {\n\tif httpClient == nil {\n\t\tcloned := *http.DefaultClient\n\t\thttpClient = &cloned\n\t}\n\n\tbaseURL, _ := url.Parse(defaultBaseURL)\n\n\tc := &Client{\n\t\tclient: httpClient,\n\t\tBaseURL: baseURL,\n\t}\n\n\tc.common.client = c\n\tc.Question = (*QuestionService)(&c.common)\n\tc.Token = (*TokenService)(&c.common)\n\n\treturn c\n}", "func NewHTTPClient(transport http.RoundTripper, ts TokenSource) (*HTTPClient, error) {\n\tif ts == nil {\n\t\treturn nil, errors.New(\"gcp: no credentials available\")\n\t}\n\treturn &HTTPClient{\n\t\tClient: http.Client{\n\t\t\tTransport: &oauth2.Transport{\n\t\t\t\tBase: transport,\n\t\t\t\tSource: ts,\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func NewClient(baseURL string, httpClient *http.Client) (*Client, error) {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\n\tbaseEndpoint, err := url.Parse(baseURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !strings.HasSuffix(baseEndpoint.Path, \"/\") {\n\t\tbaseEndpoint.Path += \"/\"\n\t}\n\n\tc := &Client{\n\t\tclient: httpClient,\n\t\tBaseURL: baseEndpoint,\n\t}\n\tc.common.client = c\n\tc.Boards = (*BoardsService)(&c.common)\n\tc.Epics = (*EpicsService)(&c.common)\n\tc.Issues = (*IssuesService)(&c.common)\n\tc.Sprints = (*SprintsService)(&c.common)\n\tc.Backlog = (*BacklogService)(&c.common)\n\n\treturn c, nil\n}", "func NewClient(httpClient *http.Client, username string, password string) *Client {\n\tbase := sling.New().Client(httpClient).Base(msfUrl)\n\tbase.SetBasicAuth(username, password)\n\treturn &Client{\n\t\tsling: base,\n\t\tNBA: newNBAService(base.New()),\n\t}\n}", "func NewClient(httpClient *http.Client, baseURL string) (*Client, error) {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\n\tparsedBaseURL, err := url.Parse(baseURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := &Client{\n\t\tclient: httpClient,\n\t\tbaseURL: parsedBaseURL,\n\t}\n\tc.Authentication = &AuthenticationService{client: c}\n\tc.User = &UserService{client: c}\n\tc.Project = &ProjectService{client: c}\n\tc.Report = &ReportService{client: c}\n\treturn c, nil\n}", "func NewClient(baseURL, apiKey string) *Client {\n\treturn &Client{baseURL: baseURL, apiKey: apiKey, httpClient: http.Client{}}\n}", "func NewClient(meta *metadata.Client, acc string) *http.Client {\n\treturn &http.Client{\n\t\tTransport: newRoundTripper(meta, acc),\n\t}\n}", "func New(url string, httpClient *http.Client, customHeaders http.Header) *Client {\n\tif httpClient == nil {\n\t\thttpClient = &http.Client{\n\t\t\tTimeout: defaultHTTPTimeout,\n\t\t}\n\t}\n\n\treturn &Client{\n\t\turl: url,\n\t\thttpClient: httpClient,\n\t\tcustomHeaders: customHeaders,\n\t}\n}", "func NewClient(url, token string, debug bool) *Client {\n\tc := Client{\n\t\tBaseURL: url,\n\t\t// AccessToken: token,\n\t\tDebug: debug,\n\t}\n\n\tapi := req.New()\n\t//trans, _ := api.Client().Transport.(*http.Transport)\n\t//trans.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}\n\tc.client = api\n\n\treturn &c\n}", "func NewHTTPClient(conn net.Conn, opt *codec.Option) (*Client, error) {\n\t_, _ = io.WriteString(conn, fmt.Sprintf(\"CONNECT %s HTTP/1.0\\n\\n\", defaultHandlePath))\n\n\tres, err := http.ReadResponse(bufio.NewReader(conn), &http.Request{Method: 
\"CONNECT\"})\n\tif err == nil && res.Status == \"200 Connected to Gingle RPC\" {\n\t\treturn NewRPCClient(conn, opt)\n\t}\n\n\tif err == nil {\n\t\terr = fmt.Errorf(\"client: failed to new http client, err: unexpected http response\")\n\t}\n\treturn nil, err\n}", "func NewClient() *http.Client {\n\tt := &http.Transport{\n\t\tMaxIdleConns: 10,\n\t\tIdleConnTimeout: 30 * time.Second,\n\t}\n\n\treturn &http.Client{Transport: t}\n}", "func NewClient() *http.Client {\n\treturn &http.Client{}\n}", "func DefaultHTTPClientFactory(cc *cli.Context) (*http.Client, error) {\n\tif cc == nil {\n\t\tlogrus.Panic(\"cli context has not been set\")\n\t}\n\tvar c http.Client\n\tcookieJar, _ := cookiejar.New(nil)\n\n\tif cc.GlobalIsSet(\"apiSession\") {\n\t\tvar cookies []*http.Cookie\n\t\tcookie := &http.Cookie{\n\t\t\tName: \"SESSION\",\n\t\t\tValue: cc.GlobalString(\"apiSession\"),\n\t\t}\n\t\tcookies = append(cookies, cookie)\n\t\tu, _ := url.Parse(os.Getenv(\"SPINNAKER_API\"))\n\t\tcookieJar.SetCookies(u, cookies)\n\t}\n\n\tc = http.Client{\n\t\tTimeout: time.Duration(cc.GlobalInt(\"clientTimeout\")) * time.Second,\n\t\tJar: cookieJar,\n\t}\n\n\tvar certPath string\n\tvar keyPath string\n\n\n\tif cc.GlobalIsSet(\"certPath\") {\n\t\tcertPath = cc.GlobalString(\"certPath\")\n\t} else if os.Getenv(\"SPINNAKER_CLIENT_CERT\") != \"\" {\n\t\tcertPath = os.Getenv(\"SPINNAKER_CLIENT_CERT\")\n\t} else {\n\t\tcertPath = \"\"\n\t}\n\tif cc.GlobalIsSet(\"keyPath\") {\n\t\tkeyPath = cc.GlobalString(\"keyPath\")\n\t} else if os.Getenv(\"SPINNAKER_CLIENT_KEY\") != \"\" {\n\t\tkeyPath = os.Getenv(\"SPINNAKER_CLIENT_KEY\")\n\t} else {\n\t\tkeyPath = \"\"\n\t}\n\tif cc.GlobalIsSet(\"iapToken\") {\n\t\tiapToken = cc.GlobalString(\"iapToken\")\n\t} else if os.Getenv(\"SPINNAKER_IAP_TOKEN\") != \"\" {\n\t\tiapToken = os.Getenv(\"SPINNAKER_IAP_TOKEN\")\n\t} else {\n\t\tiapToken = \"\"\n\t}\n\tc.Transport = &http.Transport{\n\t\tTLSClientConfig: &tls.Config{},\n\t}\n\n\tif certPath != \"\" && keyPath != \"\" {\n\t\tlogrus.Debug(\"Configuring TLS with pem cert/key pair\")\n\t\tcert, err := tls.LoadX509KeyPair(certPath, keyPath)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"loading x509 keypair\")\n\t\t}\n\n\t\tclientCA, err := ioutil.ReadFile(certPath)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"loading client CA\")\n\t\t}\n\n\t\tclientCertPool := x509.NewCertPool()\n\t\tclientCertPool.AppendCertsFromPEM(clientCA)\n\n\t\tc.Transport.(*http.Transport).TLSClientConfig.MinVersion = tls.VersionTLS12\n\t\tc.Transport.(*http.Transport).TLSClientConfig.PreferServerCipherSuites = true\n\t\tc.Transport.(*http.Transport).TLSClientConfig.Certificates = []tls.Certificate{cert}\n\t\tc.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify = true\n\t}\n\n\tif cc.GlobalIsSet(\"insecure\") {\n\t\tc.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify = true\n\t}\n\n\treturn &c, nil\n}", "func NewClient(httpClient *http.Client, space string, apiKey string) *Client {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\n\tbaseURL, _ := url.Parse(\"https://\" + space + \".backlog.com/api/v2/\")\n\tc := &Client{client: httpClient, BaseURL: baseURL, apiKey: apiKey}\n\tc.common.client = c\n\tc.Space = (*SpaceService)(&c.common)\n\tc.Projects = (*ProjectsService)(&c.common)\n\tc.Issues = (*IssuesService)(&c.common)\n\treturn c\n}", "func NewMyHTTPClient(timeZone, country, language, openudid string) *MyHTTPClient {\n\treturn &MyHTTPClient{\n\t\tTimezone: timeZone,\n\t\tCountry: 
country,\n\t\tLanguage: language,\n\t\tOpenudid: openudid,\n\t\tContentType: \"application/json;charset=utf-8\",\n\t\tclient: &http.Client{Timeout: 30 * time.Second},\n\t}\n}", "func NewHTTPClient(serverEndpoint string, ticket *obtainer.Client) (*HTTPClient, error) {\n\n\tendpointUrl, err := url.Parse(serverEndpoint)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error parsing endpoint: %s\", err)\n\t}\n\n\treturn &HTTPClient{\n\t\tserverEndpoint: endpointUrl,\n\t\tticket: ticket,\n\t}, nil\n}", "func NewClient(httpClient *http.Client, apikey string) *Service {\n\treturn &Service{\n\t\tsling: sling.New().Client(httpClient).Base(baseURL).Set(\"Authorization\", apikey),\n\t}\n}", "func NewClient(httpClient *http.Client) *Client {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\tbaseURL, _ := url.Parse(defaultBaseURL)\n\n\tc := &Client{client: httpClient, BaseURL: baseURL, UserAgent: userAgent}\n\tc.Items = &ItemsService{client: c}\n\tc.Users = &UsersService{client: c}\n\tc.AuthenticatedUser = &AuthenticatedUserService{client: c}\n\tc.Tags = &TagsService{client: c}\n\treturn c\n}", "func NewClient(httpClient *http.Client, atlasSubdomain string) (*Client, error) {\n\tif httpClient == nil {\n\t\thttpClient = &http.Client{}\n\t}\n\n\tvar baseURLStr strings.Builder\n\tbaseURLStr.WriteString(\"https://\")\n\tbaseURLStr.WriteString(atlasSubdomain)\n\tbaseURLStr.WriteString(\".\")\n\tbaseURLStr.WriteString(defaultBaseURL)\n\n\tbaseURL, err := url.Parse(baseURLStr.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := &Client{client: httpClient, BaseURL: baseURL}\n\tc.common.client = c\n\tc.ApplicationRole = (*ApplicationRoleService)(&c.common)\n\tc.AuditRecords = (*AuditRecordsService)(&c.common)\n\tc.AvatarsService = (*AvatarsService)(&c.common)\n\treturn c, nil\n}", "func NewClient() *http.Client {\n\ttimeout, err := time.ParseDuration(config.Config.HTTPTimeout)\n\tif err != nil {\n\t\tlogrus.Error(\"parse timeout config\", \"error\", err)\n\t\treturn nil\n\t}\n\n\treturn NewClientWithTimeout(timeout)\n}", "func NewHTTPClient(rawURL string) (Client, error) {\n\tURL, err := url.Parse(rawURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &HTTPClient{\n\t\tComponent: Component{Name: \"http-config-client-\" + URL.Host},\n\t\tURL: rawURL,\n\t}, nil\n}", "func NewClient(options *ClientOptions, customHTTPClient *http.Client,\r\n\tcustomEnvironment string) (c *Client) {\r\n\r\n\t// Create a client\r\n\tc = new(Client)\r\n\r\n\t// Set options (either default or user modified)\r\n\tif options == nil {\r\n\t\toptions = DefaultClientOptions()\r\n\t}\r\n\r\n\t// Set the options\r\n\tc.Options = options\r\n\r\n\t// Set the environment\r\n\tvar found bool\r\n\tif c.Environment, found = environments[customEnvironment]; !found {\r\n\t\tc.Environment = environments[EnvironmentProduction]\r\n\t}\r\n\r\n\t// Is there a custom HTTP client to use?\r\n\tif customHTTPClient != nil {\r\n\t\tc.httpClient = customHTTPClient\r\n\t\treturn\r\n\t}\r\n\r\n\t// dial is the net dialer for clientDefaultTransport\r\n\tdial := &net.Dialer{KeepAlive: options.DialerKeepAlive, Timeout: options.DialerTimeout}\r\n\r\n\t// clientDefaultTransport is the default transport struct for the HTTP client\r\n\tclientDefaultTransport := &http.Transport{\r\n\t\tDialContext: dial.DialContext,\r\n\t\tExpectContinueTimeout: options.TransportExpectContinueTimeout,\r\n\t\tIdleConnTimeout: options.TransportIdleTimeout,\r\n\t\tMaxIdleConns: options.TransportMaxIdleConnections,\r\n\t\tProxy: 
http.ProxyFromEnvironment,\r\n\t\tTLSHandshakeTimeout: options.TransportTLSHandshakeTimeout,\r\n\t}\r\n\r\n\t// Determine the strategy for the http client\r\n\tif options.RequestRetryCount <= 0 {\r\n\r\n\t\t// no retry enabled\r\n\t\tc.httpClient = httpclient.NewClient(\r\n\t\t\thttpclient.WithHTTPTimeout(options.RequestTimeout),\r\n\t\t\thttpclient.WithHTTPClient(&http.Client{\r\n\t\t\t\tTransport: clientDefaultTransport,\r\n\t\t\t\tTimeout: options.RequestTimeout,\r\n\t\t\t}),\r\n\t\t)\r\n\t\treturn\r\n\t}\r\n\r\n\t// Retry enabled - create exponential back-off\r\n\tc.httpClient = httpclient.NewClient(\r\n\t\thttpclient.WithHTTPTimeout(options.RequestTimeout),\r\n\t\thttpclient.WithRetrier(heimdall.NewRetrier(\r\n\t\t\theimdall.NewExponentialBackoff(\r\n\t\t\t\toptions.BackOffInitialTimeout,\r\n\t\t\t\toptions.BackOffMaxTimeout,\r\n\t\t\t\toptions.BackOffExponentFactor,\r\n\t\t\t\toptions.BackOffMaximumJitterInterval,\r\n\t\t\t))),\r\n\t\thttpclient.WithRetryCount(options.RequestRetryCount),\r\n\t\thttpclient.WithHTTPClient(&http.Client{\r\n\t\t\tTransport: clientDefaultTransport,\r\n\t\t\tTimeout: options.RequestTimeout,\r\n\t\t}),\r\n\t)\r\n\r\n\treturn\r\n}", "func NewClient(config *ClientConfig) *Client {\n\treturn &Client{\n\t\tfmt.Sprintf(\"http://%s/%s\", config.APIHost, apiPath),\n\t\thttp.Client{\n\t\t\tTimeout: time.Second * 3,\n\t\t},\n\t}\n}", "func NewClient(httpClient *http.Client) (*Client, error) {\n\tif httpClient == nil {\n\t\thttpClient = &http.Client{}\n\t}\n\tb, err := url.Parse(baseURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := &Client{client: httpClient, BaseURL: b}\n\tc.PriceHistory = &PriceHistoryService{client: c}\n\tc.Account = &AccountsService{client: c}\n\tc.MarketHours = &MarketHoursService{client: c}\n\tc.Quotes = &QuotesService{client: c}\n\tc.Instrument = &InstrumentService{client: c}\n\tc.Chains = &ChainsService{client: c}\n\tc.Mover = &MoverService{client: c}\n\tc.TransactionHistory = &TransactionHistoryService{client: c}\n\tc.User = &UserService{client: c}\n\tc.Watchlist = &WatchlistService{client: c}\n\n\treturn c, nil\n}", "func CreateHTTPSClient(handler http.Handler) (*http.Client, string, func()) {\n\n\tserver := httptest.NewTLSServer(handler)\n\n\tcert, err := x509.ParseCertificate(server.TLS.Certificates[0].Certificate[0])\n\tif err != nil {\n\t\tlog.WithError(err).Fatal(\"Could not parse certificate\")\n\t}\n\n\tcertpool := x509.NewCertPool()\n\tcertpool.AddCert(cert)\n\n\tclient := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDialContext: func(_ context.Context, network, _ string) (net.Conn, error) {\n\t\t\t\treturn net.Dial(network, server.Listener.Addr().String())\n\t\t\t},\n\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\tRootCAs: certpool,\n\t\t\t},\n\t\t},\n\t}\n\n\treturn client, server.URL, server.Close\n}", "func NewHTTPClient(skipVerify bool, certPath string) (*http.Client, error) {\n\ttlsConfig := &tls.Config{\n\t\tInsecureSkipVerify: skipVerify,\n\t}\n\n\tif !skipVerify && certPath != \"\" {\n\t\tcert, err := os.ReadFile(certPath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcertPool, err := x509.SystemCertPool()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"WARN: unable to get system cert pool: %v\\n\", err)\n\t\t\tcertPool = x509.NewCertPool()\n\t\t}\n\t\tcertPool.AppendCertsFromPEM(cert)\n\t\ttlsConfig.RootCAs = certPool\n\t}\n\n\treturn &http.Client{\n\t\tTimeout: 2 * time.Minute,\n\t\tTransport: &http.Transport{\n\t\t\tIdleConnTimeout: 2 * time.Minute,\n\t\t\tResponseHeaderTimeout: 2 * 
time.Minute,\n\t\t\tTLSClientConfig: tlsConfig,\n\t\t}}, nil\n}", "func NewClient(token string, client *http.Client) *Client {\n\tif client == nil {\n\t\thttpClient = &http.Client{Timeout: time.Second * 10}\n\t} else {\n\t\thttpClient = client\n\t}\n\treturn &Client{token}\n}", "func NewClient(c *http.Client, baseURL *url.URL) *client {\n\treturn &client{\n\t\tbaseURL: baseURL,\n\t\tclient: c,\n\t}\n}", "func NewClient() *http.Client {\n\treturn &http.Client{\n\t\tTimeout: 10 * time.Second,\n\t}\n}", "func NewClient(config HostConfig) *Client {\n\tc := &Client{\n\t\tconfig: config,\n\t}\n\tc.client = &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\tInsecureSkipVerify: !config.Verify,\n\t\t\t},\n\t\t},\n\t}\n\n\tgrpcAddress := c.config.GRPC\n\tsecure := false\n\tif grpcAddress == `` {\n\t\tu, _ := url.Parse(c.config.API)\n\t\tgrpcAddress = u.Hostname()\n\t\tgrpcPort := u.Port()\n\t\tif u.Scheme == `http` {\n\t\t\tsecure = false\n\t\t\tif grpcPort == `` {\n\t\t\t\tgrpcPort = `80`\n\t\t\t}\n\t\t} else {\n\t\t\tsecure = true\n\t\t\tif grpcPort == `` {\n\t\t\t\tgrpcPort = `443`\n\t\t\t}\n\t\t}\n\n\t\tgrpcAddress = fmt.Sprintf(`%s:%s`, grpcAddress, grpcPort)\n\t}\n\n\tvar conn *grpc.ClientConn\n\tvar err error\n\tif secure {\n\t\tif conn, err = grpc.Dial(\n\t\t\tgrpcAddress,\n\t\t\tgrpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{})),\n\t\t\tgrpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(100<<20)),\n\t\t); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t} else {\n\t\tif conn, err = grpc.Dial(\n\t\t\tgrpcAddress,\n\t\t\tgrpc.WithInsecure(),\n\t\t\tgrpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(100<<20)),\n\t\t); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tc.cc = conn\n\n\tc.blog = protocols.NewTaoBlogClient(c.cc)\n\tc.management = protocols.NewManagementClient(c.cc)\n\n\treturn c\n}", "func NewClient(httpClient *http.Client) *Client {\n\tvar c *http.Client\n\n\tif httpClient == nil {\n\t\tc = http.DefaultClient\n\t} else {\n\t\tc = httpClient\n\t}\n\n\treturn &Client{\n\t\tclient: c,\n\t}\n}", "func NewClient(httpClient *http.Client, baseURL string) (*Client, error) {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\n\tbase, err := url.Parse(baseURL)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Could not parse base URL\")\n\t}\n\n\tc := &Client{client: httpClient, baseURL: base}\n\treturn c, nil\n}", "func NewClient(config *ClientConfig) *Client {\n\tvar httpClient *http.Client\n\tvar logger LeveledLoggerInterface\n\n\tif config.HTTPClient == nil {\n\t\thttpClient = &http.Client{}\n\t} else {\n\t\thttpClient = config.HTTPClient\n\t}\n\n\tif config.Logger == nil {\n\t\tlogger = &LeveledLogger{Level: LevelError}\n\t} else {\n\t\tlogger = config.Logger\n\t}\n\n\treturn &Client{\n\t\tAPIToken: config.APIToken,\n\t\tLogger: logger,\n\n\t\tbaseURL: WaniKaniAPIURL,\n\t\thttpClient: httpClient,\n\t}\n}", "func NewClient(s ClientSettings) (*Client, error) {\n\tproxy := http.ProxyFromEnvironment\n\tif s.Proxy != nil {\n\t\tproxy = http.ProxyURL(s.Proxy)\n\t}\n\tlogger.Info(\"HTTP URL: %s\", s.URL)\n\tvar dialer, tlsDialer transport.Dialer\n\tvar err error\n\n\tdialer = transport.NetDialer(s.Timeout)\n\ttlsDialer = transport.TLSDialer(dialer, s.TLS, s.Timeout)\n\n\tif st := s.Observer; st != nil {\n\t\tdialer = transport.StatsDialer(dialer, st)\n\t\ttlsDialer = transport.StatsDialer(tlsDialer, st)\n\t}\n\tparams := s.Parameters\n\tvar encoder bodyEncoder\n\tcompression := s.CompressionLevel\n\tif compression == 0 
{\n\t\tswitch s.Format {\n\t\tcase \"json\":\n\t\t\tencoder = newJSONEncoder(nil)\n\t\tcase \"json_lines\":\n\t\t\tencoder = newJSONLinesEncoder(nil)\n\t\t}\n\t} else {\n\t\tswitch s.Format {\n\t\tcase \"json\":\n\t\t\tencoder, err = newGzipEncoder(compression, nil)\n\t\tcase \"json_lines\":\n\t\t\tencoder, err = newGzipLinesEncoder(compression, nil)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tclient := &Client{\n\t\tConnection: Connection{\n\t\t\tURL: s.URL,\n\t\t\tUsername: s.Username,\n\t\t\tPassword: s.Password,\n\t\t\tContentType: s.ContentType,\n\t\t\thttp: &http.Client{\n\t\t\t\tTransport: &http.Transport{\n\t\t\t\t\tDial: dialer.Dial,\n\t\t\t\t\tDialTLS: tlsDialer.Dial,\n\t\t\t\t\tProxy: proxy,\n\t\t\t\t},\n\t\t\t\tTimeout: s.Timeout,\n\t\t\t},\n\t\t\tencoder: encoder,\n\t\t},\n\t\tparams: params,\n\t\tcompressionLevel: compression,\n\t\tproxyURL: s.Proxy,\n\t\tbatchPublish: s.BatchPublish,\n\t\theaders: s.Headers,\n\t\tformat: s.Format,\n\t}\n\n\treturn client, nil\n}", "func newCloudlyckeClient() *http.Client {\n\treturn &http.Client{}\n}", "func NewClient(config ClientConfig) (Client, error) {\n\t// raise error on client creation if the url is invalid\n\tneturl, err := url.Parse(config.URL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thttpClient := http.DefaultClient\n\n\tif config.TLSInsecureSkipVerify {\n\t\thttpClient.Transport = &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}\n\t}\n\n\tc := &client{\n\t\tclient: httpClient,\n\t\trawurl: neturl.String(),\n\t\tusername: config.Username,\n\t\tpassword: config.Password,\n\t}\n\n\t// create a single service object and reuse it for each API service\n\tc.service.client = c\n\tc.knowledge = (*knowledgeService)(&c.service)\n\n\treturn c, nil\n}" ]
[ "0.7867207", "0.7842221", "0.7767313", "0.76930034", "0.76230794", "0.7489563", "0.73058105", "0.7303979", "0.7284354", "0.7139966", "0.71161634", "0.71125615", "0.7101692", "0.7077963", "0.7058024", "0.70579094", "0.7052008", "0.70312655", "0.70235515", "0.70198125", "0.70052856", "0.6992913", "0.69910157", "0.69854957", "0.6974019", "0.69719714", "0.6960736", "0.69380057", "0.69362736", "0.6929329", "0.6904244", "0.6898883", "0.68937016", "0.6892736", "0.6887617", "0.68818444", "0.6881093", "0.6878172", "0.68618745", "0.68565905", "0.68557227", "0.6850778", "0.6847038", "0.68453354", "0.6839264", "0.6831605", "0.68143713", "0.6811315", "0.6809679", "0.6807112", "0.6786303", "0.6784624", "0.67836225", "0.6769083", "0.6763592", "0.6761235", "0.6754935", "0.67476344", "0.6739666", "0.673014", "0.6729417", "0.6716713", "0.6712368", "0.6707753", "0.670446", "0.67037857", "0.6700712", "0.66997254", "0.66933185", "0.6688895", "0.6682653", "0.668199", "0.6679023", "0.6676191", "0.66758883", "0.6665656", "0.66409045", "0.6640133", "0.6636727", "0.6623282", "0.66212666", "0.6613358", "0.6611126", "0.660928", "0.66084075", "0.66073", "0.66055185", "0.6595878", "0.65951127", "0.658697", "0.65852314", "0.6580339", "0.6579634", "0.6579545", "0.65786064", "0.6575084", "0.65718037", "0.656583", "0.6564929", "0.65565825" ]
0.7308629
6
gatherJSONData query the data source and parse the response JSON
func (logstash *Logstash) gatherJSONData(address string, value interface{}) error {
	request, err := http.NewRequest("GET", address, nil)
	if err != nil {
		return err
	}

	if (logstash.Username != "") || (logstash.Password != "") {
		request.SetBasicAuth(logstash.Username, logstash.Password)
	}

	for header, value := range logstash.Headers {
		if strings.ToLower(header) == "host" {
			request.Host = value
		} else {
			request.Header.Add(header, value)
		}
	}

	response, err := logstash.client.Do(request)
	if err != nil {
		return err
	}
	defer response.Body.Close()

	if response.StatusCode != http.StatusOK {
		// ignore the err here; LimitReader returns io.EOF and we're not interested in read errors.
		body, _ := io.ReadAll(io.LimitReader(response.Body, 200))
		return fmt.Errorf("%s returned HTTP status %s: %q", address, response.Status, body)
	}

	err = json.NewDecoder(response.Body).Decode(value)
	if err != nil {
		return err
	}

	return nil
}
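For reference, a minimal sketch of how this method might be invoked. The Logstash struct below is an assumption reconstructed from the fields the method touches (client, Username, Password, Headers); the field layout, the example address, and the stats target are illustrative and not part of the source.

package main

import (
	"fmt"
	"net/http"
	"time"
)

// Assumed receiver type, inferred from the fields gatherJSONData uses.
// The method shown above is presumed attached to *Logstash.
type Logstash struct {
	Username string
	Password string
	Headers  map[string]string
	client   *http.Client
}

func main() {
	logstash := &Logstash{
		client:  &http.Client{Timeout: 5 * time.Second},
		Headers: map[string]string{"Accept": "application/json"},
	}

	// Decode the response into a generic map; any JSON-tagged struct
	// would work equally well as the value argument.
	var stats map[string]interface{}
	if err := logstash.gatherJSONData("http://localhost:9600/_node/stats", &stats); err != nil {
		fmt.Println("gather failed:", err)
		return
	}
	fmt.Printf("collected %d top-level keys\n", len(stats))
}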
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (e *Exporter) gatherData() ([]*Datum, error) {\n\n\tdata := []*Datum{}\n\n\tresponses, err := asyncHTTPGets(e.TargetURLs, e.APIToken)\n\n\tif err != nil {\n\t\treturn data, err\n\t}\n\n\tfor _, response := range responses {\n\n\t\t// Github can at times present an array, or an object for the same data set.\n\t\t// This code checks handles this variation.\n\t\tif isArray(response.body) {\n\t\t\tds := []*Datum{}\n\t\t\tjson.Unmarshal(response.body, &ds)\n\t\t\tdata = append(data, ds...)\n\t\t}\n\n\t\tlog.Infof(\"API data fetched for environment: %s\", response.targetURL.Environment)\n\t}\n\n\t//return data, rates, err\n\treturn data, nil\n\n}", "func loadData() jsonObj {\n\t/*\n\t\tAPI: https://opentdb.com/api_config.php\n\t*/\n\turl := \"https://opentdb.com/api.php?amount=10&type=multiple\"\n\tclient := http.Client{\n\t\tTimeout: time.Second * 2,\n\t}\n\treq, err := http.NewRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tres, getErr := client.Do(req)\n\tif(getErr != nil) {\n\t\tlog.Fatal(getErr)\n\t}\n\tif(res.Body != nil) {\n\t\tdefer res.Body.Close()\n\t}\n\n\tbody, readErr := ioutil.ReadAll(res.Body)\n\tif(readErr != nil) {\n\t\tlog.Fatal(readErr)\n\t}\n\tbyteBody := []byte(body)\n\tvar questions jsonObj\n\terr = json.Unmarshal(byteBody, &questions)\n\n\tif err == nil {\n\t\tfmt.Printf(\"Data loaded successfully!\")\n\t} else {\n\t\tfmt.Printf(\"%s\\n\", err)\n\t}\n\n\treturn questions\n}", "func fetchData(url string) (*info, error) {\r\n\r\n\t// Throttle the data request rate\r\n\ttime.Sleep(100 * time.Millisecond)\r\n\r\n\tresp, _ := http.Get(source + url)\r\n\tdefer resp.Body.Close()\r\n\r\n\tresult, err := ioutil.ReadAll(resp.Body)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\tvar i info\r\n\terr = json.Unmarshal(result, &i)\r\n\tif err != nil {\r\n\t\t//log.Println(err, err.Error())\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\ttotalRequest++\r\n\t//fmt.Printf(\"%v\\n\\n\", i)\r\n\treturn &i, nil\r\n}", "func GetData(url string, structToFill interface{}) error {\n\tlog.Printf(\"Getting data from URL: %s...\", url)\n\t// Make an HTTP client so we can add custom headers (currently used for adding in the Bearer token for inter-microservice communication)\n\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = setToken(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif req == nil {\n\t\tfmt.Printf(\"Alert! 
req is nil!\")\n\t}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Printf(\"Error on request: %s\", err.Error())\n\t\treturn err\n\t}\n\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\terrorString, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn errors.New(string(errorString))\n\t}\n\n\terr = json.Unmarshal(b, structToFill)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"Done.\")\n\treturn nil\n}", "func (r *DataQueryResult) UnmarshalJSON(b []byte) error {\n\tm := map[string]interface{}{}\n\tif err := json.Unmarshal(b, &m); err != nil {\n\t\treturn err\n\t}\n\n\trefID, ok := m[\"refId\"].(string)\n\tif !ok {\n\t\treturn fmt.Errorf(\"can't decode field refId - not a string\")\n\t}\n\tvar meta *simplejson.Json\n\tif m[\"meta\"] != nil {\n\t\tmm, ok := m[\"meta\"].(map[string]interface{})\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"can't decode field meta - not a JSON object\")\n\t\t}\n\t\tmeta = simplejson.NewFromAny(mm)\n\t}\n\tvar series DataTimeSeriesSlice\n\t/* TODO\n\tif m[\"series\"] != nil {\n\t}\n\t*/\n\tvar tables []DataTable\n\tif m[\"tables\"] != nil {\n\t\tts, ok := m[\"tables\"].([]interface{})\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"can't decode field tables - not an array of Tables\")\n\t\t}\n\t\tfor _, ti := range ts {\n\t\t\ttm, ok := ti.(map[string]interface{})\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"can't decode field tables - not an array of Tables\")\n\t\t\t}\n\t\t\tvar columns []DataTableColumn\n\t\t\tcs, ok := tm[\"columns\"].([]interface{})\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"can't decode field tables - not an array of Tables\")\n\t\t\t}\n\t\t\tfor _, ci := range cs {\n\t\t\t\tcm, ok := ci.(map[string]interface{})\n\t\t\t\tif !ok {\n\t\t\t\t\treturn fmt.Errorf(\"can't decode field tables - not an array of Tables\")\n\t\t\t\t}\n\t\t\t\tval, ok := cm[\"text\"].(string)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn fmt.Errorf(\"can't decode field tables - not an array of Tables\")\n\t\t\t\t}\n\n\t\t\t\tcolumns = append(columns, DataTableColumn{Text: val})\n\t\t\t}\n\n\t\t\trs, ok := tm[\"rows\"].([]interface{})\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"can't decode field tables - not an array of Tables\")\n\t\t\t}\n\t\t\tvar rows []DataRowValues\n\t\t\tfor _, ri := range rs {\n\t\t\t\tvals, ok := ri.([]interface{})\n\t\t\t\tif !ok {\n\t\t\t\t\treturn fmt.Errorf(\"can't decode field tables - not an array of Tables\")\n\t\t\t\t}\n\t\t\t\trows = append(rows, vals)\n\t\t\t}\n\n\t\t\ttables = append(tables, DataTable{\n\t\t\t\tColumns: columns,\n\t\t\t\tRows: rows,\n\t\t\t})\n\t\t}\n\t}\n\n\tvar dfs *dataFrames\n\tif m[\"dataframes\"] != nil {\n\t\traw, ok := m[\"dataframes\"].([]interface{})\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"can't decode field dataframes - not an array of byte arrays\")\n\t\t}\n\n\t\tvar encoded [][]byte\n\t\tfor _, ra := range raw {\n\t\t\tencS, ok := ra.(string)\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"can't decode field dataframes - not an array of byte arrays\")\n\t\t\t}\n\t\t\tenc, err := base64.StdEncoding.DecodeString(encS)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"can't decode field dataframes - not an array of arrow frames\")\n\t\t\t}\n\t\t\tencoded = append(encoded, enc)\n\t\t}\n\t\tdecoded, err := data.UnmarshalArrowFrames(encoded)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdfs = &dataFrames{\n\t\t\tdecoded: decoded,\n\t\t\tencoded: encoded,\n\t\t}\n\t}\n\n\tr.RefID = 
refID\n\tr.Meta = meta\n\tr.Series = series\n\tr.Tables = tables\n\tif dfs != nil {\n\t\tr.Dataframes = dfs\n\t}\n\treturn nil\n}", "func BindGetJSONData(url string, param req.Param, body interface{}) error {\n\tr, err := req.Get(url, param)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = r.ToJSON(body)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func processData(data *Response) {\n\tif data == nil {\n\t\tfmt.Println(\"Error\")\n\t\treturn\n\t}\n\tfor _, value := range data.DataItems {\n\t\tfmt.Println(\"DataItem \", value.Name, \" Value \", value.Value, \" Timestamp \", value.Timestamp)\n\t\t//\n\t\t// Add your code here ...\n\t\t//\n\t}\n\n}", "func (m *MSSQLDatastore) FetchJSON(ctx context.Context, query string, args ...interface{}) ([]byte, error) {\n\tif m == nil {\n\t\treturn nil, ErrEmptyObject\n\t}\n\n\tif _, ok := ctx.Deadline(); !ok {\n\t\tvar cancel context.CancelFunc\n\t\tctx, cancel = context.WithTimeout(ctx, QueryLimit)\n\t\tdefer cancel()\n\t}\n\n\trows, err := m.db.QueryContext(ctx, query, args...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer rows.Close()\n\n\treturn ToJSON(rows)\n}", "func FetchJSON(urlPath string) map[string]any {\n\tout := map[string]any{}\n\tresponse, err := http.Get(fmt.Sprintf(\"%s%s\", ServerAddress, urlPath))\n\tif err != nil {\n\t\treturn out\n\t}\n\tdefer response.Body.Close()\n\t_ = json.NewDecoder(response.Body).Decode(&out)\n\treturn out\n}", "func dataGetHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tresourceName := vars[\"resourceName\"]\n\tlog.Debug(\"In dataHandler. Request for \" + resourceName)\n\tdat, err := ioutil.ReadFile(dataDir + resourceName + \".json\")\n\tif err != nil {\n\t\thttp.Error(w, \"Error fetching data for resource \"+resourceName, 404)\n\t\tlog.Error(err.Error())\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tfmt.Fprintf(w, string(dat))\n}", "func (h *HTTP) FetchJSON() (map[string]interface{}, error) {\n\tbody, err := h.FetchContent()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data map[string]interface{}\n\n\terr = json.Unmarshal(body, &data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn data, nil\n}", "func ReadJSONData(basePath, targPath string) ([]interface{}, error) {\n\tusersUnmarshalled := UsersUnmarshalled{Users: []entity.UserBody{}}\n\tdocsPath, err := filepath.Rel(basePath, targPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbyteValues, err := ioutil.ReadFile(docsPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = json.Unmarshal(byteValues, &usersUnmarshalled)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdocs := make([]interface{}, len(usersUnmarshalled.Users))\n\t// replicating original slice to docs\n\tfor index, _ := range usersUnmarshalled.Users {\n\t\tdocs[index] = usersUnmarshalled.Users[index]\n\t}\n\treturn docs, nil\n}", "func fetchJSON(url string, resp interface{}) error {\n\tr, err := http.Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer r.Body.Close()\n\treturn json.NewDecoder(r.Body).Decode(&resp)\n}", "func getJson(url string, target interface{}) error {\n\tr, err := sendClient.Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer r.Body.Close()\n\treturn json.NewDecoder(r.Body).Decode(target)\n}", "func (fetcher *Fetcher) readJSONFromUrlForCpu() ([]MetricData, error) {\n\tlog.Infof(fetcher.cpuUsageUrl)\n\tresp, err := http.Get(fetcher.cpuUsageUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\tmetricList := make([]MetricData, 
0)\n\n\tbuf := new(bytes.Buffer)\n\n\tbuf.ReadFrom(resp.Body)\n\n\trespByte := buf.Bytes()\n\tif err := json.Unmarshal(respByte, &metricList); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn metricList, nil\n}", "func getJSON(url string, target interface{}) error {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\treturn json.NewDecoder(resp.Body).Decode(target)\n}", "func (fhp *FastHTTPProvider) JSON(ctx context.Context, request *fasthttp.Request, response interface{}, duration *time.Duration) (*fasthttp.Response, error) {\n\tresult, err := fhp.request(ctx, request, duration)\n\n\tif err == nil {\n\t\t_ = json.Unmarshal(result.Body(), &response)\n\t}\n\n\treturn result, err\n}", "func getData() []Data {\n raw, err := ioutil.ReadFile(\"data/responses.json\")\t// read json file\n\n\tif err != nil {\t// error checking\n fmt.Println(err.Error())\n os.Exit(1)\n }\n\n var data []Data\t// create array variable of struct Data to store multiple values\n\n json.Unmarshal(raw, &data)\t//put data into the variable\n\n return data\t// return the data\n}", "func (ds *redisDatasource) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) {\n\tlog.DefaultLogger.Debug(\"QueryData\", \"request\", req)\n\n\t// Get Instance\n\tclient, err := ds.getInstance(req.PluginContext)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create response struct\n\tresponse := backend.NewQueryDataResponse()\n\n\t// Loop over queries and execute them individually\n\tfor _, q := range req.Queries {\n\t\tvar qm queryModel\n\n\t\t// Unmarshal the json into our queryModel\n\t\terr := json.Unmarshal(q.JSON, &qm)\n\t\tlog.DefaultLogger.Debug(\"QueryData\", \"JSON\", q.JSON)\n\n\t\t// Error\n\t\tif err != nil {\n\t\t\tresp := backend.DataResponse{}\n\t\t\tresp.Error = err\n\t\t\tresponse.Responses[q.RefID] = resp\n\t\t\tcontinue\n\t\t}\n\n\t\t// Execute query\n\t\tresp := query(ctx, q, client, qm)\n\n\t\t// Add Time for Streaming and filter fields\n\t\tif qm.Streaming && qm.StreamingDataType != \"DataFrame\" {\n\t\t\tfor _, frame := range resp.Frames {\n\t\t\t\ttimeValues := []time.Time{}\n\n\t\t\t\tlen, _ := frame.RowLen()\n\t\t\t\tif len > 0 {\n\t\t\t\t\tfor j := 0; j < len; j++ {\n\t\t\t\t\t\ttimeValues = append(timeValues, time.Now())\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Filter Fields for Alerting and traffic optimization\n\t\t\t\tif qm.Field != \"\" {\n\t\t\t\t\t// Split Field to array\n\t\t\t\t\tfields, ok := shell.Split(qm.Field)\n\n\t\t\t\t\t// Check if filter is valid\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tresp.Error = fmt.Errorf(\"field is not valid\")\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tfilterFields := []*data.Field{}\n\n\t\t\t\t\t// Filter fields\n\t\t\t\t\tfor _, field := range frame.Fields {\n\t\t\t\t\t\t_, found := Find(fields, field.Name)\n\n\t\t\t\t\t\tif !found {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfilterFields = append(filterFields, field)\n\t\t\t\t\t}\n\t\t\t\t\tframe.Fields = append([]*data.Field{data.NewField(\"#time\", nil, timeValues)}, filterFields...)\n\t\t\t\t} else {\n\t\t\t\t\tframe.Fields = append([]*data.Field{data.NewField(\"#time\", nil, timeValues)}, frame.Fields...)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// save the response in a hashmap based on with RefID as identifier\n\t\tresponse.Responses[q.RefID] = resp\n\t}\n\n\treturn response, nil\n}", "func getJSONDataFromAPI(path string) []byte {\n\n\tu, err := url.Parse(\"https://api.datadoghq.com/api/v1/\" + path)\n\tif err != nil 
{\n\t\tlog.Fatal(err)\n\t}\n\n\tq := u.Query()\n\tq.Set(\"api_key\", viper.GetString(\"api_key\"))\n\tq.Set(\"application_key\", viper.GetString(\"app_key\"))\n\tu.RawQuery = q.Encode()\n\n\tvar netClient = &http.Client{\n\t\tTimeout: time.Second * 10,\n\t}\n\tresp, err := netClient.Get(u.String())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\n\tjd, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn jd\n}", "func (c *Client) FetchJSON(method string, url string, body io.Reader, structure interface{}) (err error) {\n\treq, err := http.NewRequest(method, url, body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tres, err := c.HandleRequest(req)\n\tdefer res.Body.Close()\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = json.NewDecoder(res.Body).Decode(structure)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn\n}", "func getData() {\n\n\tfor range time.Tick(time.Millisecond * UPDATE_INTERVAL) {\n\t\t// Build the request\n\t\treq, err := http.NewRequest(\"GET\", URL, nil)\n\t\tif err != nil {\n\t\t\tData <- nil\n\t\t\tcontinue\n\t\t}\n\n\t\tclient := &http.Client{}\n\n\t\tresp, err := client.Do(req)\n\t\tif err != nil {\n\t\t\tData <- nil\n\t\t\tcontinue\n\t\t}\n\n\t\tif resp.StatusCode != 200 { // OK\n\t\t\tData <- nil\n\t\t\tcontinue\n\t\t}\n\n\t\tvar response Response\n\t\tdecoder := json.NewDecoder(resp.Body)\n\t\terr = decoder.Decode(&response)\n\t\tData <- &response\n\t\tresp.Body.Close()\n\t}\n\tStop <- true\n}", "func QueryJSON(e *sqlca.Engine) {\n\n\tvar users []models.UsersDO\n\tstrJsonResults, err := e.Model(&users).Table(TABLE_NAME_USERS).Limit(10).QueryJson()\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\treturn\n\t}\n\tlog.Infof(\"user results to JSON %s\", strJsonResults)\n}", "func (s *Client) GetJSON(url string, target interface{}) error {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\n\tres, err := s.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer res.Body.Close()\n\n\treturn json.NewDecoder(res.Body).Decode(target)\n}", "func RequestJSON(result interface{}, url url.URL) error {\n\tdata, err := Request(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Result parsing\n\tif err := json.Unmarshal(data, result); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func LoadJSON(source interface{}, state data.Map) (interface{}, error) {\n\tlocation := toolbox.AsString(source)\n\tif location == \"\" {\n\t\treturn nil, errors.New(\"location was empty at LoadJSON\")\n\t}\n\tdata, err := ioutil.ReadFile(location)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to load: %v\", location)\n\t}\n\tJSON := string(data)\n\tif toolbox.IsNewLineDelimitedJSON(JSON) {\n\t\tslice, err := toolbox.NewLineDelimitedJSON(JSON)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvar result = make([]interface{}, 0)\n\t\ttoolbox.ProcessSlice(slice, func(item interface{}) bool {\n\t\t\tif item == nil {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif toolbox.IsMap(item) && len(toolbox.AsMap(item)) == 0 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tresult = append(result, item)\n\t\t\treturn true\n\t\t})\n\t\treturn result, nil\n\t}\n\tvar result interface{}\n\terr = json.Unmarshal(data, &result)\n\treturn result, err\n}", "func getJson(url string, target interface{}) error {\n\tr, err := http.Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer r.Body.Close()\n\n\treturn 
json.NewDecoder(r.Body).Decode(target)\n}", "func getJSON(url string, target interface{}) error {\n\tmyClient := &http.Client{Timeout: defaultHTTPTimeout}\n\treq, err := http.NewRequestWithContext(context.Background(), \"GET\", url, http.NoBody)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"create http req for %s: %w\", url, err)\n\t}\n\tr, err := myClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer r.Body.Close()\n\n\treturn json.NewDecoder(r.Body).Decode(target)\n}", "func ResponseData(q Query) (data []types.Row, err error) {\n\tvar results []map[string]string\n\tif err = q.All(&results); err != nil {\n\t\treturn nil, err\n\t}\n\tdata = make([]types.Row, len(results))\n\tfor i, r := range results {\n\t\tdata[i].Data = r\n\t}\n\treturn\n}", "func GetData(w http.ResponseWriter, r *http.Request) {\n\tfrom := r.URL.Query().Get(\"from\")\n\tif from == \"\" {\n\t\tfrom = fmt.Sprintf(\"%d\", time.Now().Add(-10*time.Minute).UnixNano()/1000000000)\n\t}\n\tr.Body.Close()\n\tfromI, err := strconv.ParseInt(from, 10, 64)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(\"BAD 'from' parameter\"))\n\t\treturn\n\t}\n\twindow := r.URL.Query().Get(\"window\")\n\tif window == \"\" {\n\t\twindow = \"300\"\n\t}\n\twindowI, err := strconv.ParseInt(window, 10, 64)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(\"BAD 'window' parameter\"))\n\t\treturn\n\t}\n\trv, err := qei.GetData(time.Unix(fromI, 0), time.Duration(windowI)*time.Second)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(rv.JsonBytes())\n\tr.Body.Close()\n}", "func (t *Task) GetData(d interface{}) error {\n\tdata := t.Data\n\tif data == nil {\n\t\treturn nil\n\t}\n\treturn json.Unmarshal(data, d)\n}", "func GetData(accessToken string, w http.ResponseWriter, r *http.Request) {\n\trequest, err := http.NewRequest(\"GET\", \"https://auth.vatsim.net/api/user\", nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\trequest.Header.Add(\"Bearer\", accessToken)\n\trequest.Header.Add(\"accept\", \"application/json\")\n\tclient := http.Client{}\n\tclient.Do(request)\n\n\tdefer request.Body.Close()\n\n\tbody, errReading := ioutil.ReadAll(request.Body)\n\tif errReading != nil {\n\t\tlog.Fatal(errReading)\n\t}\n\n\n\tvar userDetails map[string]interface{}\n\terrJSON := json.Unmarshal(body, &userDetails)\n\tif errJSON != nil {\n\t\tlog.Fatal(errJSON)\n\t}\n\tfmt.Println(userDetails)\n}", "func (handler Handler) GetJSON(r *http.Request) (*jsonq.JsonQuery, error) {\n\n\tcontent, err := ioutil.ReadAll(r.Body)\n\tr.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(content) == 0 {\n\t\treturn nil, errors.NewWithCode(err, errors.JSONIsEmpty, r.URL.String())\n\t}\n\n\tdata := map[string]interface{}{}\n\tdec := json.NewDecoder(strings.NewReader(string(content)))\n\tdec.Decode(&data)\n\tjq := jsonq.NewQuery(data)\n\n\treturn jq, nil\n}", "func processApigeeJSON(apigeeResp []byte) apigeeJSONstr {\n\tlog.Debug(\"In processsApigeeJSON\")\n\tvar apigeeJSON apigeeJSONstr\n\tif err := json.Unmarshal(apigeeResp, &apigeeJSON); err != nil {\n\t\tpanic(fmt.Errorf(\"fatal error processing Apigee JSON: %s\", err))\n\t}\n\treturn apigeeJSON\n}", "func GetParsedData(log log.T, input interface{}) (parsedOutput string, err error) {\n\t//Note: As per the link - https://msdn.microsoft.com/en-us/library/aa394217%28v=vs.85%29.aspx\n\t// fields like DNSServerSearchOrder, 
DefaultGateway are string array. However, on just 1 entry - the data ends up\n\t// showing as a string. If there are multiple entries - ConvertTo-Json - makes it a map with fields similar to\n\t// Format struct.\n\n\t// there are only 2 possibilities - either given input is a string or a json map with fields similar to Format struct.\n\t// anything else means - the command executed to get the data has been changed.\n\n\terrorMsg := \"Unable to read more data from %v due to error - %v\"\n\n\tif str, possible := input.(string); possible {\n\t\tlog.Debugf(\"Input %v can be transformed into string\", input)\n\t\tparsedOutput = str\n\t} else {\n\t\tlog.Debugf(\"Input %v can't be transformed into string\", input)\n\t\tvar format Format\n\t\tdataB, _ := json.Marshal(input)\n\n\t\tif err = json.Unmarshal(dataB, &format); err != nil {\n\t\t\terr = fmt.Errorf(errorMsg, input, err.Error())\n\t\t} else {\n\n\t\t\t//verify if format.Value is not nil\n\t\t\tif len(format.Value) > 0 {\n\t\t\t\t//currently we return 1st element of string array - since DNSServer and Gateway is string\n\t\t\t\t//if that changes then we can return format.Value\n\t\t\t\tparsedOutput = format.Value[0]\n\t\t\t} else {\n\t\t\t\terr = fmt.Errorf(\"Unexpected data format\")\n\t\t\t}\n\t\t}\n\t}\n\n\tlog.Debugf(\"ParsedOutput - %v, error - %v\", parsedOutput, err)\n\treturn\n}", "func FetchJson(url string, v interface{}) error {\r\n\tresp, err := http.Get(url)\r\n\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\r\n\tdata, err := ioutil.ReadAll(resp.Body)\r\n\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\r\n\tif err = json.Unmarshal(data, v); err != nil {\r\n\t\treturn err\r\n\t}\r\n\r\n\treturn nil\r\n}", "func HelloQueryFactoJSON(c echo.Context) error {\n\n\tid := c.QueryParam(\"id\")\n\tidInt, err := strconv.Atoi(id)\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn c.String(http.StatusOK, \"Please input only number.\")\n\t}\n\n\tfactoResultJSON := new(ResponseJSON)\n\n\tfactoResultJSON.Input = c.QueryParam(\"id\")\n\tfactoResultJSON.Data = fmt.Sprintf(\"%d\", factorial.GetFacto(idInt)) //Same with test:= ResponseJSON{}\n\n\treturn c.JSON(http.StatusOK, factoResultJSON)\n}", "func RetData(c echo.Context, data interface{}) error {\n\treturn c.JSON(http.StatusOK, DataRes{\n\t\tStatus: 200,\n\t\tData: data,\n\t})\n}", "func getDataFromEndpoint(endpoint string) Data {\r\n\t// Setting the type of request and authorization data\r\n\treq, err := http.NewRequest(\"GET\", endpoint, nil)\r\n\ttoken, _ := ioutil.ReadFile(\"accesstoken.txt\")\r\n\tencodedToken := base64.StdEncoding.EncodeToString([]byte(string(token)))\r\n\tbasicAuth := \"Basic \" + encodedToken\r\n\treq.Header.Set(\"Authorization\", basicAuth)\r\n\r\n\t//Executing the request\r\n\tresp, err := http.DefaultClient.Do(req)\r\n\r\n\tif err != nil {\r\n\t\tlog.Fatal(\"Error in the request.\\n[ERRO] -\", err)\r\n\t}\r\n\tdefer resp.Body.Close()\r\n\r\n\t// Getting json data, decoding them from the raw format and storing them at \"record's\" address\r\n\tvar record Data\r\n\tif err := json.NewDecoder(resp.Body).Decode(&record); err != nil {\r\n\t\tlog.Println(err)\r\n\t}\r\n\treturn record\r\n}", "func jsonRenderHandler(w http.ResponseWriter, r *http.Request) {\n\trunID := r.FormValue(\"runID\")\n\n\tstartIdx, err := strconv.Atoi(r.FormValue(\"startIdx\"))\n\tif err != nil {\n\t\thttputils.ReportError(w, r, err, \"Failed to parse start index\")\n\t\treturn\n\t}\n\n\tminPercent, err := strconv.ParseFloat(r.FormValue(\"minPercent\"), 64)\n\tif err != nil 
{\n\t\thttputils.ReportError(w, r, err, \"Failed to parse minimum percent\")\n\t\treturn\n\t}\n\n\tmaxPercent, err := strconv.ParseFloat(r.FormValue(\"maxPercent\"), 64)\n\tif err != nil {\n\t\thttputils.ReportError(w, r, err, \"Failed to parse maximum percent\")\n\t\treturn\n\t}\n\n\tif minPercent > maxPercent || minPercent < 0 || maxPercent > 100 {\n\t\thttputils.ReportError(w, r, err, \"Invalid bounds\")\n\t\treturn\n\t}\n\n\t// If the runID does not exist in the cache, this will return an error.\n\tresults, nextIdx, err := resultStore.GetFiltered(runID, startIdx, float32(minPercent), float32(maxPercent))\n\tif err != nil {\n\t\thttputils.ReportError(w, r, err, fmt.Sprintf(\"Failed to get cached results for run %s\", runID))\n\t\treturn\n\t}\n\tif len(results) == 0 {\n\t\thttputils.ReportError(w, r, err, fmt.Sprintf(\"No more results for run %s\", runID))\n\t\treturn\n\t}\n\tsendJsonResponse(w, map[string]interface{}{\"results\": results, \"nextIdx\": nextIdx})\n}", "func (td *S3DataSource) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) {\n\n\t// Unmarshal the json into our queryModel\n\tif err := json.Unmarshal(req.PluginContext.DataSourceInstanceSettings.JSONData, &td.settings); err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig := aws.Config{\n\t\tRegion: aws.String(td.settings.Region),\n\t}\n\n\tif td.settings.AccessKey != \"\" {\n\t\tif secretKey, found := req.PluginContext.DataSourceInstanceSettings.DecryptedSecureJSONData[\"secretKey\"]; found {\n\t\t\tconfig.Credentials = credentials.NewStaticCredentials(td.settings.AccessKey, secretKey, \"\")\n\t\t}\n\t}\n\n\tsess, err := session.NewSession()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttd.svc = s3.New(sess, &config)\n\n\t// create response struct\n\tresponse := backend.NewQueryDataResponse()\n\n\t// loop over queries and execute them individually.\n\tfor _, q := range req.Queries {\n\t\tres := td.query(ctx, q)\n\n\t\t// save the response in a hashmap\n\t\t// based on with RefID as identifier\n\t\tresponse.Responses[q.RefID] = res\n\t}\n\n\treturn response, nil\n}", "func getLocations(w http.ResponseWriter, r *http.Request) {\r\n\r\n\tctx := appengine.NewContext(r)\r\n\tlog.Infof(ctx, \"Requested URL: %v\", r.URL)\r\n\r\n\t// What callback function should we use?\r\n\tcallback := \"\"\r\n\tif callbackParam, _ := r.URL.Query()[\"callback\"]; len(callbackParam) == 1 {\r\n\t\tcallback = callbackParam[0]\r\n\t} else {\r\n\t\thttp.Error(w, \"Missing 'callback' parameter\", http.StatusBadRequest)\r\n\t\treturn\r\n\t}\r\n\r\n\t// Should we return cached test data?\r\n\tvar wantCached bool;\r\n\tif _, ok := r.URL.Query()[\"cachedTestData\"]; ok {\r\n\t\twantCached = true;\r\n\t}\r\n\r\n\tvar data []byte\r\n\r\n\t// Do we want to use cached data?\r\n\tvar cacheKey *datastore.Key\r\n\tif wantCached {\r\n\t\tlog.Infof(ctx, \"Trying to use cached data...\")\r\n\t\tcacheKey = datastore.NewKey(ctx, \"VehicleLocations\", \"lastKnown\", 0, nil)\r\n\t\tvlData := new(VehicleLocations)\r\n\t\tif err := datastore.Get(ctx, cacheKey, vlData); err == nil {\r\n\t\t\tdata = vlData.JsonData\r\n\t\t\tlog.Infof(ctx, \"Got the data from the cache: %v bytes\", len(data))\r\n\t\t}\r\n\t}\r\n\r\n\t// Do we still need to get the data?\t\r\n\tif data == nil {\r\n\t\tlog.Infof(ctx, \"Getting the real data from CoA\")\r\n\r\n\t\t// Get the latest bus locations\r\n\t\tclient := urlfetch.Client(ctx)\r\n\t\tresp, err := client.Get(\"https://data.texas.gov/download/gyui-3zdd/text/plain\")\r\n\t\tif err != nil 
{\r\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\r\n\t\t\treturn\r\n\t\t}\r\n\r\n\t\t// Read the data and cache it if necessary\t\t\r\n\t\tvar b bytes.Buffer\r\n\t\tb.ReadFrom(resp.Body);\r\n\t\tdata = b.Bytes();\r\n\t\tlog.Infof(ctx, \"Got %v bytes\", len(data))\r\n\r\n\t\t// Cache this for later, if desired\r\n\t\tif wantCached {\r\n\t\t\tvlData := new(VehicleLocations)\r\n\t\t\tvlData.JsonData = data\r\n\t\t\tif _, err := datastore.Put(ctx, cacheKey, vlData); err != nil {\r\n\t\t\t\tlog.Errorf(ctx, \"Caching failed: %v\", err)\r\n\t\t\t}\r\n\t\t\tlog.Infof(ctx, \"Cached the data for later\")\r\n\t\t}\r\n\t}\r\n\r\n\t// Write the data back to the client, wrapped by a function call to the specified callback.\r\n\tfmt.Fprintf(w, callback)\r\n\tfmt.Fprint(w, \"(\")\r\n\tw.Write(data)\r\n\tfmt.Fprint(w, \")\")\r\n}", "func (ds *Datasource) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) {\n\tres := backend.NewQueryDataResponse()\n\ts, err := ds.getInstance(req.PluginContext)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// loop over queries and execute them individually.\n\tfor _, q := range req.Queries {\n\t\tvar qm QueryModel\n\t\terr := json.Unmarshal(q.JSON, &qm)\n\n\t\tif err != nil {\n\t\t\tres.Responses[q.RefID] = backend.DataResponse{\n\t\t\t\tError: err,\n\t\t\t}\n\n\t\t\treturn res, nil\n\t\t}\n\n\t\tquery := helpers.BuildQuery(qm.QueryText, q)\n\t\tres.Responses[q.RefID] = s.client.run(query, q.RefID)\n\t}\n\n\treturn res, nil\n}", "func (o *ThingListResponse) GetData() []ThingResponse {\n\tif o == nil || o.Data == nil {\n\t\tvar ret []ThingResponse\n\t\treturn ret\n\t}\n\treturn *o.Data\n}", "func (eng *MongoEngine) HandleGETData(requestedId string) (interface{}, error) {\n\tsession, err := mgo.Dial(eng.ConnectionAddress)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to connect to database.\")\n\t}\n\tdefer session.Close()\n\tcollection := session.DB(eng.DatabaseName).C(eng.CollectionName)\n\n\tif len(requestedId) > 0 {\n\t\tsingleObject := make(map[string]interface{})\n\t\trequestedId, err := strconv.Atoi(requestedId)\n\t\tif err != nil {\n\t\t\treturn nil, errors.New(\"Failed to parse provided object ID.\")\n\t\t}\n\t\tobjId := bson.M{\"id\": requestedId}\n\t\tif err := collection.Find(objId).One(&singleObject); err != nil {\n\t\t\treturn nil, errors.New(\"Object with provided ID not found.\")\n\t\t}\n\t\treturn singleObject, nil\n\t} else {\n\t\tobjectList := make([]map[string]interface{}, eng.PerPage)\n\t\tif err := collection.Find(bson.M{}).Sort(\"-id\").Limit(eng.PerPage).All(&objectList); err != nil {\n\t\t\treturn nil, errors.New(\"Failed to fetch data from collection\")\n\t\t}\n\t\treturn objectList, nil\n\t}\n\treturn nil, nil\n}", "func fetchData(client *http.Client, url, format string) ([]byte, error) {\n\tif format == \"json\" {\n\t\tformat = \"application/json\"\n\t} else if format == \"xml\" {\n\t\tformat = \"application/xml\"\n\t}\n\n\t// search in-memory store\n\tif CacheOn {\n\t\tvalue, found := Memory.Get(url)\n\t\tif found {\n\t\t\treturn value.([]byte), nil\n\t\t}\n\t}\n\n\treq, err := http.NewRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(\"Accept\", format)\n\treq.Header.Set(\"User-Agent\", \"\")\n\n\tr, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer r.Body.Close()\n\n\tif r.StatusCode >= 400 {\n\t\tbody, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t\treturn nil, errors.New(string(body))\n\t}\n\n\tdata, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// save data to in-memory store\n\tif CacheOn {\n\t\tMemory.Set(url, data, cache.DefaultExpiration)\n\t}\n\n\treturn data, nil\n}", "func (m *MSSQLTx) FetchJSON(ctx context.Context, query string, args ...interface{}) ([]byte, error) {\n\treturn m.FetchJSONWithMetrics(ctx, &metrics.NoOp{}, query, args...)\n}", "func (jsonCam *JsonCameraInput)ReadJsonData(camRowOut *dataSet.Camera) {\n if camRowOut == nil {\n // Invalid output structure.\n return\n }\n if jsonCam.Name != nil {\n camRowOut.Name = *jsonCam.Name\n }\n if jsonCam.Ipaddr != nil {\n camRowOut.Ipaddr = *jsonCam.Ipaddr\n }\n if jsonCam.Port != nil {\n camRowOut.Port = *jsonCam.Port\n }\n if jsonCam.Desc != nil {\n camRowOut.Desc = *jsonCam.Desc\n }\n if jsonCam.UserId != nil {\n camRowOut.UserId = *jsonCam.UserId\n }\n if jsonCam.Pwd != nil {\n camRowOut.Pwd = *jsonCam.Pwd\n }\n if jsonCam.Status != 0 {\n camRowOut.Status = jsonCam.Status\n }\n if jsonCam.VideoLenSec != 0 {\n camRowOut.VideoLenSec = jsonCam.VideoLenSec\n }\n if jsonCam.SnapInterval != 0 {\n camRowOut.SnapInterval = jsonCam.SnapInterval\n }\n}", "func newDataSourceData(dInfo *backend.DataSourceInstanceSettings) (*dataSourceData, error) {\n\td := dataSourceData{}\n\terr := jsoniter.Unmarshal(dInfo.JSONData, &d)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif d.QueryTimeoutRaw == \"\" {\n\t\td.QueryTimeout = time.Second * 30\n\t} else {\n\t\tif d.QueryTimeout, err = time.ParseDuration(d.QueryTimeoutRaw); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif d.ServerTimeoutValue, err = formatTimeout(d.QueryTimeout); err != nil {\n\t\treturn nil, err\n\t}\n\n\td.Secret = dInfo.DecryptedSecureJSONData[\"clientSecret\"]\n\treturn &d, nil\n}", "func DLSubJSONData() error {\n\tres, err := http.Get(\"https://raw.githubusercontent.com/jl777/komodo/jl777/src/cc/dapps/subatomic.json\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tsubJSONData, err := ioutil.ReadAll(res.Body)\n\tres.Body.Close()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif res.Status == \"200 OK\" {\n\t\tioutil.WriteFile(\"assets/subatomic.json\", subJSONData, 0644)\n\t\treturn nil\n\t} else {\n\t\t// fmt.Println(res.Status)\n\t\treturn errors.New(res.Status)\n\t}\n}", "func getJson(url string) (resp Resp, err error) {\n\tr, err := http.Get(url)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif len(body) > 0 {\n\t\terr = json.Unmarshal(body, &resp)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tif resp.Error.ErrorMsg != \"\" {\n\t\t\tlog.Fatal(resp.Error.ErrorMsg)\n\t\t}\n\t}\n\treturn\n}", "func jsonSearchHandler(w http.ResponseWriter, r *http.Request) {\n\trunID := r.FormValue(\"runID\")\n\turl := r.FormValue(\"url\")\n\tresult, err := resultStore.Get(runID, url)\n\tif err != nil {\n\t\thttputils.ReportError(w, r, err, fmt.Sprintf(\"Failed to retrieve search result for run %s, url %s\", runID, url))\n\t}\n\tsendJsonResponse(w, map[string]*resultstore.ResultRec{\"result\": result})\n}", "func (v *VerticaDatasource) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) {\n\n\tlog.DefaultLogger.Debug(\"Inside datasource.QueryData Function\", \"Query request: \", req)\n\n\t// create response struct\n\tresponse := backend.NewQueryDataResponse()\n\n\t// Vertica db conntection\n\tconnDB, err := v.GetVerticaDb(req.PluginContext)\n\tif err != nil 
{\n\t\tlog.DefaultLogger.Error(\"Error while connecting to the Vertica Database: \" + err.Error())\n\t\treturn response, err\n\t}\n\n\tif err = connDB.PingContext(context.Background()); err != nil {\n\t\tlog.DefaultLogger.Error(\"Error while connecting to the Vertica Database: \" + err.Error())\n\t\treturn response, err\n\t}\n\t// https://golang.org/pkg/database/sql/#DBStats\n\tlog.DefaultLogger.Debug(fmt.Sprintf(\"%s connection stats open connections =%d, InUse = %d, Ideal = %d\", req.PluginContext.DataSourceInstanceSettings.Name, connDB.Stats().MaxOpenConnections, connDB.Stats().InUse, connDB.Stats().Idle))\n\n\t// loop over queries and execute them individually.\n\tfor _, q := range req.Queries {\n\t\tres := v.query(ctx, q, connDB)\n\n\t\t// save the response in a hashmap\n\t\t// based on with RefID as identifier\n\t\tresponse.Responses[q.RefID] = res\n\t}\n\n\treturn response, nil\n}", "func JSONGet(filename string) []JSONData {\n\tfile, err := os.Open(filename)\n\n\tif err != nil {\n\t\tlog.Printf(\"[!] Couldn't open the file for reading. Cause %s\",\n\t\t\terr.Error())\n\t}\n\n\tdefer file.Close()\n\tscanner := bufio.NewScanner(file)\n\n\tvar rows []JSONData = make([]JSONData, 0)\n\n\tfor scanner.Scan() {\n\n\t\tvar row JSONData\n\t\tvar data []byte = scanner.Bytes()\n\n\t\tif len(data) != 0 {\n\t\t\terr = json.Unmarshal(data, &row)\n\n\t\t\trows = append(rows, row)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"[!] Couldn't unmarshal data of file. Got %s Cause %s\",\n\t\t\t\t\tscanner.Text(), err.Error())\n\t\t\t}\n\t\t}\n\t}\n\n\treturn rows\n}", "func httpGetJSON(url string, data interface{}) (interface{}, error) {\n\tres, err := http.Get(url)\n\tif err != nil {\n\t\tlog.Errorf(\"Error during http get. Err: %v\", err)\n\t\treturn nil, err\n\t}\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tlog.Errorf(\"Error during ioutil readall. Err: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tif err := json.Unmarshal(body, data); err != nil {\n\t\tlog.Errorf(\"Error during json unmarshall. 
Err: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tlog.Debugf(\"Results for (%s): %+v\\n\", url, data)\n\n\treturn data, nil\n}", "func RespondJSON(w http.ResponseWriter, r *http.Request, data interface{}, err error) {\n\tconst (\n\t\tkeyRequestID = \"request_id\"\n\t)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tctx := r.Context()\n\trequestID := requestid.FromContext(ctx)\n\n\tif err != nil {\n\t\tdata := make(map[string]interface{}, 1)\n\t\tdata[\"error\"] = processErrorMessage(err)\n\t\tdata[\"success\"] = false\n\t\tif !requestID.IsEmpty() {\n\t\t\tdata[keyRequestID] = requestID.String()\n\t\t}\n\t\tif extra := putExtraFromError(err); extra != nil {\n\t\t\tdata[\"error_detail\"] = extra\n\t\t}\n\t\t//TODO: extend error: code, cause, etc.\n\t\tbuf := bytes.NewBuffer(nil)\n\t\tjson.NewEncoder(buf).Encode(data)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tbuf.WriteTo(w)\n\t\treturn\n\t}\n\tif data == nil {\n\t\tw.WriteHeader(http.StatusNoContent)\n\t\treturn\n\t}\n\n\tcommonData := map[string]interface{}{\n\t\t\"success\": true,\n\t}\n\tif !requestID.IsEmpty() {\n\t\tcommonData[keyRequestID] = requestID.String()\n\t}\n\n\tif _, ok := data.(*EmptyType); !ok {\n\t\tkeyData := \"data\"\n\t\t//if are, ok := data.(interface{ KeyDataResponse() string }); ok {\n\t\t//\tkeyData = are.KeyDataResponse()\n\t\t//}\n\t\tcommonData[keyData] = data\n\t}\n\n\tif strings.Contains(r.Header.Get(\"Accept-Encoding\"), \"gzip\") {\n\t\tw.Header().Set(\"Content-Encoding\", \"gzip\")\n\t\twriter := gzip.NewWriter(w)\n\t\tdefer writer.Close()\n\t\tjson.NewEncoder(writer).Encode(commonData)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n\tjson.NewEncoder(w).Encode(commonData)\n}", "func getJSON(url string, obj interface{}) (err error) {\n\tresp, err := http.Get(url)\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(b, obj)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\t// Success!\n\treturn\n}", "func (m *MSSQLDatastore) FetchJSONWithMetrics(ctx context.Context, r metrics.Recorder, query string, args ...interface{}) ([]byte, error) {\n\tif m == nil {\n\t\treturn nil, ErrEmptyObject\n\t}\n\n\tif _, ok := ctx.Deadline(); !ok {\n\t\tvar cancel context.CancelFunc\n\t\tctx, cancel = context.WithTimeout(ctx, QueryLimit)\n\t\tdefer cancel()\n\t}\n\n\tend := r.DatabaseSegment(\"mssql\", query, args...)\n\trows, err := m.db.QueryContext(ctx, query, args...)\n\tend()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer rows.Close()\n\n\tend = r.Segment(\"GODB::FetchWithMetrics::FetchJSONWithMetrics\")\n\tj, err := ToJSON(rows)\n\tend()\n\n\treturn j, err\n}", "func (r *RescueTime) GetAnalyticData(timezone string, parameters *AnalyticDataQueryParameters) (AnalyticData, error) {\n\tvar rtd AnalyticData\n\n\tparams := structToMap(parameters)\n\n\tbuiltURL, err := r.buildURL(analyticDataURL, params)\n\tif err != nil {\n\t\treturn rtd, err\n\t}\n\n\tcontents, err := r.getResponse(builtURL)\n\tif err != nil {\n\t\treturn rtd, err\n\t}\n\tcurrentJSON, err := simplejson.NewJson(contents)\n\tif err != nil {\n\t\treturn rtd, err\n\t}\n\n\tdata := AnalyticData{\n\t\tParameters: parameters,\n\t}\n\n\tvar notes string\n\tnotes = fmt.Sprintf(\"%s\", currentJSON.Get(\"notes\").MustString())\n\tdata.Notes = notes\n\n\tvar rowHeaders []string\n\theadersMap := make(map[int]string)\n\theaderRegex := regexp.MustCompile(\"[^A-Za-z0-9]+\")\n\tfor i, 
s := range currentJSON.Get(\"row_headers\").MustStringArray() {\n\t\trowHeaders = append(rowHeaders, s)\n\t\theadersMap[i] = headerRegex.ReplaceAllString(titleCase(s), \"\")\n\t}\n\tdata.RowHeaders = rowHeaders\n\n\tvar toAppend []row\n\tfor _, entry := range currentJSON.Get(\"rows\").MustArray() {\n\t\tvar aRow row\n\t\tfor index, column := range entry.([]interface{}) {\n\t\t\tthisHeader := headersMap[index]\n\t\t\tfield := reflect.ValueOf(&aRow).Elem().FieldByName(thisHeader)\n\t\t\tswitch field.Interface().(type) {\n\t\t\tcase int, int8, int16, int32, int64:\n\t\t\t\tintValue, err := column.(json.Number).Int64()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn data, err\n\t\t\t\t}\n\t\t\t\tfield.SetInt(intValue)\n\t\t\tcase time.Time:\n\t\t\t\tparsed, err := time.Parse(\"2006-01-02T15:04:05\", column.(string))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn rtd, err\n\t\t\t\t}\n\t\t\t\tif timezone != \"\" {\n\t\t\t\t\tlocation, err := time.LoadLocation(timezone)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn rtd, err\n\t\t\t\t\t}\n\t\t\t\t\tparsed = parsed.In(location)\n\t\t\t\t}\n\t\t\t\tfield.Set(reflect.ValueOf(parsed))\n\t\t\tdefault:\n\t\t\t\tfield.Set(reflect.ValueOf(column))\n\t\t\t}\n\t\t}\n\t\ttoAppend = append(toAppend, aRow)\n\t}\n\tdata.Rows = toAppend\n\treturn data, nil\n}", "func (C *Commander) GetData(writer http.ResponseWriter, request *http.Request) {\n\tvar error model.Error\n\tdb := database.DbConn()\n\tdefer func() {\n\t\terr := db.Close()\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t}()\n\tif strings.Contains(Role, \"program manager\") == true {\n\t\tvar Offset int\n\t\tPages := request.URL.Query()[\"Pages\"]\n\t\tfmt.Println(Pages)\n\t\tif Pages[0] != \"\" {\n\t\t\tlimit, err := strconv.Atoi(request.URL.Query().Get(\"limit\"))\n\t\t\tif limit != 10 && limit != 20 && limit != 50 {\n\t\t\t\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\t\t\terror.Message = \"Incorrect Limit Value\"\n\t\t\t\tjson.NewEncoder(writer).Encode(error)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ti1, _ := strconv.Atoi(Pages[0])\n\t\t\tfmt.Println(i1)\n\t\t\tOffset = 10 * i1\n\t\t\tcount, _ := db.Query(\"SELECT COUNT(Id) FROM sub_project_manager WHERE sub_project_id in (SELECT id FROM sub_project WHERE project_id in (SELECT id FROM project WHERE program_manager_id in (SELECT id FROM program_manager where program_manager_email = ?)))\", UserName)\n\t\t\tdefer func() {\n\t\t\t\terr := count.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err.Error())\n\t\t\t\t}\n\t\t\t}()\n\t\t\tGetManagerDetails, err := db.Query(\"call GetAllManagerDetailsData(?, ?, ?)\", UserName, Offset, limit)\n\t\t\tif err != nil {\n\t\t\t\tWriteLogFile(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer func() {\n\t\t\t\terr := GetManagerDetails.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err.Error())\n\t\t\t\t}\n\t\t\t}()\n\t\t\tvar Total int\n\t\t\tvar ManagerDetailData model.Project\n\t\t\tvar ManagerDetailsData []model.Project\n\t\t\tfor GetManagerDetails.Next() {\n\t\t\t\tGetManagerDetails.Scan(&ManagerDetailData.ProjectName, &ManagerDetailData.SubProjectName, &ManagerDetailData.ManagerName, &ManagerDetailData.ManagerEmailID, &ManagerDetailData.Id)\n\t\t\t\tManagerDetailsData = append(ManagerDetailsData, ManagerDetailData)\n\t\t\t}\n\t\t\tif count.Next() != false {\n\t\t\t\tcount.Scan(&Total)\n\t\t\t} else {\n\t\t\t\tTotal = 0\n\t\t\t}\n\t\t\tvar PaginationFormat model.Pagination\n\t\t\tPaginationFormat.TotalData = Total\n\t\t\tPaginationFormat.Limit = 
limit\n\t\t\tPaginationFormat.Data = ManagerDetailsData\n\t\t\tx1 := Total / limit\n\t\t\tx := Total % limit\n\t\t\tif x == 0 {\n\t\t\t\tPaginationFormat.TotalPages = x1\n\t\t\t} else {\n\t\t\t\tPaginationFormat.TotalPages = x1 + 1\n\t\t\t}\n\t\t\tx, _ = strconv.Atoi(Pages[0])\n\t\t\tif PaginationFormat.TotalPages != 0 {\n\t\t\t\tx1 = x + 1\n\t\t\t}\n\t\t\tPaginationFormat.Page = x1\n\t\t\tsetupResponse(&writer, request)\n\t\t\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\twriter.WriteHeader(http.StatusOK)\n\t\t\tjson.NewEncoder(writer).Encode(PaginationFormat)\n\t\t} else {\n\t\t\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\t\terror.Message = \"Incorrect Page Value\"\n\t\t\tjson.NewEncoder(writer).Encode(error)\n\t\t\treturn\n\n\t\t}\n\t} else {\n\t\twriter.WriteHeader(http.StatusNotFound)\n\t}\n}", "func GetWithJSONQuery(path string, data string) {\n\tres, err := getThisWithReqBody(path, data)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tfmt.Printf(sep)\n\tfmt.Printf(\"GET+ result: %s\\n\", res)\n}", "func (c *Client) Read(req *remote.ReadRequest) ([]map[string]interface{}, error) {\n\tctx := context.Background()\n\tquerier := req.Queries[0]\n\tquery := elastic.NewBoolQuery()\n\n\tfor _, matcher := range querier.Matchers {\n\t\tquery = query.Must(elastic.NewTermQuery(matcher.Name, matcher.Value))\n\t}\n\n\t// building elasticsearch query\n\tquery = query.Filter(elastic.\n\t\tNewRangeQuery(\"timestamp\").\n\t\tFrom(querier.StartTimestampMs).\n\t\tTo(querier.EndTimestampMs))\n\n\tsearchResult, err := c.client.\n\t\tSearch().\n\t\tIndex(c.esIndex).\n\t\tQuery(query).\n\t\tSize(1000).\n\t\tSort(\"timestamp\", true).\n\t\tDo(ctx)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar dataPoints []map[string]interface{}\n\n\t// parsing elasticsearch hits to array with datapoints\n\tif searchResult.Hits.TotalHits > 0 {\n\t\tfmt.Printf(\"Found a total of %d data points\\n\", searchResult.Hits.TotalHits)\n\n\t\tfor _, hit := range searchResult.Hits.Hits {\n\t\t\tvar dataPoint map[string]interface{}\n\t\t\tjson.Unmarshal(*hit.Source, &dataPoint)\n\t\t\tdataPoints = append(dataPoints, dataPoint)\n\t\t}\n\t} else {\n\t\treturn nil, newErrNoDataPointsFound(\"Found no metrics\")\n\t}\n\n\treturn dataPoints, nil\n}", "func ExampleHound_Fetch_json() {\n\th := NewHound(&Config{\n\t\tSystem: afero.NewMemMapFs(),\n\t\tRules: LoadRules(\"\"),\n\t\tFormat: JSONFormat,\n\t})\n\th.Findings = []Finding{\n\t\t{\n\t\t\tCount: 1,\n\t\t\tMessage: \"Found something juicy\",\n\t\t\tPath: \"example.toml\",\n\t\t\tLoot: []string{\"Token 1234560\"},\n\t\t},\n\t}\n\th.Fetch()\n\t// output:\n\t// [{\"count\":1,\"message\":\"Found something juicy\",\"path\":\"example.toml\",\"loot\":[\"Token 1234560\"]}]\n}", "func GetData() (m MyIP, err error) {\n\n\tresp, err := http.Get(url)\n\n\tif err == nil {\n\t\tif resp.StatusCode != 400 {\n\n\t\t\tbody, err := ioutil.ReadAll(resp.Body)\n\n\t\t\tbytBodyErr := []byte(body)\n\t\t\tif err == nil {\n\t\t\t\t_ = json.Unmarshal(bytBodyErr, &m)\n\t\t\t}\n\t\t} else {\n\t\t\treturn m, errors.New(\"400 Bad Request\")\n\t\t}\n\t}\n\treturn m, err\n}", "func DataJSON(status int, v interface{}, headers Headers) *Response {\n\n\tb, err := json.MarshalIndent(v, \"\", \" \")\n\tif err != nil {\n\t\treturn ErrorJSON(http.StatusInternalServerError, err, headers)\n\t}\n\n\treturn &Response{\n\t\tStatus: status,\n\t\tContentType: \"application/json\",\n\t\tContent: bytes.NewBuffer(b),\n\t\tHeaders: 
headers,\n\t}\n}", "func GetJSON(url string, v interface{}) error {\n\tcli := &http.Client{\n\t\tTimeout: RequestTimeout * time.Second,\n\t}\n\tresp, err := cli.Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\treply, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = json.Unmarshal(reply, v)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func loadData(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t// Fetch necessary data\n\ttimes := getEventTimes()\n\timages, err := loadImages()\n\tcheckError(err)\n\n\t// Encode to json\n\tdata := struct {\n\t\tEvents []event `json:\"events\"`\n\t\tImages []imageData `json:\"images\"`\n\t}{times, images}\n\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\terr = json.NewEncoder(w).Encode(&data)\n\tcheckError(err)\n}", "func SelectResources(data TmpJSON, resourceType string) map[string][]map[string]interface{} {\n\tvar resources []map[string]interface{}\n\t// resources := make([]map[string]interface{}, 0)\n\n\tfor _, v := range data.Data {\n\t\tif v[\"type\"] == resourceType {\n\t\t\tresources = append(resources, v)\n\t\t}\n\t}\n\n\tfiltered := map[string][]map[string]interface{}{\n\t\t\"data\": resources,\n\t}\n\treturn filtered\n}", "func DecodeJsonData(body io.Reader, result interface{}) error {\n\tdecoder := json.NewDecoder(body)\n\treturn decoder.Decode(result)\n}", "func (p *provider) getJSON(url string, token string, data interface{}) (map[string]string, error) {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", token))\n\treq.Header.Set(\"Accept\", githubAccept)\n\n\tres, err := p.transport.RoundTrip(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"Non-200 response from GitHub API call %s: %d\", url, res.StatusCode)\n\t}\n\n\tif err := json.NewDecoder(res.Body).Decode(&data); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn links.ParseLinks(res.Header.Get(\"Link\")), nil\n}", "func (db *MockDB) FetchJSON(ctx context.Context, q string, args ...interface{}) ([]byte, error) {\n\tdb.FetchJSONCount++\n\n\tif !assert.True(db.t, len(db.FetchJSONExpected) > 0, \"No FetchJSONExpected Defined\") {\n\t\tdb.t.FailNow()\n\t}\n\n\t// We repeat the final registered FetchJSON mock if we run out.\n\tif db.FetchJSONPointer >= len(db.FetchJSONExpected) {\n\t\tfmt.Print(\"\\nMore FetchJSON Calls than Expected\\n\\n\")\n\t\tdb.t.FailNow()\n\t}\n\n\tfetch := db.FetchJSONExpected[db.FetchJSONPointer]\n\tdb.FetchJSONPointer++\n\n\tif !assert.Equal(db.t, fetch.Query, q) {\n\t\tdb.t.FailNow()\n\t}\n\n\tassertDeepEqual(db.t, fetch.Args, args)\n\n\tif fetch.Error != nil {\n\t\treturn nil, fetch.Error\n\t}\n\n\treturn fetch.Content, nil\n}", "func (c *Client) FetchData(ctx context.Context, url string) ([]byte, error) {\n\n\t// Implement semaphores to ensure maximum concurrency threshold.\n\tc.semaphore <- struct{}{}\n\tdefer func() { <-c.semaphore }()\n\n\t// If there is an in-flight request for a unique URL, send response\n\t// from the in-flight request. 
Else, create the in-flight request.\n\tresponseRaw, err, shared := c.RequestGroup.Do(url, func() (interface{}, error) {\n\t\treturn c.fetchResponse(ctx)\n\t})\n\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\n\tlog.Infof(\"in-flight status : %t\", shared)\n\n\t//time.Sleep(time.Second * 4)\n\n\tresponse := responseRaw.([]byte)\n\n\treturn response, err\n}", "func (h *handler) readJSONInto(into interface{}) error {\n\t\n\tcontentType := h.rq.Header.Get(\"Content-Type\")\n\tif contentType != \"\" && !strings.HasPrefix(contentType, \"application/json\") {\n\t\treturn base.HTTPErrorf(http.StatusUnsupportedMediaType, \"Invalid content type %s\", contentType)\n\t}\n \n \t//TO DO: zip version to be added\n\t \t\n\tdecoder := json.NewDecoder(h.requestBody)\n\tif err := decoder.Decode(into); err != nil {\n\t\tbase.Warn(\"Couldn't parse JSON in HTTP request: %v\", err)\n\t\treturn base.HTTPErrorf(http.StatusBadRequest, \"Bad JSON\")\n\t}\n\t \n\treturn nil\n}", "func GetSampleJSONData(data interface{}) []byte {\n\tdataJSON, err := json.MarshalIndent(data, \"\", \" \")\n\tif err != nil {\n\t\treturn []byte(\"got error while converting data into JSON \")\n\t}\n\treturn dataJSON\n}", "func GetFoodData(c *gin.Context) {\n\tvar food Food\n\tvar MakerRestaurant Restaurant\n\tRestaurantUserName := c.Query(\"username\")\n\tdb.Where(&Restaurant{Username: RestaurantUserName}).First(&MakerRestaurant)\n\tdb.Where(&Food{Name: c.Query(\"name\"), Restaurant: MakerRestaurant}).First(&food)\n\tc.JSON(http.StatusOK, food)\n}", "func (c *ChromeIndexedDB) RequestData(securityOrigin string, databaseName string, objectStoreName string, indexName string, skipCount int, pageSize int, keyRange *types.ChromeIndexedDBKeyRange) ([]*types.ChromeIndexedDBDataEntry, bool, error) {\n paramRequest := make(map[string]interface{}, 7)\n paramRequest[\"securityOrigin\"] = securityOrigin\n paramRequest[\"databaseName\"] = databaseName\n paramRequest[\"objectStoreName\"] = objectStoreName\n paramRequest[\"indexName\"] = indexName\n paramRequest[\"skipCount\"] = skipCount\n paramRequest[\"pageSize\"] = pageSize\n paramRequest[\"keyRange\"] = keyRange\n recvCh, _ := sendCustomReturn(c.target.sendCh, &ParamRequest{Id: c.target.getId(), Method: \"IndexedDB.requestData\", Params: paramRequest})\n resp := <-recvCh\n\n var chromeData struct {\n Result struct { \n ObjectStoreDataEntries []*types.ChromeIndexedDBDataEntry \n HasMore bool \n }\n }\n\n err := json.Unmarshal(resp.Data, &chromeData)\n if err != nil {\n cerr := &ChromeErrorResponse{}\n chromeError := json.Unmarshal(resp.Data, cerr)\n if chromeError == nil && cerr.Error != nil {\n return nil, false, &ChromeRequestErr{Resp: cerr}\n }\n return nil, false, err\n }\n\n return chromeData.Result.ObjectStoreDataEntries, chromeData.Result.HasMore, nil\n}", "func getLocation() *Data {\n\tresponse, err := http.Get(url)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tdefer response.Body.Close()\n\n\tresult, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tr := &Data{}\n\terr = json.Unmarshal(result, &r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn r\n}", "func (c *apiClient) doJSON(request *http.Request, output interface{}) error {\n\tdata, err := c.do(request)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn json.Unmarshal(data, output)\n}", "func getBuildPacksData(cli plugin.CliConnection) Buildpacks {\n\tvar res Buildpacks = unmarshallBuildpackSearchResults(\"/v3/buildpacks\", cli)\n\n\tif res.Pagination.TotalPages > 1 {\n\t\tfor i := 2; 
i <= res.Pagination.TotalPages; i++ {\n\t\t\tapiUrl := fmt.Sprintf(\"/v3/buildpacks?page=%d&per_page=50\", i)\n\t\t\ttRes := unmarshallBuildpackSearchResults(apiUrl, cli)\n\t\t\tres.Resources = append(res.Resources, tRes.Resources...)\n\t\t}\n\t}\n\n\treturn res\n}", "func getPaginatedJSON(ctx context.Context, client *http.Client, registry *url.URL, u *url.URL, response interface{}) (*url.URL, error) {\n\tcreds := registry.User\n\tif u.Host != registry.Host {\n\t\tcreds = nil\n\t}\n\tresp, err := do(ctx, client, creds, &http.Request{\n\t\tMethod: http.MethodGet,\n\t\tURL: u,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"parse %s response: %w\", u.Redacted(), err)\n\t}\n\tif err := json.Unmarshal(data, response); err != nil {\n\t\treturn nil, fmt.Errorf(\"parse %s response: %w\", u.Redacted(), err)\n\t}\n\treturn getNextLink(resp.Header), nil\n}", "func (e *Event) Data(out interface{}) error {\n\treturn json.Unmarshal([]byte(e.data), out)\n}", "func GetData(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t//startTime := time.Now()\n\tr.ParseForm()\n\tuser_id= strings.Join(r.Form[\"userid\"], \"\")\n\tfmt.Println(\"userNum is :\", user_id)\n\tctx := context.Background()\n\tdsClient, err := datastore.NewClient(ctx, \"kouzoh-p-codechaitu\")\n\tif err != nil {\n\t\t// Handle error.\n\t\tfmt.Println(\"error in creating client\")\n\t}\n\tuser_id_int64, _ := strconv.ParseInt(user_id, 10, 64)\n\tk := datastore.IDKey(\"Recommendations\", user_id_int64, nil)\n\n\tvar e Entity\n\tif err := dsClient.Get(ctx, k, &e); err != nil {\n\t\t// Handle error.\n\t\tfmt.Println(\"error in getting data\" + fmt.Sprint(err))\n\t}\n\tLen := len(e.Recommend)\n\toutput := \"[\"\n\tfor i := 0; i < Len; i++ {\n\t\tm := Recommends{e.Recommend[i].Name, e.Recommend[i].Price, e.Recommend[i].Index, e.Recommend[i].Num_likes, e.Recommend[i].Item_id}\n\t\tdata, err2 := json.Marshal(m)\n\t\tif err2 != nil {\n\t\t\tfmt.Println(\"wrong in json \" + fmt.Sprint(err2))\n\t\t}\n\t\tif (i < Len-1) {\n\n\t\t\toutput = output + string(data) + \",\"\n\t\t} else {\n\n\t\t\toutput = output + string(data)\n\t\t}\n\n\t}\n\toutput = output + \"]\"\n\tfmt.Println(output)\n\tfmt.Fprint(w, output)\n\n}", "func FetchJSON(URL string, Method string, RequestBody interface{}, ResponseBody interface{}, Options FetchOptions) (err error) {\n\t// If body exists, Marshal body\n\tvar body []byte\n\tif RequestBody != nil {\n\t\tbody, _ = json.Marshal(RequestBody)\n\t}\n\n\t// Create request\n\treq, err := http.NewRequest(Method, URL, bytes.NewReader(body))\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Add content header if necessary\n\tif RequestBody != nil {\n\t\treq.Header.Set(\"Content-Type\", \"application/json\")\n\t}\n\n\t// Add options\n\tOptions.Initiate(req)\n\n\t// Run request\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\t//Format response body\n\tbody, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// If we can make a response body, then make it\n\tif ResponseBody != nil {\n\t\t//Format JSON into an object Struct\n\t\terr = json.Unmarshal(body, &ResponseBody)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}", "func TestGetJSON(t *testing.T) {\n\tassert := audit.NewTestingAssertion(t, true)\n\t// Setup the test server.\n\tmux := newMultiplexer(assert)\n\tts := restaudit.StartServer(mux, 
assert)\n\tdefer ts.Close()\n\terr := mux.Register(\"test\", \"json\", NewTestHandler(\"json\", assert))\n\tassert.Nil(err)\n\t// Perform test requests.\n\treq := restaudit.NewRequest(\"GET\", \"/base/test/json/4711?foo=0815\")\n\treq.AddHeader(restaudit.HeaderAccept, restaudit.ApplicationJSON)\n\tresp := ts.DoRequest(req)\n\tresp.AssertStatusEquals(200)\n\tresp.AssertBodyContains(`\"ResourceID\":\"4711\"`)\n\tresp.AssertBodyContains(`\"Query\":\"0815\"`)\n\tresp.AssertBodyContains(`\"Context\":\"foo\"`)\n}", "func (ex *Execer) queryJSONStructs(dest interface{}) error {\n\tblob, err := ex.queryJSONBlob(false)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif blob != nil {\n\t\treturn json.Unmarshal(blob, dest)\n\t}\n\treturn nil\n}", "func ImportJSON(url string, target interface{}, delimiter byte) error {\r\n\t// Get HTTP response from URL\r\n\tresponse, err := httpClient.Get(url)\r\n\tif (err != nil) {\r\n\t\treturn err\r\n\t}\r\n\r\n\tdefer response.Body.Close()\r\n\r\n\t// If the payload is fully JSON convert it to the specific struct and return\r\n\tif (delimiter == 0) {\r\n\t\treturn json.NewDecoder(response.Body).Decode(target)\r\n\t}\r\n\r\n\t/*\r\n\t\tIf payload is a JSON data segments separated by the delimiter\r\n\t\tNeed to assemble an array of structs type given\r\n\t*/\r\n\r\n\treader := bufio.NewReader(response.Body)\r\n\tslice := reflect.ValueOf(target).Elem()\r\n\ttypeOfSlice := slice.Type()\r\n\tslice.Set(reflect.MakeSlice(typeOfSlice, 0, 1))\r\n\tptrToTarget := reflect.New(typeOfSlice.Elem())\r\n\r\n\t// Loop through the payload segments try to convert them to struct and add them to array\r\n\tfor {\r\n\t\tpart, err := reader.ReadBytes(delimiter);\r\n\t\tif (err != nil) {\r\n\t\t\terrMsg := err.Error();\r\n\t\t\tif (errMsg != \"EOF\") {\r\n\t\t\t\tlog.Error(err.Error())\r\n\t\t\t}\r\n\r\n\t\t\treturn nil\r\n\t\t}\r\n\r\n\t\tif (len(part) == 0) {\r\n\t\t\treturn nil\r\n\t\t}\r\n\r\n\t\tif err := json.Unmarshal(part, ptrToTarget.Interface()); (err != nil) {\r\n\t\t\treturn err\r\n\t\t}\r\n\r\n\t\tslice.Set(reflect.Append(slice, ptrToTarget.Elem()))\r\n\t}\r\n\r\n\treturn nil\r\n}", "func (s *server) FetchGophers(w http.ResponseWriter, r *http.Request) {\n\tgophers, _ := s.fetching.FetchGophers(r.Context())\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t_ = json.NewEncoder(w).Encode(gophers)\n\n}", "func (s *server) FetchGophers(w http.ResponseWriter, r *http.Request) {\n\tgophers, _ := s.fetching.FetchGophers(r.Context())\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t_ = json.NewEncoder(w).Encode(gophers)\n\n}", "func loadHolidayData(url string) (data []byte, err error) {\r\n\tresp, err := http.Get(url)\r\n\terrorAlarm(err)\r\n\r\n\tdataFromUrl, err := ioutil.ReadAll(resp.Body)\r\n\terrorAlarm(err)\r\n\tdefer resp.Body.Close()\r\n\tisJsonDataValidAlarm (dataFromUrl)\r\n\r\n\treturn dataFromUrl, nil\r\n}", "func GetData(w http.ResponseWriter, r *http.Request) {\n\tresult := dailyData{}\n\tc := utils.MONGOSESSION.DB(\"healthDB\").C(\"healthData\")\n\tc.Find(bson.M{\"date\": utils.GetDate(time.Now())}).One(&result)\n\tb, _ := json.Marshal(result)\n\tfmt.Fprintf(w, string(b))\n}", "func (c *Client) QueryJSON(query string) (logs.JSON, error) {\n\t// make the query. 
we use a prepared statement here because mysql\n\t// only returns column type info if the statement is prepared,\n\t// otherwise everything will be typed as []byte\n\tstmt, err := c.Preparex(query)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"querying database with query '%s'\", query)\n\t}\n\tdefer stmt.Close()\n\n\t// execute the query\n\trows, err := stmt.Queryx()\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"retrieving rows of query '%s'\", query)\n\t}\n\tdefer rows.Close()\n\n\t// scan the rows into a JSON representation\n\tvar results []map[string]interface{}\n\tfor rows.Next() {\n\t\t// create a row\n\t\trow := make(map[string]interface{})\n\t\t// scan the row\n\t\tif err := rows.MapScan(row); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"scanning row of query '%s'\", query)\n\t\t}\n\t\t// the mysql driver returns text fields as []byte,\n\t\t// so cast to string if any fields have that type\n\t\tfor k, v := range row {\n\t\t\tif b, ok := v.([]byte); ok {\n\t\t\t\trow[k] = string(b)\n\t\t\t}\n\t\t}\n\t\tresults = append(results, row)\n\t}\n\treturn results, nil\n}", "func unmarshalVestingData(bz []byte) (cli.VestingData, error) {\n\tdata := cli.VestingData{}\n\terr := json.Unmarshal(bz, &data)\n\treturn data, err\n}", "func GetCollectorData(conf config.Config, getDataOn, data, dateFrom, dateTo string) (CollectorData, error) {\n\tvar collectorData CollectorData\n\tlinkURL := fmt.Sprintf(\"%s/rest/api/v1/logger/%s/%s/%s/%s/\", conf.CollectorURL, getDataOn, data, dateFrom, dateTo)\n\tlogrus.Infof(\"rest: getCollectorData request URL: %s\", linkURL)\n\treq, err := http.NewRequest(\"GET\", linkURL, nil)\n\tif err != nil {\n\t\tlogrus.Errorf(\"rest: http.NewRequest failed: %v\\n\", err)\n\t\treturn collectorData, err\n\t}\n\ttoken := conf.CollectorToken\n\treq.Header.Add(\"Authorization\", fmt.Sprintf(\"Token %s\", token))\n\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tlogrus.Errorf(\"rest: http.DefaultClient.Do(req) failed: %v\\n\", err)\n\t\treturn collectorData, err\n\t}\n\tdefer res.Body.Close()\n\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tlogrus.Errorf(\"rest: ioutil.ReadAll(res.Body) failed: %v\\n\", err)\n\t\treturn collectorData, err\n\t}\n\n\tjson.Unmarshal(body, &collectorData)\n\n\treturn collectorData, nil\n}", "func (c *DefaultApiController) DataGet(w http.ResponseWriter, r *http.Request) {\n\tresult, err := c.service.DataGet(r.Context())\n\t//If an error occured, encode the error with the status code\n\tif err != nil {\n\t\tEncodeJSONResponse(err.Error(), &result.Code, w)\n\t\treturn\n\t}\n\t//If no error, encode the body and the result code\n\tEncodeJSONResponse(result.Body, &result.Code, w)\n\n}", "func readStreamedResults(t *gotesting.T, r io.Reader) []*EntityResult {\n\tvar results []*EntityResult\n\tdec := json.NewDecoder(r)\n\tfor dec.More() {\n\t\tres := &EntityResult{}\n\t\tif err := dec.Decode(res); err != nil {\n\t\t\tt.Errorf(\"Failed to decode result: %v\", err)\n\t\t}\n\t\tresults = append(results, res)\n\t}\n\treturn results\n}", "func (client DataFlowClient) GetDataFlowResponder(resp *http.Response) (result DataFlowResource, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (td *SampleDatasource) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) 
{\n\tlog.DefaultLogger.Info(\"QueryData\", \"request\", req)\n\n\t// create response struct\n\tresponse := backend.NewQueryDataResponse()\n\n instance, err := td.im.Get(req.PluginContext)\n if err != nil {\n return nil, err\n }\n instSetting, _ := instance.(*instanceSettings)\n\n\t// loop over queries and execute them individually.\n\tfor _, q := range req.Queries {\n\t\tres := td.query(ctx, q, instSetting)\n\n\t\t// save the response in a hashmap\n\t\t// based on with RefID as identifier\n\t\tresponse.Responses[q.RefID] = res\n\t}\n\n\treturn response, nil\n}", "func (c *HTTPClient) GetJSON(url string, headers map[string]string, out interface{}) error {\n\treturn c.doRequest(url, http.MethodGet, headers, nil, out)\n}", "func JSONGetOne(table database.Table, w http.ResponseWriter, r *http.Request, sb *sqrl.SelectBuilder) {\n\ttable.Clear()\n\n\texSb := *sb\n\n\tvars := mux.Vars(r)\n\tfmt.Println(\"query\")\n\n\tvar val string\n\n\tif value, ok := vars[\"id\"]; ok {\n\t\tval = value\n\t} else if value, ok := vars[\"login\"]; ok {\n\t\tval = value\n\t}\n\n\ttype Item struct {\n\t\tData interface{} `json:\"data\"`\n\t}\n\n\tfmt.Println(val)\n\t//fmt.Println(table.GetPrimaryKey())\n\n\tquery, params, _ := exSb.Where(sqrl.Eq{table.GetPrimaryKey(): val}).ToSql()\n\tfmt.Println(query)\n\n\tif params != nil {\n\t\terrs := db.DB.Select(table.GetItems(), query, params[0])\n\t\tfmt.Println(errs)\n\t} else {\n\t\terrs := db.DB.Select(table.GetItems(), query)\n\t\tfmt.Println(errs)\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tjson.NewEncoder(w).Encode(Item{Data: table.GetItem()})\n}", "func (e *EJ) ParseToData(data interface{}) error {\n\n\tvalue, err := e.jsonHandler.Read()\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"read: %s\", err.Error())\n\t}\n\n\tif err := json.Unmarshal(value, data); err != nil {\n\t\treturn fmt.Errorf(\"json unmarshal: %s\", err.Error())\n\t}\n\n\treturn nil\n}" ]
[ "0.6332206", "0.5748831", "0.56618845", "0.55320317", "0.533502", "0.5277118", "0.5213886", "0.5193629", "0.51856875", "0.5159931", "0.51567155", "0.5151557", "0.51295197", "0.5118864", "0.5102036", "0.5100363", "0.50929105", "0.5078685", "0.50433415", "0.5042938", "0.5039756", "0.50271475", "0.5026254", "0.50062937", "0.49919033", "0.49694794", "0.4968295", "0.49535608", "0.49268168", "0.49220285", "0.4913983", "0.49055773", "0.49044707", "0.49007064", "0.4894507", "0.48898175", "0.4868001", "0.4849799", "0.48257083", "0.48217335", "0.480526", "0.47854877", "0.47807997", "0.47794917", "0.47783762", "0.4775003", "0.47665915", "0.47665644", "0.47629216", "0.47560117", "0.4744698", "0.4738837", "0.47375977", "0.4729251", "0.47279948", "0.4725742", "0.4723642", "0.4718446", "0.469949", "0.46978268", "0.46973413", "0.46929204", "0.46916324", "0.4689262", "0.468491", "0.46833694", "0.4683028", "0.46820244", "0.46748072", "0.46695024", "0.46689683", "0.46634522", "0.46601042", "0.46576932", "0.46518144", "0.46498996", "0.4645584", "0.46441644", "0.46416822", "0.46351022", "0.46320152", "0.46318495", "0.46104705", "0.46075022", "0.46012652", "0.45997608", "0.45991376", "0.45991376", "0.45985138", "0.45980895", "0.45933568", "0.4591843", "0.4584396", "0.45825428", "0.45774037", "0.45704588", "0.45629433", "0.45564532", "0.4553383", "0.45497116" ]
0.6436924
0
gatherJVMStats gather the JVM metrics and add results to the accumulator
func (logstash *Logstash) gatherJVMStats(address string, accumulator telegraf.Accumulator) error {
	jvmStats := &JVMStats{}

	err := logstash.gatherJSONData(address, jvmStats)
	if err != nil {
		return err
	}

	tags := map[string]string{
		"node_id":      jvmStats.ID,
		"node_name":    jvmStats.Name,
		"node_version": jvmStats.Version,
		"source":       jvmStats.Host,
	}

	flattener := jsonParser.JSONFlattener{}
	err = flattener.FlattenJSON("", jvmStats.JVM)
	if err != nil {
		return err
	}
	accumulator.AddFields("logstash_jvm", flattener.Fields, tags)

	return nil
}
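The document above calls a gatherJSONData helper that this record does not include. A minimal sketch of such a helper follows, assuming the Logstash struct carries an http.Client in a client field and that the stats endpoint answers with a JSON body decodable into the target struct; the client field name and the error wording are illustrative assumptions, not taken from the record (assumed imports: encoding/json, fmt, net/http).

// gatherJSONData issues an HTTP GET against the given address and decodes
// the JSON response body into value. Sketch only: the client field and the
// non-200 handling are assumptions, not part of this record.
func (logstash *Logstash) gatherJSONData(address string, value interface{}) error {
	response, err := logstash.client.Get(address)
	if err != nil {
		return err
	}
	defer response.Body.Close()

	// Fail early on non-200 answers so a decode error does not mask an
	// unreachable or misconfigured endpoint.
	if response.StatusCode != http.StatusOK {
		return fmt.Errorf("%s returned HTTP status %s", address, response.Status)
	}

	return json.NewDecoder(response.Body).Decode(value)
}

With a helper of this shape, gatherJVMStats stays a thin pipeline: fetch, tag by node identity, flatten the nested JVM object (JSONFlattener implementations typically join nested keys with underscores, so mem.heap_used_in_bytes becomes a mem_heap_used_in_bytes field), and hand the flat field map to the accumulator under the logstash_jvm measurement.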
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *Collector) Transform(allStats *NodeStatsResponse) (metrics []*exportertools.Metric) {\n for _, stats := range allStats.Nodes {\n // GC Stats\n for _, gcstats := range stats.JVM.GC.Collectors {\n metrics = append(metrics, c.ConvertToMetric(\"jvm_gc_collection_seconds_count\",\n float64(gcstats.CollectionCount),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_gc_collection_seconds_sum\",\n float64(gcstats.CollectionTime / 1000),\n \"COUNTER\",\n nil))\n }\n\n // Breaker stats\n for _, bstats := range stats.Breakers {\n metrics = append(metrics, c.ConvertToMetric(\"breakers_estimated_size_bytes\",\n float64(bstats.EstimatedSize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"breakers_limit_size_bytes\",\n float64(bstats.LimitSize),\n \"GAUGE\",\n nil))\n }\n\n // Thread Pool stats\n for pool, pstats := range stats.ThreadPool {\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_completed_count\",\n float64(pstats.Completed),\n \"COUNTER\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_rejected_count\",\n float64(pstats.Rejected),\n \"COUNTER\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_active_count\",\n float64(pstats.Active),\n \"GAUGE\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_threads_count\",\n float64(pstats.Threads),\n \"GAUGE\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_largest_count\",\n float64(pstats.Largest),\n \"GAUGE\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_queue_count\",\n float64(pstats.Queue),\n \"GAUGE\",\n map[string]string{\"type\": pool}))\n }\n\n // JVM Memory Stats\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_committed_bytes\",\n float64(stats.JVM.Mem.HeapCommitted),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_used_bytes\",\n float64(stats.JVM.Mem.HeapUsed),\n \"GAUGE\",\n nil))\n\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_max_bytes\",\n float64(stats.JVM.Mem.HeapMax),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_committed_bytes\",\n float64(stats.JVM.Mem.NonHeapCommitted),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_used_bytes\",\n float64(stats.JVM.Mem.NonHeapUsed),\n \"GAUGE\",\n nil))\n\n // Indices Stats)\n metrics = append(metrics, c.ConvertToMetric(\"indices_fielddata_memory_size_bytes\",\n float64(stats.Indices.FieldData.MemorySize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_fielddata_evictions\",\n float64(stats.Indices.FieldData.Evictions),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_filter_cache_memory_size_bytes\",\n float64(stats.Indices.FilterCache.MemorySize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_filter_cache_evictions\",\n float64(stats.Indices.FilterCache.Evictions),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_query_cache_memory_size_bytes\",\n float64(stats.Indices.QueryCache.MemorySize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_query_cache_evictions\",\n float64(stats.Indices.QueryCache.Evictions),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, 
c.ConvertToMetric(\"indices_request_cache_memory_size_bytes\",\n float64(stats.Indices.QueryCache.MemorySize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_request_cache_evictions\",\n float64(stats.Indices.QueryCache.Evictions),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_docs\",\n float64(stats.Indices.Docs.Count),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_docs_deleted\",\n float64(stats.Indices.Docs.Deleted),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_segments_memory_bytes\",\n float64(stats.Indices.Segments.Memory),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_segments_count\",\n float64(stats.Indices.Segments.Count),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_store_size_bytes\",\n float64(stats.Indices.Store.Size),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_store_throttle_time_ms_total\",\n float64(stats.Indices.Store.ThrottleTime),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_flush_total\",\n float64(stats.Indices.Flush.Total),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_flush_time_ms_total\",\n float64(stats.Indices.Flush.Time),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_indexing_index_time_ms_total\",\n float64(stats.Indices.Indexing.IndexTime),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_indexing_index_total\",\n float64(stats.Indices.Indexing.IndexTotal),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_merges_total_time_ms_total\",\n float64(stats.Indices.Merges.TotalTime),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_merges_total_size_bytes_total\",\n float64(stats.Indices.Merges.TotalSize),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_merges_total\",\n float64(stats.Indices.Merges.Total),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_refresh_total_time_ms_total\",\n float64(stats.Indices.Refresh.TotalTime),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_refresh_total\",\n float64(stats.Indices.Refresh.Total),\n \"COUNTER\",\n nil))\n\n // Transport Stats)\n metrics = append(metrics, c.ConvertToMetric(\"transport_rx_packets_total\",\n float64(stats.Transport.RxCount),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"transport_rx_size_bytes_total\",\n float64(stats.Transport.RxSize),\n \"COUNTER\",\n nil))\n\n\n metrics = append(metrics, c.ConvertToMetric(\"transport_tx_packets_total\",\n float64(stats.Transport.TxCount),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"transport_tx_size_bytes_total\",\n float64(stats.Transport.TxSize),\n \"COUNTER\",\n nil))\n\n // Process Stats)\n metrics = append(metrics, c.ConvertToMetric(\"process_cpu_percent\",\n float64(stats.Process.CPU.Percent),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_mem_resident_size_bytes\",\n float64(stats.Process.Memory.Resident),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_mem_share_size_bytes\",\n float64(stats.Process.Memory.Share),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, 
c.ConvertToMetric(\"process_mem_virtual_size_bytes\",\n float64(stats.Process.Memory.TotalVirtual),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_open_files_count\",\n float64(stats.Process.OpenFD),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_cpu_time_seconds_sum\",\n float64(stats.Process.CPU.Total / 1000),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_cpu_time_seconds_sum\",\n float64(stats.Process.CPU.Sys / 1000),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_cpu_time_seconds_sum\",\n float64(stats.Process.CPU.User / 1000),\n \"COUNTER\",\n nil))\n\n }\n\n return metrics\n}", "func CollectRuntimeMemStats(statsd scopedstatsd.Client, memstatsCurrent *runtime.MemStats, memstatsPrev *runtime.MemStats, tags []string) {\n\t// Collect number of bytes obtained from system.\n\tstatsd.Gauge(\"mem.sys_bytes\", float64(memstatsCurrent.Sys), tags, 1)\n\n\t// Collect number of pointer lookups.\n\tstatsd.Gauge(\"mem.pointer_lookups\", float64(memstatsCurrent.Lookups), tags, 1)\n\n\t// Collect increased heap objects allocated compared to last flush.\n\tstatsd.Count(\"mem.mallocs_total\", int64(memstatsCurrent.Mallocs-memstatsPrev.Mallocs), tags, 1)\n\n\t// Collect increased heap objects freed compared to last flush.\n\tstatsd.Count(\"mem.frees_total\", int64(memstatsCurrent.Frees-memstatsPrev.Frees), tags, 1)\n\n\t// Collect number of mallocs.\n\tstatsd.Gauge(\"mem.mallocs_count\", float64(memstatsCurrent.Mallocs-memstatsCurrent.Frees), tags, 1)\n\n\t// Collect number of bytes newly allocated for heap objects compared to last flush.\n\tstatsd.Count(\"mem.heap_alloc_bytes_total\", int64(memstatsCurrent.TotalAlloc-memstatsPrev.TotalAlloc), tags, 1)\n\n\t// Collect number of heap bytes allocated and still in use.\n\tstatsd.Gauge(\"mem.heap_alloc_bytes\", float64(memstatsCurrent.HeapAlloc), tags, 1)\n\n\t// Collect number of heap bytes obtained from system.\n\tstatsd.Gauge(\"mem.heap_sys_bytes\", float64(memstatsCurrent.HeapSys), tags, 1)\n\n\t// Collect number of heap bytes waiting to be used.\n\tstatsd.Gauge(\"mem.heap_idle_bytes\", float64(memstatsCurrent.HeapIdle), tags, 1)\n\n\t// Collect number of heap bytes that are in use.\n\tstatsd.Gauge(\"mem.heap_inuse_bytes\", float64(memstatsCurrent.HeapInuse), tags, 1)\n\n\t// Collect number of heap bytes released to OS.\n\tstatsd.Gauge(\"mem.heap_released_bytes\", float64(memstatsCurrent.HeapReleased), tags, 1)\n\n\t// Collect number of allocated objects.\n\tstatsd.Gauge(\"mem.heap_objects_count\", float64(memstatsCurrent.HeapObjects), tags, 1)\n\n\t// Collect number of bytes in use by the stack allocator.\n\tstatsd.Gauge(\"mem.stack_inuse_bytes\", float64(memstatsCurrent.StackInuse), tags, 1)\n\n\t// Collect number of bytes obtained from system for stack allocator.\n\tstatsd.Gauge(\"mem.stack_sys_bytes\", float64(memstatsCurrent.StackSys), tags, 1)\n\n\t// Collect number of bytes in use by mspan structures.\n\tstatsd.Gauge(\"mem.mspan_inuse_bytes\", float64(memstatsCurrent.MSpanInuse), tags, 1)\n\n\t// Collect number of bytes used for mspan structures obtained from system.\n\tstatsd.Gauge(\"mem.mspan_sys_bytes\", float64(memstatsCurrent.MSpanSys), tags, 1)\n\n\t// Collect number of bytes in use by mcache structures.\n\tstatsd.Gauge(\"mem.mcache_inuse_bytes\", float64(memstatsCurrent.MCacheInuse), tags, 1)\n\n\t// Collect number of bytes used for mcache structures obtained from 
system.\n\tstatsd.Gauge(\"mem.mcache_sys_bytes\", float64(memstatsCurrent.MCacheSys), tags, 1)\n\n\t// Collect number of bytes used by the profiling bucket hash table.\n\tstatsd.Gauge(\"mem.buck_hash_sys_bytes\", float64(memstatsCurrent.BuckHashSys), tags, 1)\n\n\t// Collect number of bytes used for garbage collection system metadata.\n\tstatsd.Gauge(\"mem.gc_sys_bytes\", float64(memstatsCurrent.GCSys), tags, 1)\n\n\t// Collect number of bytes used for other system allocations.\n\tstatsd.Gauge(\"mem.other_sys_bytes\", float64(memstatsCurrent.OtherSys), tags, 1)\n\n\t// Collect number of heap bytes when next garbage collection will take pace.\n\tstatsd.Gauge(\"mem.next_gc_bytes\", float64(memstatsCurrent.NextGC), tags, 1)\n}", "func (logstash *Logstash) gatherProcessStats(address string, accumulator telegraf.Accumulator) error {\n\tprocessStats := &ProcessStats{}\n\n\terr := logstash.gatherJSONData(address, processStats)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttags := map[string]string{\n\t\t\"node_id\": processStats.ID,\n\t\t\"node_name\": processStats.Name,\n\t\t\"node_version\": processStats.Version,\n\t\t\"source\": processStats.Host,\n\t}\n\n\tflattener := jsonParser.JSONFlattener{}\n\terr = flattener.FlattenJSON(\"\", processStats.Process)\n\tif err != nil {\n\t\treturn err\n\t}\n\taccumulator.AddFields(\"logstash_process\", flattener.Fields, tags)\n\n\treturn nil\n}", "func CollectProcessMetrics(refresh time.Duration) {\n\t// Short circuit if the metics system is disabled\n\tif !Enabled {\n\t\treturn\n\t}\n\t// Create the various data collectors\n\tmemstates := make([]*runtime.MemStats, 2)\n\tdiskstates := make([]*DiskStats, 2)\n\tfor i := 0; i < len(memstates); i++ {\n\t\tmemstates[i] = new(runtime.MemStats)\n\t\tdiskstates[i] = new(DiskStats)\n\t}\n\t// Define the various metics to collect\n\tmemAllocs := metics.GetOrRegisterMeter(\"system/memory/allocs\", metics.DefaultRegistry)\n\tmemFrees := metics.GetOrRegisterMeter(\"system/memory/frees\", metics.DefaultRegistry)\n\tmemInuse := metics.GetOrRegisterMeter(\"system/memory/inuse\", metics.DefaultRegistry)\n\tmemPauses := metics.GetOrRegisterMeter(\"system/memory/pauses\", metics.DefaultRegistry)\n\n\tvar diskReads, diskReadBytes, diskWrites, diskWriteBytes metics.Meter\n\tif err := ReadDiskStats(diskstates[0]); err == nil {\n\t\tdiskReads = metics.GetOrRegisterMeter(\"system/disk/readcount\", metics.DefaultRegistry)\n\t\tdiskReadBytes = metics.GetOrRegisterMeter(\"system/disk/readdata\", metics.DefaultRegistry)\n\t\tdiskWrites = metics.GetOrRegisterMeter(\"system/disk/writecount\", metics.DefaultRegistry)\n\t\tdiskWriteBytes = metics.GetOrRegisterMeter(\"system/disk/writedata\", metics.DefaultRegistry)\n\t} else {\n\t\tbgmlogs.Debug(\"Failed to read disk metics\", \"err\", err)\n\t}\n\t// Iterate loading the different states and updating the meters\n\tfor i := 1; ; i++ {\n\t\truntime.ReadMemStats(memstates[i%2])\n\t\tmemAllocs.Mark(int64(memstates[i%2].Mallocs - memstates[(i-1)%2].Mallocs))\n\t\tmemFrees.Mark(int64(memstates[i%2].Frees - memstates[(i-1)%2].Frees))\n\t\tmemInuse.Mark(int64(memstates[i%2].Alloc - memstates[(i-1)%2].Alloc))\n\t\tmemPauses.Mark(int64(memstates[i%2].PauseTotalNs - memstates[(i-1)%2].PauseTotalNs))\n\n\t\tif ReadDiskStats(diskstates[i%2]) == nil {\n\t\t\tdiskReads.Mark(diskstates[i%2].ReadCount - diskstates[(i-1)%2].ReadCount)\n\t\t\tdiskReadBytes.Mark(diskstates[i%2].ReadBytes - diskstates[(i-1)%2].ReadBytes)\n\t\t\tdiskWrites.Mark(diskstates[i%2].WriteCount - 
diskstates[(i-1)%2].WriteCount)\n\t\t\tdiskWriteBytes.Mark(diskstates[i%2].WriteBytes - diskstates[(i-1)%2].WriteBytes)\n\t\t}\n\t\ttime.Sleep(refresh)\n\t}\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\t// Reset metrics.\n\tfor _, vec := range e.gauges {\n\t\tvec.Reset()\n\t}\n\n\tfor _, vec := range e.counters {\n\t\tvec.Reset()\n\t}\n\n\tresp, err := e.client.Get(e.URI)\n\tif err != nil {\n\t\te.up.Set(0)\n\t\tlog.Printf(\"Error while querying Elasticsearch: %v\", err)\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\tlog.Printf(\"Failed to read ES response body: %v\", err)\n\t\te.up.Set(0)\n\t\treturn\n\t}\n\n\te.up.Set(1)\n\n\tvar all_stats NodeStatsResponse\n\terr = json.Unmarshal(body, &all_stats)\n\n\tif err != nil {\n\t\tlog.Printf(\"Failed to unmarshal JSON into struct: %v\", err)\n\t\treturn\n\t}\n\n\t// Regardless of whether we're querying the local host or the whole\n\t// cluster, here we can just iterate through all nodes found.\n\n\tfor node, stats := range all_stats.Nodes {\n\t\tlog.Printf(\"Processing node %v\", node)\n\t\t// GC Stats\n\t\tfor collector, gcstats := range stats.JVM.GC.Collectors {\n\t\t\te.counters[\"jvm_gc_collection_count\"].WithLabelValues(all_stats.ClusterName, stats.Name, collector).Set(float64(gcstats.CollectionCount))\n\t\t\te.counters[\"jvm_gc_collection_time_in_millis\"].WithLabelValues(all_stats.ClusterName, stats.Name, collector).Set(float64(gcstats.CollectionTime))\n\t\t}\n\n\t\t// Breaker stats\n\t\tfor breaker, bstats := range stats.Breakers {\n\t\t\te.gauges[\"breakers_estimated_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name, breaker).Set(float64(bstats.EstimatedSize))\n\t\t\te.gauges[\"breakers_limit_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name, breaker).Set(float64(bstats.LimitSize))\n\t\t}\n\n\t\t// JVM Memory Stats\n\t\te.gauges[\"jvm_mem_heap_committed_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.HeapCommitted))\n\t\te.gauges[\"jvm_mem_heap_used_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.HeapUsed))\n\t\te.gauges[\"jvm_mem_heap_max_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.HeapMax))\n\t\te.gauges[\"jvm_mem_non_heap_committed_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.NonHeapCommitted))\n\t\te.gauges[\"jvm_mem_non_heap_used_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.NonHeapUsed))\n\n\t\t// Indices Stats\n\t\te.gauges[\"indices_fielddata_evictions\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FieldData.Evictions))\n\t\te.gauges[\"indices_fielddata_memory_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FieldData.MemorySize))\n\t\te.gauges[\"indices_filter_cache_evictions\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FilterCache.Evictions))\n\t\te.gauges[\"indices_filter_cache_memory_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FilterCache.MemorySize))\n\n\t\te.gauges[\"indices_docs_count\"].WithLabelValues(all_stats.ClusterName, 
stats.Name).Set(float64(stats.Indices.Docs.Count))\n\t\te.gauges[\"indices_docs_deleted\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Docs.Deleted))\n\n\t\te.gauges[\"indices_segments_memory_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Segments.Memory))\n\n\t\te.gauges[\"indices_store_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Store.Size))\n\t\te.counters[\"indices_store_throttle_time_in_millis\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Store.ThrottleTime))\n\n\t\te.counters[\"indices_flush_total\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Flush.Total))\n\t\te.counters[\"indices_flush_time_in_millis\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Flush.Time))\n\n\t\t// Transport Stats\n\t\te.counters[\"transport_rx_count\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.RxCount))\n\t\te.counters[\"transport_rx_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.RxSize))\n\t\te.counters[\"transport_tx_count\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.TxCount))\n\t\te.counters[\"transport_tx_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.TxSize))\n\t}\n\n\t// Report metrics.\n\tch <- e.up\n\n\tfor _, vec := range e.counters {\n\t\tvec.Collect(ch)\n\t}\n\n\tfor _, vec := range e.gauges {\n\t\tvec.Collect(ch)\n\t}\n}", "func CollectRuntimeMetrics(registry *Registry) {\n\tCollectMemStats(registry)\n\tCollectSysStats(registry)\n}", "func (logstash *Logstash) Gather(accumulator telegraf.Accumulator) error {\n\tif logstash.client == nil {\n\t\tclient, err := logstash.createHTTPClient()\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlogstash.client = client\n\t}\n\n\tif choice.Contains(\"jvm\", logstash.Collect) {\n\t\tjvmURL, err := url.Parse(logstash.URL + jvmStats)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := logstash.gatherJVMStats(jvmURL.String(), accumulator); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif choice.Contains(\"process\", logstash.Collect) {\n\t\tprocessURL, err := url.Parse(logstash.URL + processStats)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := logstash.gatherProcessStats(processURL.String(), accumulator); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif choice.Contains(\"pipelines\", logstash.Collect) {\n\t\tif logstash.SinglePipeline {\n\t\t\tpipelineURL, err := url.Parse(logstash.URL + pipelineStats)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := logstash.gatherPipelineStats(pipelineURL.String(), accumulator); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tpipelinesURL, err := url.Parse(logstash.URL + pipelinesStats)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := logstash.gatherPipelinesStats(pipelinesURL.String(), accumulator); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (s *Server) agentMemoryStats(metrics cgm.Metrics, mtags []string) {\n\t// var mem syscall.Rusage\n\t// if err := syscall.Getrusage(syscall.RUSAGE_SELF, &mem); err == nil {\n\t// \tmetrics[tags.MetricNameWithStreamTags(\"agent_max_rss\", tags.FromList(ctags))] = cgm.Metric{Value: uint64(mem.Maxrss * 1024), Type: \"L\"} // maximum resident set size used (in kilobytes)\n\t// } else 
{\n\t// \ts.logger.Warn().Err(err).Msg(\"collecting rss from system\")\n\t// }\n}", "func (h *Hugepages) gatherRootStats(acc telegraf.Accumulator) error {\n\treturn h.gatherFromHugepagePath(acc, \"hugepages_\"+rootHugepages, h.rootHugepagePath, hugepagesMetricsRoot, nil)\n}", "func (sr *ServicedStatsReporter) gatherStats(t time.Time) []Sample {\n\tstats := []Sample{}\n\t// Handle the host metrics.\n\treg, _ := sr.hostRegistry.(*metrics.StandardRegistry)\n\treg.Each(func(name string, i interface{}) {\n\t\ttagmap := map[string]string{\n\t\t\t\"controlplane_host_id\": sr.hostID,\n\t\t}\n\t\tswitch metric := i.(type) {\n\t\tcase metrics.Gauge:\n\t\t\tstats = append(stats, Sample{name, strconv.FormatInt(metric.Value(), 10), t.Unix(), tagmap})\n\t\tcase metrics.GaugeFloat64:\n\t\t\tstats = append(stats, Sample{name, strconv.FormatFloat(metric.Value(), 'f', -1, 32), t.Unix(), tagmap})\n\t\t}\n\t})\n\t// Handle each container's metrics.\n\tfor key, registry := range sr.containerRegistries {\n\t\treg, _ := registry.(*metrics.StandardRegistry)\n\t\treg.Each(func(name string, i interface{}) {\n\t\t\ttagmap := map[string]string{\n\t\t\t\t\"controlplane_host_id\": sr.hostID,\n\t\t\t\t\"controlplane_service_id\": key.serviceID,\n\t\t\t\t\"controlplane_instance_id\": strconv.FormatInt(int64(key.instanceID), 10),\n\t\t\t}\n\t\t\tswitch metric := i.(type) {\n\t\t\tcase metrics.Gauge:\n\t\t\t\tstats = append(stats, Sample{name, strconv.FormatInt(metric.Value(), 10), t.Unix(), tagmap})\n\t\t\tcase metrics.GaugeFloat64:\n\t\t\t\tstats = append(stats, Sample{name, strconv.FormatFloat(metric.Value(), 'f', -1, 32), t.Unix(), tagmap})\n\t\t\t}\n\t\t})\n\t}\n\treturn stats\n}", "func (h *Hugepages) gatherStatsPerNode(acc telegraf.Accumulator) error {\n\tnodeDirs, err := os.ReadDir(h.numaNodePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// read metrics from: node*/hugepages/hugepages-*/*\n\tfor _, nodeDir := range nodeDirs {\n\t\tif !nodeDir.IsDir() || !strings.HasPrefix(nodeDir.Name(), \"node\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tnodeNumber := strings.TrimPrefix(nodeDir.Name(), \"node\")\n\t\t_, err := strconv.Atoi(nodeNumber)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tperNodeTags := map[string]string{\n\t\t\t\"node\": nodeNumber,\n\t\t}\n\t\thugepagesPath := filepath.Join(h.numaNodePath, nodeDir.Name(), \"hugepages\")\n\t\terr = h.gatherFromHugepagePath(acc, \"hugepages_\"+perNodeHugepages, hugepagesPath, hugepagesMetricsPerNUMANode, perNodeTags)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (logstash *Logstash) gatherPipelineStats(address string, accumulator telegraf.Accumulator) error {\n\tpipelineStats := &PipelineStats{}\n\n\terr := logstash.gatherJSONData(address, pipelineStats)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttags := map[string]string{\n\t\t\"node_id\": pipelineStats.ID,\n\t\t\"node_name\": pipelineStats.Name,\n\t\t\"node_version\": pipelineStats.Version,\n\t\t\"source\": pipelineStats.Host,\n\t}\n\n\tflattener := jsonParser.JSONFlattener{}\n\terr = flattener.FlattenJSON(\"\", pipelineStats.Pipeline.Events)\n\tif err != nil {\n\t\treturn err\n\t}\n\taccumulator.AddFields(\"logstash_events\", flattener.Fields, tags)\n\n\terr = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Inputs, \"input\", tags, accumulator)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Filters, \"filter\", tags, accumulator)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = 
logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Outputs, \"output\", tags, accumulator)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = logstash.gatherQueueStats(&pipelineStats.Pipeline.Queue, tags, accumulator)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (e *Exporter) collect(ch chan<- prometheus.Metric) error {\n\tvar mempct, memtot, memfree float64\n\tif v, e := mem.VirtualMemory(); e == nil {\n\t\tmempct = v.UsedPercent\n\t\tmemtot = float64(v.Total)\n\t\tmemfree = float64(v.Free)\n\t}\n\tvar swappct, swaptot, swapfree float64\n\tif v, e := mem.SwapMemory(); e == nil {\n\t\tswappct = v.UsedPercent\n\t\tswaptot = float64(v.Total)\n\t\tswapfree = float64(v.Free)\n\t}\n\tvar cpupct float64\n\tif c, e := cpu.Percent(time.Millisecond, false); e == nil {\n\t\tcpupct = c[0] // one value since we didn't ask per cpu\n\t}\n\tvar load1, load5, load15 float64\n\tif l, e := load.Avg(); e == nil {\n\t\tload1 = l.Load1\n\t\tload5 = l.Load5\n\t\tload15 = l.Load15\n\t}\n\n\tvar cpuTotal, vsize, rss, openFDs, maxFDs, maxVsize float64\n\tif proc, err := procfs.NewProc(int(*pid)); err == nil {\n\t\tif stat, err := proc.NewStat(); err == nil {\n\t\t\tcpuTotal = float64(stat.CPUTime())\n\t\t\tvsize = float64(stat.VirtualMemory())\n\t\t\trss = float64(stat.ResidentMemory())\n\t\t}\n\t\tif fds, err := proc.FileDescriptorsLen(); err == nil {\n\t\t\topenFDs = float64(fds)\n\t\t}\n\t\tif limits, err := proc.NewLimits(); err == nil {\n\t\t\tmaxFDs = float64(limits.OpenFiles)\n\t\t\tmaxVsize = float64(limits.AddressSpace)\n\t\t}\n\t}\n\n\tvar procCpu, procMem float64\n\tvar estCon, lisCon, othCon, totCon, closeCon, timeCon, openFiles float64\n\tvar nThreads float64\n\tif proc, err := process.NewProcess(int32(*pid)); err == nil {\n\t\tif v, e := proc.CPUPercent(); e == nil {\n\t\t\tprocCpu = float64(v)\n\t\t}\n\t\tif v, e := proc.MemoryPercent(); e == nil {\n\t\t\tprocMem = float64(v)\n\t\t}\n\n\t\tif v, e := proc.NumThreads(); e == nil {\n\t\t\tnThreads = float64(v)\n\t\t}\n\t\tif connections, e := proc.Connections(); e == nil {\n\t\t\tfor _, v := range connections {\n\t\t\t\tif v.Status == \"LISTEN\" {\n\t\t\t\t\tlisCon += 1\n\t\t\t\t} else if v.Status == \"ESTABLISHED\" {\n\t\t\t\t\testCon += 1\n\t\t\t\t} else if v.Status == \"TIME_WAIT\" {\n\t\t\t\t\ttimeCon += 1\n\t\t\t\t} else if v.Status == \"CLOSE_WAIT\" {\n\t\t\t\t\tcloseCon += 1\n\t\t\t\t} else {\n\t\t\t\t\tothCon += 1\n\t\t\t\t}\n\t\t\t}\n\t\t\ttotCon = lisCon + estCon + timeCon + closeCon + othCon\n\t\t}\n\t\tif oFiles, e := proc.OpenFiles(); e == nil {\n\t\t\topenFiles = float64(len(oFiles))\n\t\t}\n\t}\n\n\t// metrics from process collector\n\tch <- prometheus.MustNewConstMetric(e.cpuTotal, prometheus.CounterValue, cpuTotal)\n\tch <- prometheus.MustNewConstMetric(e.openFDs, prometheus.CounterValue, openFDs)\n\tch <- prometheus.MustNewConstMetric(e.maxFDs, prometheus.CounterValue, maxFDs)\n\tch <- prometheus.MustNewConstMetric(e.vsize, prometheus.CounterValue, vsize)\n\tch <- prometheus.MustNewConstMetric(e.maxVsize, prometheus.CounterValue, maxVsize)\n\tch <- prometheus.MustNewConstMetric(e.rss, prometheus.CounterValue, rss)\n\t// node specific metrics\n\tch <- prometheus.MustNewConstMetric(e.memPercent, prometheus.CounterValue, mempct)\n\tch <- prometheus.MustNewConstMetric(e.memTotal, prometheus.CounterValue, memtot)\n\tch <- prometheus.MustNewConstMetric(e.memFree, prometheus.CounterValue, memfree)\n\tch <- prometheus.MustNewConstMetric(e.swapPercent, prometheus.CounterValue, swappct)\n\tch <- 
prometheus.MustNewConstMetric(e.swapTotal, prometheus.CounterValue, swaptot)\n\tch <- prometheus.MustNewConstMetric(e.swapFree, prometheus.CounterValue, swapfree)\n\tch <- prometheus.MustNewConstMetric(e.numCpus, prometheus.CounterValue, float64(runtime.NumCPU()))\n\tch <- prometheus.MustNewConstMetric(e.load1, prometheus.CounterValue, load1)\n\tch <- prometheus.MustNewConstMetric(e.load5, prometheus.CounterValue, load5)\n\tch <- prometheus.MustNewConstMetric(e.load15, prometheus.CounterValue, load15)\n\t// process specific metrics\n\tch <- prometheus.MustNewConstMetric(e.procCpu, prometheus.CounterValue, procCpu)\n\tch <- prometheus.MustNewConstMetric(e.procMem, prometheus.CounterValue, procMem)\n\tch <- prometheus.MustNewConstMetric(e.numThreads, prometheus.CounterValue, nThreads)\n\tch <- prometheus.MustNewConstMetric(e.cpuPercent, prometheus.CounterValue, cpupct)\n\tch <- prometheus.MustNewConstMetric(e.openFiles, prometheus.CounterValue, openFiles)\n\tch <- prometheus.MustNewConstMetric(e.totCon, prometheus.CounterValue, totCon)\n\tch <- prometheus.MustNewConstMetric(e.lisCon, prometheus.CounterValue, lisCon)\n\tch <- prometheus.MustNewConstMetric(e.estCon, prometheus.CounterValue, estCon)\n\tch <- prometheus.MustNewConstMetric(e.closeCon, prometheus.CounterValue, closeCon)\n\tch <- prometheus.MustNewConstMetric(e.timeCon, prometheus.CounterValue, timeCon)\n\treturn nil\n}", "func (h *Hugepages) gatherStatsFromMeminfo(acc telegraf.Accumulator) error {\n\tmeminfo, err := os.ReadFile(h.meminfoPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmetrics := make(map[string]interface{})\n\tlines := bytes.Split(meminfo, newlineByte)\n\tfor _, line := range lines {\n\t\tfields := bytes.Fields(line)\n\t\tif len(fields) < 2 {\n\t\t\tcontinue\n\t\t}\n\t\tfieldName := string(bytes.TrimSuffix(fields[0], colonByte))\n\t\tmetricName, ok := hugepagesMetricsFromMeminfo[fieldName]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tfieldValue, err := strconv.Atoi(string(fields[1]))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to convert content of %q: %w\", fieldName, err)\n\t\t}\n\n\t\tmetrics[metricName] = fieldValue\n\t}\n\n\tacc.AddFields(\"hugepages_\"+meminfoHugepages, metrics, map[string]string{})\n\treturn nil\n}", "func (p *ProcMetrics) Collect() {\n\tif m, err := CollectProcInfo(p.pid); err == nil {\n\t\tnow := time.Now()\n\n\t\tif !p.lastTime.IsZero() {\n\t\t\tratio := 1.0\n\t\t\tswitch {\n\t\t\tcase m.CPU.Period > 0 && m.CPU.Quota > 0:\n\t\t\t\tratio = float64(m.CPU.Quota) / float64(m.CPU.Period)\n\t\t\tcase m.CPU.Shares > 0:\n\t\t\t\tratio = float64(m.CPU.Shares) / 1024\n\t\t\tdefault:\n\t\t\t\tratio = 1 / float64(runtime.NumCPU())\n\t\t\t}\n\n\t\t\tinterval := ratio * float64(now.Sub(p.lastTime))\n\n\t\t\tp.cpu.user.time = m.CPU.User - p.last.CPU.User\n\t\t\tp.cpu.user.percent = 100 * float64(p.cpu.user.time) / interval\n\n\t\t\tp.cpu.system.time = m.CPU.Sys - p.last.CPU.Sys\n\t\t\tp.cpu.system.percent = 100 * float64(p.cpu.system.time) / interval\n\n\t\t\tp.cpu.total.time = (m.CPU.User + m.CPU.Sys) - (p.last.CPU.User + p.last.CPU.Sys)\n\t\t\tp.cpu.total.percent = 100 * float64(p.cpu.total.time) / interval\n\t\t}\n\n\t\tp.memory.available = m.Memory.Available\n\t\tp.memory.size = m.Memory.Size\n\t\tp.memory.resident.usage = m.Memory.Resident\n\t\tp.memory.resident.percent = 100 * float64(p.memory.resident.usage) / float64(p.memory.available)\n\t\tp.memory.shared.usage = m.Memory.Shared\n\t\tp.memory.text.usage = m.Memory.Text\n\t\tp.memory.data.usage = 
m.Memory.Data\n\t\tp.memory.pagefault.major.count = m.Memory.MajorPageFaults - p.last.Memory.MajorPageFaults\n\t\tp.memory.pagefault.minor.count = m.Memory.MinorPageFaults - p.last.Memory.MinorPageFaults\n\n\t\tp.files.open = m.Files.Open\n\t\tp.files.max = m.Files.Max\n\n\t\tp.threads.num = m.Threads.Num\n\t\tp.threads.switches.voluntary.count = m.Threads.VoluntaryContextSwitches - p.last.Threads.VoluntaryContextSwitches\n\t\tp.threads.switches.involuntary.count = m.Threads.InvoluntaryContextSwitches - p.last.Threads.InvoluntaryContextSwitches\n\n\t\tp.last = m\n\t\tp.lastTime = now\n\t\tp.engine.Report(p)\n\t}\n}", "func FetchAppServerMemStats(r Result) []float32 {\n\treturn r.AppServerStats().Mem\n}", "func (g gatherer) GatherMetrics(ctx context.Context, out *apm.Metrics) error {\n\tmetricFamilies, err := g.p.Gather()\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\tfor _, mf := range metricFamilies {\n\t\tname := mf.GetName()\n\t\tswitch mf.GetType() {\n\t\tcase dto.MetricType_COUNTER:\n\t\t\tfor _, m := range mf.GetMetric() {\n\t\t\t\tv := m.GetCounter().GetValue()\n\t\t\t\tout.Add(name, makeLabels(m.GetLabel()), v)\n\t\t\t}\n\t\tcase dto.MetricType_GAUGE:\n\t\t\tmetrics := mf.GetMetric()\n\t\t\tif name == \"go_info\" && len(metrics) == 1 && metrics[0].GetGauge().GetValue() == 1 {\n\t\t\t\t// Ignore the \"go_info\" metric from the\n\t\t\t\t// built-in GoCollector, as we provide\n\t\t\t\t// the same information in the payload.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, m := range metrics {\n\t\t\t\tv := m.GetGauge().GetValue()\n\t\t\t\tout.Add(name, makeLabels(m.GetLabel()), v)\n\t\t\t}\n\t\tcase dto.MetricType_UNTYPED:\n\t\t\tfor _, m := range mf.GetMetric() {\n\t\t\t\tv := m.GetUntyped().GetValue()\n\t\t\t\tout.Add(name, makeLabels(m.GetLabel()), v)\n\t\t\t}\n\t\tcase dto.MetricType_SUMMARY:\n\t\t\tfor _, m := range mf.GetMetric() {\n\t\t\t\ts := m.GetSummary()\n\t\t\t\tlabels := makeLabels(m.GetLabel())\n\t\t\t\tout.Add(name+\".count\", labels, float64(s.GetSampleCount()))\n\t\t\t\tout.Add(name+\".total\", labels, float64(s.GetSampleSum()))\n\t\t\t\tfor _, q := range s.GetQuantile() {\n\t\t\t\t\tp := int(q.GetQuantile() * 100)\n\t\t\t\t\tout.Add(name+\".percentile.\"+strconv.Itoa(p), labels, q.GetValue())\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\t// TODO(axw) MetricType_HISTOGRAM\n\t\t}\n\t}\n\treturn nil\n}", "func (p *Psutil) CollectMetrics(mts []plugin.MetricType) ([]plugin.MetricType, error) {\n\tloadReqs := []core.Namespace{}\n\tcpuReqs := []core.Namespace{}\n\tmemReqs := []core.Namespace{}\n\tnetReqs := []core.Namespace{}\n\tdiskReqs := []core.Namespace{}\n\n\tfor _, m := range mts {\n\t\tns := m.Namespace()\n\t\tswitch ns[2].Value {\n\t\tcase \"load\":\n\t\t\tloadReqs = append(loadReqs, ns)\n\t\tcase \"cpu\":\n\t\t\tcpuReqs = append(cpuReqs, ns)\n\t\tcase \"vm\":\n\t\t\tmemReqs = append(memReqs, ns)\n\t\tcase \"net\":\n\t\t\tnetReqs = append(netReqs, ns)\n\t\tcase \"disk\":\n\t\t\tdiskReqs = append(diskReqs, ns)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"Requested metric %s does not match any known psutil metric\", m.Namespace().String())\n\t\t}\n\t}\n\n\tmetrics := []plugin.MetricType{}\n\n\tloadMts, err := loadAvg(loadReqs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetrics = append(metrics, loadMts...)\n\n\tcpuMts, err := cpuTimes(cpuReqs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetrics = append(metrics, cpuMts...)\n\n\tmemMts, err := virtualMemory(memReqs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetrics = append(metrics, 
memMts...)\n\n\tnetMts, err := netIOCounters(netReqs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetrics = append(metrics, netMts...)\n\tmounts := getMountpoints(mts[0].Config().Table())\n\tdiskMts, err := getDiskUsageMetrics(diskReqs, mounts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetrics = append(metrics, diskMts...)\n\n\treturn metrics, nil\n}", "func CollectAllMetrics(client *statsd.Client, log *li.StandardLogger) {\n\n\tvar metrics []metric\n\tmetrics = append(metrics, metric{name: \"gpu.temperature\", cmd: \"vcgencmd measure_temp | egrep -o '[0-9]*\\\\.[0-9]*'\"})\n\tmetrics = append(metrics, metric{name: \"cpu.temperature\", cmd: \"cat /sys/class/thermal/thermal_zone0/temp | awk 'END {print $1/1000}'\"})\n\tmetrics = append(metrics, metric{name: \"threads\", cmd: \"ps -eo nlwp | tail -n +2 | awk '{ num_threads += $1 } END { print num_threads }'\"})\n\tmetrics = append(metrics, metric{name: \"processes\", cmd: \"ps axu | wc -l\"})\n\n\tfor range time.Tick(15 * time.Second) {\n\t\tlog.Info(\"Starting metric collection\")\n\t\tfor _, m := range metrics {\n\t\t\terr := collectMetric(m, client, log)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t}\n\t\t}\n\t}\n}", "func (g gatherer) GatherMetrics(ctx context.Context, m *elasticapm.Metrics) error {\n\tg.r.Each(func(name string, v interface{}) {\n\t\tswitch v := v.(type) {\n\t\tcase metrics.Counter:\n\t\t\tm.Add(name, nil, float64(v.Count()))\n\t\tcase metrics.Gauge:\n\t\t\tm.Add(name, nil, float64(v.Value()))\n\t\tcase metrics.GaugeFloat64:\n\t\t\tm.Add(name, nil, v.Value())\n\t\tcase metrics.Histogram:\n\t\t\tm.Add(name+\".count\", nil, float64(v.Count()))\n\t\t\tm.Add(name+\".total\", nil, float64(v.Sum()))\n\t\t\tm.Add(name+\".min\", nil, float64(v.Min()))\n\t\t\tm.Add(name+\".max\", nil, float64(v.Max()))\n\t\t\tm.Add(name+\".stddev\", nil, v.StdDev())\n\t\t\tm.Add(name+\".percentile.50\", nil, v.Percentile(0.5))\n\t\t\tm.Add(name+\".percentile.95\", nil, v.Percentile(0.95))\n\t\t\tm.Add(name+\".percentile.99\", nil, v.Percentile(0.99))\n\t\tdefault:\n\t\t\t// TODO(axw) Meter, Timer, EWMA\n\t\t}\n\t})\n\treturn nil\n}", "func (logstash *Logstash) gatherPipelinesStats(address string, accumulator telegraf.Accumulator) error {\n\tpipelinesStats := &PipelinesStats{}\n\n\terr := logstash.gatherJSONData(address, pipelinesStats)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor pipelineName, pipeline := range pipelinesStats.Pipelines {\n\t\ttags := map[string]string{\n\t\t\t\"node_id\": pipelinesStats.ID,\n\t\t\t\"node_name\": pipelinesStats.Name,\n\t\t\t\"node_version\": pipelinesStats.Version,\n\t\t\t\"pipeline\": pipelineName,\n\t\t\t\"source\": pipelinesStats.Host,\n\t\t}\n\n\t\tflattener := jsonParser.JSONFlattener{}\n\t\terr := flattener.FlattenJSON(\"\", pipeline.Events)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\taccumulator.AddFields(\"logstash_events\", flattener.Fields, tags)\n\n\t\terr = logstash.gatherPluginsStats(pipeline.Plugins.Inputs, \"input\", tags, accumulator)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = logstash.gatherPluginsStats(pipeline.Plugins.Filters, \"filter\", tags, accumulator)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = logstash.gatherPluginsStats(pipeline.Plugins.Outputs, \"output\", tags, accumulator)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = logstash.gatherQueueStats(&pipeline.Queue, tags, accumulator)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (m *KubeletMonitor) parsePodStats(podStats []stats.PodStats) 
{\n\tfor _, podStat := range podStats {\n\t\tvar cpuUsageNanoCoreSum uint64\n\t\tvar memoryUsageBytesSum uint64\n\t\tfor _, containerStat := range podStat.Containers {\n\t\t\tif containerStat.CPU != nil && containerStat.CPU.UsageNanoCores != nil {\n\t\t\t\tcpuUsageNanoCoreSum += *containerStat.CPU.UsageNanoCores\n\t\t\t}\n\t\t\tif containerStat.Memory != nil && containerStat.Memory.UsageBytes != nil {\n\t\t\t\tmemoryUsageBytesSum += *containerStat.Memory.UsageBytes\n\t\t\t}\n\t\t}\n\t\tglog.V(4).Infof(\"CPU usage of pod %s is %f core\", util.PodStatsKeyFunc(podStat),\n\t\t\tfloat64(cpuUsageNanoCoreSum)/util.NanoToUnit)\n\t\tpodCpuUsageCoreMetrics := metrics.NewEntityResourceMetric(task.PodType, util.PodStatsKeyFunc(podStat),\n\t\t\tmetrics.CPU, metrics.Used, float64(cpuUsageNanoCoreSum)/util.NanoToUnit)\n\n\t\tglog.V(4).Infof(\"Memory usage of pod %s is %f Kb\", util.PodStatsKeyFunc(podStat),\n\t\t\tfloat64(memoryUsageBytesSum)/util.KilobytesToBytes)\n\t\tpodMemoryUsageCoreMetrics := metrics.NewEntityResourceMetric(task.PodType, util.PodStatsKeyFunc(podStat),\n\t\t\tmetrics.Memory, metrics.Used, float64(memoryUsageBytesSum)/util.KilobytesToBytes)\n\n\t\t// application cpu and mem used are the same as pod's.\n\t\tapplicationCpuUsageCoreMetrics := metrics.NewEntityResourceMetric(task.ApplicationType,\n\t\t\tutil.PodStatsKeyFunc(podStat), metrics.CPU, metrics.Used,\n\t\t\tfloat64(cpuUsageNanoCoreSum)/util.NanoToUnit)\n\t\tapplicationMemoryUsageCoreMetrics := metrics.NewEntityResourceMetric(task.ApplicationType,\n\t\t\tutil.PodStatsKeyFunc(podStat), metrics.Memory, metrics.Used,\n\t\t\tfloat64(memoryUsageBytesSum)/util.KilobytesToBytes)\n\n\t\tm.metricSink.AddNewMetricEntries(podCpuUsageCoreMetrics,\n\t\t\tpodMemoryUsageCoreMetrics,\n\t\t\tapplicationCpuUsageCoreMetrics,\n\t\t\tapplicationMemoryUsageCoreMetrics)\n\t}\n}", "func (n *RouterNode) GatherMetrics() {\n\tn.Lock()\n\tdefer n.Unlock()\n\n\tlevel.Debug(n.logger).Log(\n\t\t\"msg\", \"GatherMetrics() locked\",\n\t)\n\n\tif time.Now().Unix() < n.nextCollectionTicker {\n\t\treturn\n\t}\n\tstart := time.Now()\n\tif len(n.metrics) > 0 {\n\t\tn.metrics = n.metrics[:0]\n\t\tlevel.Debug(n.logger).Log(\n\t\t\t\"msg\", \"GatherMetrics() cleared metrics\",\n\t\t)\n\t}\n\tupValue := 1\n\n\t// What is RouterID and AS number of this GoBGP server?\n\tserver, err := n.client.GetBgp(context.Background(), &gobgpapi.GetBgpRequest{})\n\tif err != nil {\n\t\tn.IncrementErrorCounter()\n\t\tlevel.Error(n.logger).Log(\n\t\t\t\"msg\", \"failed to query gobgp server\",\n\t\t\t\"error\", err.Error(),\n\t\t)\n\t\tif IsConnectionError(err) {\n\t\t\tn.connected = false\n\t\t\tupValue = 0\n\t\t}\n\t} else {\n\t\tn.routerID = server.Global.RouterId\n\t\tn.localAS = server.Global.Asn\n\t\tlevel.Debug(n.logger).Log(\n\t\t\t\"msg\", \"router info\",\n\t\t\t\"router_id\", n.routerID,\n\t\t\t\"local_asn\", n.localAS,\n\t\t)\n\t\tn.connected = true\n\t}\n\n\tif n.connected {\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(2)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tn.GetRibCounters()\n\t\t}()\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tn.GetPeers()\n\t\t}()\n\t\twg.Wait()\n\n\t}\n\n\t// Generic Metrics\n\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\trouterUp,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(upValue),\n\t))\n\n\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\trouterErrors,\n\t\tprometheus.CounterValue,\n\t\tfloat64(n.errors),\n\t))\n\tn.metrics = append(n.metrics, 
prometheus.MustNewConstMetric(\n\t\trouterNextScrape,\n\t\tprometheus.CounterValue,\n\t\tfloat64(n.nextCollectionTicker),\n\t))\n\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\trouterScrapeTime,\n\t\tprometheus.GaugeValue,\n\t\ttime.Since(start).Seconds(),\n\t))\n\n\t// Router ID and ASN\n\tif n.routerID != \"\" {\n\t\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\t\trouterID,\n\t\t\tprometheus.GaugeValue,\n\t\t\t1,\n\t\t\tn.routerID,\n\t\t))\n\t}\n\tif n.localAS > 0 {\n\t\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\t\trouterLocalAS,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(n.localAS),\n\t\t))\n\t}\n\n\tn.nextCollectionTicker = time.Now().Add(time.Duration(n.pollInterval) * time.Second).Unix()\n\n\tif upValue > 0 {\n\t\tn.result = \"success\"\n\t} else {\n\t\tn.result = \"failure\"\n\t}\n\tn.timestamp = time.Now().Format(time.RFC3339)\n\n\tlevel.Debug(n.logger).Log(\n\t\t\"msg\", \"GatherMetrics() returns\",\n\t)\n}", "func CollectSysStats(registry *Registry) {\n\tvar s sysStatsCollector\n\ts.registry = registry\n\t// maxOpen tracks the file-handle limit, curOpen the handles currently allocated.\n\ts.maxOpen = registry.Gauge(\"fh.max\", nil)\n\ts.curOpen = registry.Gauge(\"fh.allocated\", nil)\n\ts.numGoroutines = registry.Gauge(\"go.numGoroutines\", nil)\n\n\tticker := time.NewTicker(30 * time.Second)\n\tgo func() {\n\t\tlog := registry.log\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tlog.Debugf(\"Collecting system stats\")\n\t\t\t\tfdStats(&s)\n\t\t\t\tgoRuntimeStats(&s)\n\t\t\t}\n\t\t}\n\t}()\n}", "func appStatsCollect(ctx *zedrouterContext) {\n\tlog.Infof(\"appStatsCollect: containerStats, started\")\n\tappStatsCollectTimer := time.NewTimer(time.Duration(ctx.appStatsInterval) * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-appStatsCollectTimer.C:\n\t\t\titems, stopped := checkAppStopStatsCollect(ctx)\n\t\t\tif stopped {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tcollectTime := time.Now() // all apps share the same collection timestamp\n\t\t\tfor _, st := range items {\n\t\t\t\tstatus := st.(types.AppNetworkStatus)\n\t\t\t\tif status.GetStatsIPAddr != nil {\n\t\t\t\t\tacMetrics, err := appContainerGetStats(status.GetStatsIPAddr)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Errorf(\"appStatsCollect: can't get App %s Container Metrics on %s, %v\",\n\t\t\t\t\t\t\tstatus.UUIDandVersion.UUID.String(), status.GetStatsIPAddr.String(), err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tacMetrics.UUIDandVersion = status.UUIDandVersion\n\t\t\t\t\tacMetrics.CollectTime = collectTime\n\t\t\t\t\tctx.pubAppContainerMetrics.Publish(acMetrics.Key(), acMetrics)\n\t\t\t\t}\n\t\t\t}\n\t\t\tappStatsCollectTimer = time.NewTimer(time.Duration(ctx.appStatsInterval) * time.Second)\n\t\t}\n\t}\n}", "func (m VarnishPlugin) FetchMetrics() (map[string]interface{}, error) {\n\tvar out []byte\n\tvar err error\n\n\tif m.VarnishName == \"\" {\n\t\tout, err = exec.Command(m.VarnishStatPath, \"-1\").CombinedOutput()\n\t} else {\n\t\tout, err = exec.Command(m.VarnishStatPath, \"-1\", \"-n\", m.VarnishName).CombinedOutput()\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%s: %s\", err, out)\n\t}\n\n\tlineexp := regexp.MustCompile(`^([^ ]+) +(\\d+)`)\n\tsmaexp := regexp.MustCompile(`^SMA\\.([^\\.]+)\\.(.+)$`)\n\n\tstat := map[string]interface{}{\n\t\t\"requests\": float64(0),\n\t}\n\n\tvar tmpv float64\n\tfor _, line := range strings.Split(string(out), \"\\n\") {\n\t\tmatch := lineexp.FindStringSubmatch(line)\n\t\tif match == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\ttmpv, err = strconv.ParseFloat(match[2], 64)\n\t\tif err != nil 
{\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch match[1] {\n\t\tcase \"cache_hit\", \"MAIN.cache_hit\":\n\t\t\tstat[\"cache_hits\"] = tmpv\n\t\t\tstat[\"requests\"] = stat[\"requests\"].(float64) + tmpv\n\t\tcase \"cache_miss\", \"MAIN.cache_miss\":\n\t\t\tstat[\"requests\"] = stat[\"requests\"].(float64) + tmpv\n\t\tcase \"cache_hitpass\", \"MAIN.cache_hitpass\":\n\t\t\tstat[\"requests\"] = stat[\"requests\"].(float64) + tmpv\n\t\tcase \"MAIN.backend_req\":\n\t\t\tstat[\"backend_req\"] = tmpv\n\t\tcase \"MAIN.backend_conn\":\n\t\t\tstat[\"backend_conn\"] = tmpv\n\t\tcase \"MAIN.backend_fail\":\n\t\t\tstat[\"backend_fail\"] = tmpv\n\t\tcase \"MAIN.backend_reuse\":\n\t\t\tstat[\"backend_reuse\"] = tmpv\n\t\tcase \"MAIN.backend_recycle\":\n\t\t\tstat[\"backend_recycle\"] = tmpv\n\t\tcase \"MAIN.n_object\":\n\t\t\tstat[\"n_object\"] = tmpv\n\t\tcase \"MAIN.n_objectcore\":\n\t\t\tstat[\"n_objectcore\"] = tmpv\n\t\tcase \"MAIN.n_expired\":\n\t\t\tstat[\"n_expired\"] = tmpv\n\t\tcase \"MAIN.n_objecthead\":\n\t\t\tstat[\"n_objecthead\"] = tmpv\n\t\tcase \"MAIN.busy_sleep\":\n\t\t\tstat[\"busy_sleep\"] = tmpv\n\t\tcase \"MAIN.busy_wakeup\":\n\t\t\tstat[\"busy_wakeup\"] = tmpv\n\t\tdefault:\n\t\t\tsmamatch := smaexp.FindStringSubmatch(match[1])\n\t\t\tif smamatch == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif smamatch[2] == \"g_alloc\" {\n\t\t\t\tstat[\"varnish.sma.g_alloc.\"+smamatch[1]+\".g_alloc\"] = tmpv\n\t\t\t} else if smamatch[2] == \"g_bytes\" {\n\t\t\t\tstat[\"varnish.sma.memory.\"+smamatch[1]+\".allocated\"] = tmpv\n\t\t\t} else if smamatch[2] == \"g_space\" {\n\t\t\t\tstat[\"varnish.sma.memory.\"+smamatch[1]+\".available\"] = tmpv\n\t\t\t}\n\t\t}\n\t}\n\n\treturn stat, err\n}", "func (c *VMCollector) Collect(ch chan<- prometheus.Metric) {\n\tfor _, m := range c.getMetrics() {\n\t\tch <- m\n\t}\n}", "func (throttler *Throttler) aggregateMySQLMetrics(ctx context.Context) error {\n\tfor clusterName, probes := range throttler.mysqlInventory.ClustersProbes {\n\t\tmetricName := fmt.Sprintf(\"mysql/%s\", clusterName)\n\t\tignoreHostsCount := throttler.mysqlInventory.IgnoreHostsCount[clusterName]\n\t\tignoreHostsThreshold := throttler.mysqlInventory.IgnoreHostsThreshold[clusterName]\n\t\taggregatedMetric := aggregateMySQLProbes(ctx, probes, clusterName, throttler.mysqlInventory.InstanceKeyMetrics, ignoreHostsCount, config.Settings().Stores.MySQL.IgnoreDialTCPErrors, ignoreHostsThreshold)\n\t\tthrottler.aggregatedMetrics.Set(metricName, aggregatedMetric, cache.DefaultExpiration)\n\t}\n\treturn nil\n}", "func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) {\n\toomCountByHost, ramUsageByHost := c.ReallyExpensiveAssessmentOfTheSystemState()\n\tfor host, oomCount := range oomCountByHost {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.OOMCountDesc,\n\t\t\tprometheus.CounterValue,\n\t\t\tfloat64(oomCount),\n\t\t\thost,\n\t\t)\n\t}\n\tfor host, ramUsage := range ramUsageByHost {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.RAMUsageDesc,\n\t\t\tprometheus.GaugeValue,\n\t\t\tramUsage,\n\t\t\thost,\n\t\t)\n\t}\n}", "func (c *CloudWatch) gatherMetrics(\n\tparams *cwClient.GetMetricDataInput,\n) ([]types.MetricDataResult, error) {\n\tresults := []types.MetricDataResult{}\n\n\tfor {\n\t\tresp, err := c.client.GetMetricData(context.Background(), params)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to get metric data: %w\", err)\n\t\t}\n\n\t\tresults = append(results, resp.MetricDataResults...)\n\t\tif resp.NextToken == nil {\n\t\t\tbreak\n\t\t}\n\t\tparams.NextToken = 
resp.NextToken\n\t}\n\n\treturn results, nil\n}", "func (client *XenClient) VMGuestMetricsGetAll() (result []string, err error) {\n\tobj, err := client.APICall(\"VM_guest_metrics.get_all\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresult = make([]string, len(obj.([]interface{})))\n\tfor i, value := range obj.([]interface{}) {\n\t\tresult[i] = value.(string)\n\t}\n\n\treturn\n}", "func CaptureRuntimeMemStats(registry RootRegistry, collectionFreq time.Duration) {\n\truntimeMemStats.Do(func() {\n\t\tif reg, ok := registry.(*rootRegistry); ok {\n\t\t\tgoRegistry := metrics.NewPrefixedChildRegistry(reg.registry, \"go.\")\n\t\t\tmetrics.RegisterRuntimeMemStats(goRegistry)\n\t\t\tgo metrics.CaptureRuntimeMemStats(goRegistry, collectionFreq)\n\t\t}\n\t})\n}", "func (w *windowsResourceUsageGatherer) Gather(executor QueryExecutor, startTime time.Time, config *measurement.MeasurementConfig) ([]measurement.Summary, error) {\n\tcpuSummary, err := getSummary(cpuUsageQueryTop10, convertToCPUPerfData, cpuUsageMetricsName, executor, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmemorySummary, err := getSummary(memoryUsageQueryTop10, convertToMemoryPerfData, memoryUsageMetricsName, executor, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn []measurement.Summary{cpuSummary, memorySummary}, nil\n}", "func runCPUUsageStats(){\n\tnbCPU := float64(runtime.NumCPU())\n\tparams := fmt.Sprintf(\"(Get-process -Id %d).CPU\",os.Getpid())\n\tfor {\n\t\tcmd := exec.Command(\"powershell\", params)\n\t\tdata, _ := cmd.Output()\n\t\tcurrent,_ := strconv.ParseFloat(strings.Replace(string(data),\"\\r\\n\",\"\",-1),32)\n\t\tif previous == 0 {\n\t\t\tprevious = current\n\t\t}\n\t\tcurrentUsage = int(((current - previous)*float64(100))/(waitTime*nbCPU) )\n\t\tprevious = current\n\t\ttime.Sleep(time.Duration(waitTime )*time.Second)\n\t}\n}", "func GetRuntimeStats() (result map[string]float64) {\n\truntime.ReadMemStats(memStats)\n\n\tnow = time.Now()\n\tdiffTime = now.Sub(lastSampleTime).Seconds()\n\n\tresult = map[string]float64{\n\t\t\"alloc\": float64(memStats.Alloc),\n\t\t\"frees\": float64(memStats.Frees),\n\t\t\"gc.pause_total\": float64(memStats.PauseTotalNs) / nsInMs,\n\t\t\"heap.alloc\": float64(memStats.HeapAlloc),\n\t\t\"heap.objects\": float64(memStats.HeapObjects),\n\t\t\"mallocs\": float64(memStats.Mallocs),\n\t\t\"stack\": float64(memStats.StackInuse),\n\t}\n\n\tif lastPauseNs > 0 {\n\t\tpauseSinceLastSample = memStats.PauseTotalNs - lastPauseNs\n\t\tresult[\"gc.pause_per_second\"] = float64(pauseSinceLastSample) / nsInMs / diffTime\n\t}\n\n\tlastPauseNs = memStats.PauseTotalNs\n\n\tnbGc = memStats.NumGC - lastNumGc\n\tif lastNumGc > 0 {\n\t\tresult[\"gc.gc_per_second\"] = float64(nbGc) / diffTime\n\t}\n\n\t// Collect GC pauses\n\tif nbGc > 0 {\n\t\tif nbGc > 256 {\n\t\t\tnbGc = 256\n\t\t}\n\n\t\tvar i uint32\n\n\t\tfor i = 0; i < nbGc; i++ {\n\t\t\tidx := int((memStats.NumGC-uint32(i))+255) % 256\n\t\t\tpause := float64(memStats.PauseNs[idx])\n\t\t\tresult[\"gc.pause\"] = pause / nsInMs\n\t\t}\n\t}\n\n\t// Store last values\n\tlastNumGc = memStats.NumGC\n\tlastSampleTime = now\n\n\treturn result\n}", "func ReadMemStats(m *MemStats) {\n\tm.HeapIdle = 0\n\tm.HeapInuse = 0\n\tfor block := gcBlock(0); block < endBlock; block++ {\n\t\tbstate := block.state()\n\t\tif bstate == blockStateFree {\n\t\t\tm.HeapIdle += uint64(bytesPerBlock)\n\t\t} else {\n\t\t\tm.HeapInuse += uint64(bytesPerBlock)\n\t\t}\n\t}\n\tm.HeapReleased = 0 // always 0, we don't currently release memory back to the 
OS.\n\tm.HeapSys = m.HeapInuse + m.HeapIdle\n\tm.GCSys = uint64(heapEnd - uintptr(metadataStart))\n\tm.Sys = uint64(heapEnd - heapStart)\n}", "func collectGauges(e *Exporter, ch chan<- prometheus.Metric) {\n\te.chipStatGauge.Collect(ch)\n\te.devsHashRateGauge.Collect(ch)\n\te.devsHashCountGauge.Collect(ch)\n\te.devsErrorsGauge.Collect(ch)\n\te.devsTemperatureGauge.Collect(ch)\n}", "func (u *Use) CollectMetrics(mts []plugin.Metric) ([]plugin.Metric, error) {\n\tcfg := mts[0].Config\n\tif !u.initialized {\n\t\tu.init(cfg)\n\t}\n\n\tmetrics := make([]plugin.Metric, len(mts))\n\tfor i, p := range mts {\n\t\tns := p.Namespace.String()\n\t\tswitch {\n\t\tcase cpure.MatchString(ns):\n\t\t\tmetric, err := u.computeStat(p.Namespace)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.New(\"Unable to get compute stat: \" + err.Error())\n\t\t\t}\n\t\t\tmetrics[i] = *metric\n\n\t\tcase storre.MatchString(ns):\n\t\t\tmetric, err := u.diskStat(p.Namespace)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.New(\"Unable to get disk stat: \" + err.Error())\n\t\t\t}\n\t\t\tmetrics[i] = *metric\n\t\tcase memre.MatchString(ns):\n\t\t\tmetric, err := memStat(p.Namespace, u.VmStatPath, u.MemInfoPath)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.New(\"Unable to get mem stat: \" + err.Error())\n\t\t\t}\n\t\t\tmetrics[i] = *metric\n\t\t}\n\t\ttags, err := hostTags()\n\n\t\tif err == nil {\n\t\t\tmetrics[i].Tags = tags\n\t\t}\n\t\tmetrics[i].Timestamp = time.Now()\n\n\t}\n\treturn metrics, nil\n}", "func FetchAppServerCPUStats(r Result) []float32 {\n\treturn r.AppServerStats().CPU\n}", "func processHealthMonitor(duration time.Duration) {\n\tfor {\n\t\t<-time.After(duration)\n\t\tvar numOfGoroutines = runtime.NumGoroutine()\n\t\t//var memStats runtime.MemStats\n\t\t//runtime.ReadMemStats(&memStats)\n\t\t//core.Info(\"Number of goroutines: %d\",numOfGoroutines)\n\t\t//core.Info(\"Mem stats: %v\",memStats)\n\t\tcore.CloudWatchClient.PutMetric(\"num_of_goroutines\", \"Count\", float64(numOfGoroutines), \"httshark_health_monitor\")\n\t}\n}", "func UpdateMetrics(result *Results) {\n\n\t// Publish system variables\n\tupTimeGauge.Set(float64(result.SysMonitorInfo.Uptime))\n\tcpuUsageGauge.Set(float64(result.SysMonitorInfo.CpuUsagePercent))\n\n\t// Memory\n\tmemUsagePercentGauge.Set(result.SysMonitorInfo.MemUsagePercent)\n\tmemTotalGauge.Set(float64(result.SysMonitorInfo.MemTotal))\n\tmemAvailableGauge.Set(float64(result.SysMonitorInfo.MemAvailable))\n\n\t// Bandwidth\n\tbandwidthUsageTotalGauge.Set(float64(result.SysMonitorInfo.BandwidthUsageTotal))\n\tbandwidthUsageSentGauge.Set(float64(result.SysMonitorInfo.BandwidthUsageSent))\n\tbandwidthUsageRecvGauge.Set(float64(result.SysMonitorInfo.BandwidthUsageRecv))\n\n\tfor _, driveUsage := range result.SysMonitorInfo.DriveUsage {\n\t\t// \"drive_path\", \"available\", \"growth_rate\", \"full_in\", \"physical_drive\"\n\n\t\tdays := strconv.FormatFloat(driveUsage.DaysTillFull, 'f', 3, 64)\n\n\t\tif math.IsInf(driveUsage.DaysTillFull, 0) {\n\t\t\tdays = \"10 years\"\n\t\t}\n\n\t\tdriveSpace.WithLabelValues(driveUsage.Path,\n\t\t\tstrconv.FormatFloat(driveUsage.PercentUsed, 'f', 3, 64),\n\t\t\tstrconv.FormatUint(driveUsage.GrowthPerDayBytes, 10),\n\t\t\tdays,\n\t\t\tdriveUsage.VolumeName).Set(driveUsage.PercentUsed)\n\t}\n\n\t// Publish endpoints being monitored\n\tfor _, uptimeResponse := range result.UptimeList {\n\n\t\tif uptimeResponse.ResponseCode == 200 {\n\t\t\tendpointAvailable.WithLabelValues(uptimeResponse.Endpoint).Set(1)\n\t\t} else 
{\n\t\t\tendpointAvailable.WithLabelValues(uptimeResponse.Endpoint).Set(0)\n\t\t}\n\n\t\tendpointDuration.WithLabelValues(uptimeResponse.Endpoint).Set(uptimeResponse.ResponseTime.Seconds())\n\t}\n\n\tfor _, backupInfo := range result.BackupInfoList {\n\n\t\t/*\n\t\t\tif backupInfo.WasBackedUp {\n\t\t\t\tbackupsDone.WithLabelValues(backupInfo.Folder).Set(1)\n\t\t\t} else {\n\t\t\t\tbackupsDone.WithLabelValues(backupInfo.Folder).Set(0)\n\t\t\t}\n\t\t*/\n\n\t\t// {\"backup_directory\", \"backup_in_last_24_hours\", \"last_backup_size\", \"last_backup_date\", \"last_backup_time\"})\n\n\t\t// backupsSize.WithLabelValues(backupInfo.Folder).Set(float64(backupInfo.BackupFileSize))\n\n\t\tbackupInfoGauge.WithLabelValues(backupInfo.Folder,\n\t\t\tbtoa(backupInfo.WasBackedUp),\n\t\t\titoa(backupInfo.LastBackupSize),\n\t\t\tttoa(backupInfo.LastBackupTime),\n\t\t\tbackupInfo.LastBackupFile).Set(btof(backupInfo.WasBackedUp))\n\t}\n\n\t// TODO: This loop is not needed, you can build the summary on the first loop\n\tvar too_many_lines = 500\n\tfor _, logLine := range result.LoglineList {\n\n\t\tsummary, ok := result.LogSummary[logLine.LogPath]\n\n\t\tif ok == false {\n\t\t\tsummary = LogSummary{}\n\t\t\tsummary.StatusCount = make(map[string]int64)\n\t\t\tsummary.SeverityLevelCount = make(map[string]int64)\n\t\t}\n\n\t\tsummary.StatusCount[logLine.StatusCode] = summary.StatusCount[logLine.StatusCode] + 1\n\n\t\tif len(logLine.Severity) > 0 {\n\t\t\tsummary.SeverityLevelCount[logLine.Severity] = summary.SeverityLevelCount[logLine.Severity] + 1\n\t\t}\n\n\t\tresult.LogSummary[logLine.LogPath] = summary\n\n\t\tif too_many_lines <= 0 {\n\t\t\t// Pending a better solution, let's not allow the processing\n\t\t\t// of too many lines, to not kill the server\n\t\t\tlLog.Print(\"Too many lines for a single tick to process\")\n\t\t\tbreak\n\t\t}\n\n\t}\n\n\t// Set the values for the logs. 
We use two labels (logpath, code)\n\tfor logFilePath, logSummary := range result.LogSummary {\n\n\t\tfor s, value := range logSummary.StatusCount {\n\t\t\tstatusCodes.WithLabelValues(logFilePath, s).Set(float64(value))\n\t\t}\n\n\t\tfor s, value := range logSummary.SeverityLevelCount {\n\t\t\tseverity.WithLabelValues(logFilePath, s).Set(float64(value))\n\t\t}\n\n\t}\n}", "func Collectmem(serverName string) (Metric, error) {\n\tvalues := Metric{}\n\tvar err error\n\tvalues.Timestamp = time.Now()\n\tvalues.MetricType = \"mem\"\n\tvar output string\n\n\tvar response cpmserverapi.MetricMEMResponse\n\trequest := &cpmserverapi.MetricMEMRequest{}\n\tresponse, err = cpmserverapi.MetricMEMClient(serverName, request)\n\tif err != nil {\n\t\tlogit.Error.Println(\"mem metric error:\" + err.Error())\n\t\treturn values, err\n\t}\n\n\toutput = strings.TrimSpace(response.Output)\n\n\tvalues.Value, err = strconv.ParseFloat(output, 64)\n\tif err != nil {\n\t\tlogit.Error.Println(\"parseFloat error in mem metric \" + err.Error())\n\t}\n\n\treturn values, err\n}", "func (s *Systemctl) Gather(acc telegraf.Accumulator) error {\n\ts.mux.Lock()\n\tdefer s.mux.Unlock()\n\n\t// for each systemctl service being monitored\n\tfor _, aggregator := range s.Aggregators {\n\t\t// aggregate the data from the set of samples\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"InputPlugin\": \"systemctl\",\n\t\t\t\"ResourceName\": aggregator.ResourceName,\n\t\t}).Debug(\"Aggregating\")\n\t\terr := aggregator.Aggregate()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// create fields\n\t\tfields := map[string]interface{}{\n\t\t\t\"current_state_time\": aggregator.CurrentStateDuration,\n\t\t\t\"current_state\": aggregator.CurrentState,\n\t\t}\n\t\tfor k := range aggregator.AggState {\n\t\t\tfields[k] = aggregator.AggState[k]\n\t\t}\n\t\t// create tags\n\t\ttags := map[string]string{\"resource\": aggregator.ResourceName}\n\t\tacc.AddFields(\"service_config_state\", fields, tags)\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"InputPlugin\": \"systemctl\",\n\t\t\t\"ResourceName\": aggregator.ResourceName,\n\t\t}).Debug(\"Added fields\")\n\t}\n\treturn nil\n}", "func (c *Canary) GatherMetrics(config schemas.Config) error {\n\tif !c.StepStatus[constants.StepCleanChecking] {\n\t\treturn nil\n\t}\n\tif config.DisableMetrics {\n\t\treturn nil\n\t}\n\n\tif len(config.Region) > 0 {\n\t\tif !CheckRegionExist(config.Region, c.Stack.Regions) {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tif !config.CompleteCanary {\n\t\tc.Logger.Debug(\"Skip gathering metrics because canary is now applied\")\n\t\treturn nil\n\t}\n\n\tif err := c.Deployer.StartGatheringMetrics(config); err != nil {\n\t\treturn err\n\t}\n\n\tc.StepStatus[constants.StepGatherMetrics] = true\n\treturn nil\n}", "func (d *Director) collectCheckStats() {\n\tllog := d.Log.WithField(\"method\", \"collectCheckStats\")\n\n\td.CheckStatsLooper.Loop(func() error {\n\t\t// To avoid a potential race here; all members have a count of how many\n\t\t// checks each member is assigned.\n\t\t// This will *probably* be switched to utilize `state` later on.\n\t\tcheckStats, err := d.DalClient.FetchCheckStats()\n\t\tif err != nil {\n\t\t\td.Config.EQClient.AddWithErrorLog(\"Unable to fetch check stats\", llog, log.Fields{\"err\": err})\n\t\t\treturn nil\n\t\t}\n\n\t\td.CheckStatsMutex.Lock()\n\t\td.CheckStats = checkStats\n\t\td.CheckStatsMutex.Unlock()\n\n\t\treturn nil\n\t})\n\n\tllog.Debug(\"Exiting...\")\n}", "func (c collector) Collect(ch chan<- prometheus.Metric) {\n\tvar wg sync.WaitGroup\n\n\t// We don't bail 
out on errors because those can happen if there is a race condition between\n\t// the destruction of a container and us getting to read the cgroup data. We just don't report\n\t// the values we don't get.\n\n\tcollectors := []func(string, *regexp.Regexp){\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tnuma, err := cgroups.GetNumaStats(cgroupPath(\"memory\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateNumaStatMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], numa)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect NUMA stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tmemory, err := cgroups.GetMemoryUsage(cgroupPath(\"memory\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateMemoryUsageMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], memory)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect memory usage stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tmigrate, err := cgroups.GetCPUSetMemoryMigrate(cgroupPath(\"cpuset\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateMemoryMigrateMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], migrate)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect memory migration stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tcpuAcctUsage, err := cgroups.GetCPUAcctStats(cgroupPath(\"cpuacct\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateCPUAcctUsageMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], cpuAcctUsage)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect CPU accounting stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\thugeTlbUsage, err := cgroups.GetHugetlbUsage(cgroupPath(\"hugetlb\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateHugeTlbUsageMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], hugeTlbUsage)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect hugetlb stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tblkioDeviceUsage, err := cgroups.GetBlkioThrottleBytes(cgroupPath(\"blkio\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateBlkioDeviceUsageMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], blkioDeviceUsage)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect blkio stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t}\n\n\tcontainerIDRegexp := regexp.MustCompile(`[a-z0-9]{64}`)\n\n\tfor _, path := range walkCgroups() {\n\t\twg.Add(len(collectors))\n\t\tfor _, fn := range collectors {\n\t\t\tgo fn(path, containerIDRegexp)\n\t\t}\n\t}\n\n\t// We need to wait so that the response channel doesn't get closed.\n\twg.Wait()\n}", "func ComputeStats(res *sdk.Result, v *venom.Tests) []string {\n\t// update global stats\n\tfor _, ts := range v.TestSuites {\n\t\tnSkipped := 0\n\t\tfor _, tc := range ts.TestCases {\n\t\t\tnSkipped += len(tc.Skipped)\n\t\t}\n\t\tif ts.Skipped < nSkipped {\n\t\t\tts.Skipped = nSkipped\n\t\t}\n\t\tif ts.Total < len(ts.TestCases)-nSkipped {\n\t\t\tts.Total = len(ts.TestCases) - nSkipped\n\t\t}\n\t\tv.Total += ts.Total\n\t\tv.TotalOK += ts.Total - ts.Failures - ts.Errors\n\t\tv.TotalKO += ts.Failures + ts.Errors\n\t\tv.TotalSkipped += ts.Skipped\n\t}\n\n\tvar nbOK, nbKO, nbSkipped int\n\n\treasons := []string{}\n\treasons = append(reasons, fmt.Sprintf(\"JUnit parser: %d testsuite(s)\", 
len(v.TestSuites)))\n\n\tfor i, ts := range v.TestSuites {\n\t\tvar nbKOTC, nbFailures, nbErrors, nbSkippedTC int\n\t\tif ts.Name == \"\" {\n\t\t\tts.Name = fmt.Sprintf(\"TestSuite.%d\", i)\n\t\t}\n\t\treasons = append(reasons, fmt.Sprintf(\"JUnit parser: testsuite %s has %d testcase(s)\", ts.Name, len(ts.TestCases)))\n\t\tfor k, tc := range ts.TestCases {\n\t\t\tif tc.Name == \"\" {\n\t\t\t\ttc.Name = fmt.Sprintf(\"TestCase.%d\", k)\n\t\t\t}\n\t\t\tif len(tc.Failures) > 0 {\n\t\t\t\treasons = append(reasons, fmt.Sprintf(\"JUnit parser: testcase %s has %d failure(s)\", tc.Name, len(tc.Failures)))\n\t\t\t\tnbFailures += len(tc.Failures)\n\t\t\t}\n\t\t\tif len(tc.Errors) > 0 {\n\t\t\t\treasons = append(reasons, fmt.Sprintf(\"JUnit parser: testcase %s has %d error(s)\", tc.Name, len(tc.Errors)))\n\t\t\t\tnbErrors += len(tc.Errors)\n\t\t\t}\n\t\t\tif len(tc.Failures) > 0 || len(tc.Errors) > 0 {\n\t\t\t\tnbKOTC++\n\t\t\t} else if len(tc.Skipped) > 0 {\n\t\t\t\tnbSkippedTC += len(tc.Skipped)\n\t\t\t}\n\t\t\tv.TestSuites[i].TestCases[k] = tc\n\t\t}\n\t\tnbOK += len(ts.TestCases) - nbKOTC\n\t\tnbKO += nbKOTC\n\t\tnbSkipped += nbSkippedTC\n\t\tif ts.Failures > nbFailures {\n\t\t\tnbFailures = ts.Failures\n\t\t}\n\t\tif ts.Errors > nbErrors {\n\t\t\tnbErrors = ts.Errors\n\t\t}\n\n\t\tif nbFailures > 0 {\n\t\t\treasons = append(reasons, fmt.Sprintf(\"JUnit parser: testsuite %s has %d failure(s)\", ts.Name, nbFailures))\n\t\t}\n\t\tif nbErrors > 0 {\n\t\t\treasons = append(reasons, fmt.Sprintf(\"JUnit parser: testsuite %s has %d error(s)\", ts.Name, nbErrors))\n\t\t}\n\t\tif nbKOTC > 0 {\n\t\t\treasons = append(reasons, fmt.Sprintf(\"JUnit parser: testsuite %s has %d test(s) failed\", ts.Name, nbKOTC))\n\t\t}\n\t\tif nbSkippedTC > 0 {\n\t\t\treasons = append(reasons, fmt.Sprintf(\"JUnit parser: testsuite %s has %d test(s) skipped\", ts.Name, nbSkippedTC))\n\t\t}\n\t\tv.TestSuites[i] = ts\n\t}\n\n\tif nbKO > v.TotalKO {\n\t\tv.TotalKO = nbKO\n\t}\n\n\tif nbOK != v.TotalOK {\n\t\tv.TotalOK = nbOK\n\t}\n\n\tif nbSkipped != v.TotalSkipped {\n\t\tv.TotalSkipped = nbSkipped\n\t}\n\n\tif v.TotalKO+v.TotalOK != v.Total {\n\t\tv.Total = v.TotalKO + v.TotalOK + v.TotalSkipped\n\t}\n\n\tres.Status = sdk.StatusFail\n\tif v.TotalKO == 0 {\n\t\tres.Status = sdk.StatusSuccess\n\t}\n\treturn reasons\n}", "func NewVMCollector(cfgBaseName string) (collector.Collector, error) {\n\tprocFile := \"meminfo\"\n\n\tc := VM{}\n\tc.id = \"vm\"\n\tc.pkgID = \"builtins.linux.procfs.\" + c.id\n\tc.procFSPath = \"/proc\"\n\tc.file = filepath.Join(c.procFSPath, procFile)\n\tc.logger = log.With().Str(\"pkg\", c.pkgID).Logger()\n\tc.metricStatus = map[string]bool{}\n\tc.metricDefaultActive = true\n\n\tif cfgBaseName == \"\" {\n\t\tif _, err := os.Stat(c.file); err != nil {\n\t\t\treturn nil, errors.Wrap(err, c.pkgID)\n\t\t}\n\t\treturn &c, nil\n\t}\n\n\tvar opts vmOptions\n\terr := config.LoadConfigFile(cfgBaseName, &opts)\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), \"no config found matching\") {\n\t\t\treturn &c, nil\n\t\t}\n\t\tc.logger.Warn().Err(err).Str(\"file\", cfgBaseName).Msg(\"loading config file\")\n\t\treturn nil, errors.Wrapf(err, \"%s config\", c.pkgID)\n\t}\n\n\tc.logger.Debug().Str(\"base\", cfgBaseName).Interface(\"config\", opts).Msg(\"loaded config\")\n\n\tif opts.ID != \"\" {\n\t\tc.id = opts.ID\n\t}\n\n\tif opts.ProcFSPath != \"\" {\n\t\tc.procFSPath = opts.ProcFSPath\n\t\tc.file = filepath.Join(c.procFSPath, procFile)\n\t}\n\n\tif len(opts.MetricsEnabled) > 0 {\n\t\tfor _, name := range 
opts.MetricsEnabled {\n\t\t\tc.metricStatus[name] = true\n\t\t}\n\t}\n\tif len(opts.MetricsDisabled) > 0 {\n\t\tfor _, name := range opts.MetricsDisabled {\n\t\t\tc.metricStatus[name] = false\n\t\t}\n\t}\n\n\tif opts.MetricsDefaultStatus != \"\" {\n\t\tif ok, _ := regexp.MatchString(`^(enabled|disabled)$`, strings.ToLower(opts.MetricsDefaultStatus)); ok {\n\t\t\tc.metricDefaultActive = strings.ToLower(opts.MetricsDefaultStatus) == metricStatusEnabled\n\t\t} else {\n\t\t\treturn nil, errors.Errorf(\"%s invalid metric default status (%s)\", c.pkgID, opts.MetricsDefaultStatus)\n\t\t}\n\t}\n\n\tif opts.RunTTL != \"\" {\n\t\tdur, err := time.ParseDuration(opts.RunTTL)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"%s parsing run_ttl\", c.pkgID)\n\t\t}\n\t\tc.runTTL = dur\n\t}\n\n\tif _, err := os.Stat(c.file); os.IsNotExist(err) {\n\t\treturn nil, errors.Wrap(err, c.pkgID)\n\t}\n\n\treturn &c, nil\n}", "func (ps *linuxHarvester) populateGauges(sample *types.ProcessSample, process Snapshot) error {\n\tvar err error\n\n\tcpuTimes, err := process.CPUTimes()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsample.CPUPercent = cpuTimes.Percent\n\n\ttotalCPU := cpuTimes.User + cpuTimes.System\n\n\tif totalCPU > 0 {\n\t\tsample.CPUUserPercent = (cpuTimes.User / totalCPU) * sample.CPUPercent\n\t\tsample.CPUSystemPercent = (cpuTimes.System / totalCPU) * sample.CPUPercent\n\t} else {\n\t\tsample.CPUUserPercent = 0\n\t\tsample.CPUSystemPercent = 0\n\t}\n\n\tif ps.privileged {\n\t\tfds, err := process.NumFDs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif fds >= 0 {\n\t\t\tsample.FdCount = &fds\n\t\t}\n\t}\n\n\t// Extra status data\n\tsample.Status = process.Status()\n\tsample.ThreadCount = process.NumThreads()\n\tsample.MemoryVMSBytes = process.VmSize()\n\tsample.MemoryRSSBytes = process.VmRSS()\n\n\treturn nil\n}", "func initializeDaemonMetrics() {\n\t// CLUSTER METRICS\n\tnumACLRules = createClusterGauge(numACLRulesName, numACLRulesHelp)\n\tnumIPSets = createClusterGauge(numIPSetsName, numIPSetsHelp)\n\tnumIPSetEntries = createClusterGauge(numIPSetEntriesName, numIPSetEntriesHelp)\n\tipsetInventory = createClusterGaugeVec(ipsetInventoryName, ipsetInventoryHelp, ipsetInventoryLabels)\n\tipsetInventoryMap = make(map[string]int)\n\n\t// NODE METRICS\n\taddACLRuleExecTime = createNodeSummary(addACLRuleExecTimeName, addACLRuleExecTimeHelp)\n\taddIPSetExecTime = createNodeSummary(addIPSetExecTimeName, addIPSetExecTimeHelp)\n}", "func CollectGoStatsTotals() Composer {\n\ts := &GoRuntimeInfo{}\n\ts.build()\n\n\treturn s\n}", "func (dh *darwinHarvester) populateGauges(sample *types.ProcessSample, process Snapshot) error {\n\tvar err error\n\n\tcpuTimes, err := process.CPUTimes()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsample.CPUPercent = cpuTimes.Percent\n\n\ttotalCPU := cpuTimes.User + cpuTimes.System\n\n\tif totalCPU > 0 {\n\t\tsample.CPUUserPercent = (cpuTimes.User / totalCPU) * sample.CPUPercent\n\t\tsample.CPUSystemPercent = (cpuTimes.System / totalCPU) * sample.CPUPercent\n\t} else {\n\t\tsample.CPUUserPercent = 0\n\t\tsample.CPUSystemPercent = 0\n\t}\n\n\t// Extra status data\n\tsample.Status = process.Status()\n\tsample.ThreadCount = process.NumThreads()\n\tsample.MemoryVMSBytes = process.VmSize()\n\tsample.MemoryRSSBytes = process.VmRSS()\n\n\treturn nil\n}", "func logMemstatsSample() {\n\tl := log.WithField(\"process\", \"memstats\")\n\n\truntime.GC() // get up-to-date statistics\n\n\tmemStats := new(runtime.MemStats)\n\truntime.ReadMemStats(memStats)\n\n\tvar gcStats 
debug.GCStats\n\tdebug.ReadGCStats(&gcStats)\n\n\ts := memStats\n\n\tl.Infof(\"# runtime.MemStats\")\n\tl.Infof(\"# Alloc = %d\", s.Alloc)\n\tl.Infof(\"# TotalAlloc = %d\", s.TotalAlloc)\n\tl.Infof(\"# Sys = %d\", s.Sys)\n\tl.Infof(\"# Lookups = %d\", s.Lookups)\n\tl.Infof(\"# Mallocs = %d\", s.Mallocs)\n\tl.Infof(\"# Frees = %d\", s.Frees)\n\tl.Infof(\"# HeapAlloc = %d\", s.HeapAlloc)\n\tl.Infof(\"# HeapSys = %d\", s.HeapSys)\n\tl.Infof(\"# HeapIdle = %d\", s.HeapIdle)\n\tl.Infof(\"# HeapInuse = %d\", s.HeapInuse)\n\tl.Infof(\"# HeapReleased = %d\", s.HeapReleased)\n\tl.Infof(\"# HeapObjects = %d\", s.HeapObjects)\n\tl.Infof(\"# Stack = %d / %d\", s.StackInuse, s.StackSys)\n\tl.Infof(\"# NumGoroutine = %d\", runtime.NumGoroutine())\n\n\t// Record GC pause history, most recent 5 entries\n\tl.Infof(\"# Stop-the-world Pause time\")\n\n\tfor i, v := range gcStats.Pause {\n\t\tl.Infof(\"# gcStats.Pause[%d] = %d ns\", i, v)\n\n\t\tif i == 4 {\n\t\t\tbreak\n\t\t}\n\t}\n}", "func (c *metricCollector) collectMemory(service *rrhttp.Service, tick time.Duration) {\n\tstarted := false\n\tfor {\n\t\tserver := service.Server()\n\t\tif server == nil && started {\n\t\t\t// stopped\n\t\t\treturn\n\t\t}\n\n\t\tstarted = true\n\n\t\tif workers, err := util.ServerState(server); err == nil {\n\t\t\tsum := 0.0\n\t\t\tfor _, w := range workers {\n\t\t\t\tsum += float64(w.MemoryUsage)\n\t\t\t}\n\n\t\t\tc.workersMemory.Set(sum)\n\t\t}\n\n\t\ttime.Sleep(tick)\n\t}\n}", "func (bot *AwakenBot) CollectGlobalMetrics() {\n\truntime.ReadMemStats(&mem)\n\ttags := map[string]string{\"metric\": \"server_metrics\", \"server\": \"global\"}\n\tfields := map[string]interface{}{\n\t\t\"memAlloc\": int(mem.Alloc),\n\t\t\"memTotalAlloc\": int(mem.TotalAlloc),\n\t\t\"memHeapAlloc\": int(mem.HeapAlloc),\n\t\t\"memHeapSys\": int(mem.HeapSys),\n\t}\n\n\terr := bot.iDB.AddMetric(\"server_metrics\", tags, fields)\n\tif err != nil {\n\t\tlog.Errorln(\"Error adding Metric:\", err)\n\t}\n}", "func (s *Stats) GetMemoryInfo(logMemory, logGoMemory bool) {\n\n    if logGoMemory {\n        if s.GoInfo == nil {\n            s.initGoInfo()\n        }\n\n        runtime.ReadMemStats(s.GoInfo.Memory.mem)\n        s.GoInfo.GoRoutines = runtime.NumGoroutine()\n        s.GoInfo.Memory.Alloc = s.GoInfo.Memory.mem.Alloc\n        s.GoInfo.Memory.HeapAlloc = s.GoInfo.Memory.mem.HeapAlloc\n        s.GoInfo.Memory.HeapSys = s.GoInfo.Memory.mem.HeapSys\n\n        if s.GoInfo.Memory.LastGC != s.GoInfo.Memory.mem.LastGC {\n            s.GoInfo.Memory.LastGC = s.GoInfo.Memory.mem.LastGC\n            s.GoInfo.Memory.NumGC = s.GoInfo.Memory.mem.NumGC - s.GoInfo.Memory.lastNumGC\n            s.GoInfo.Memory.lastNumGC = s.GoInfo.Memory.mem.NumGC\n            s.GoInfo.Memory.LastGCPauseDuration = s.GoInfo.Memory.mem.PauseNs[(s.GoInfo.Memory.mem.NumGC+255)%256]\n        } else {\n            s.GoInfo.Memory.NumGC = 0\n            s.GoInfo.Memory.LastGCPauseDuration = 0\n        }\n    }\n\n    if logMemory {\n\n        if s.MemInfo == nil {\n            s.MemInfo = new(MemInfo)\n        }\n\n        s.MemInfo.Memory, _ = mem.VirtualMemory()\n        s.MemInfo.Swap, _ = mem.SwapMemory()\n    }\n}", "func FetchDBServerMemStats(r Result) []float32 {\n\treturn r.DBServerStats().Mem\n}", "func (q *QueryBenchmarker) processStats(telemetrySink chan *report.Point) {\n\n\tq.statMapping = StatsMap{\n\t\tAllQueriesLabel: &StatGroup{},\n\t}\n\n\tlastRefresh := time.Time{}\n\ti := uint64(0)\n\tfor stat := range q.statChan {\n\t\tq.isBurnIn = i < q.burnIn\n\t\tif q.isBurnIn {\n\t\t\ti++\n\t\t\tq.statPool.Put(stat)\n\t\t\tcontinue\n\t\t} else if i == q.burnIn && q.burnIn > 0 {\n\t\t\tlog.Printf(\"burn-in complete after %d queries with %d workers\\n\", q.burnIn, 
q.workers)\n\t\t}\n\n\t\tif _, ok := q.statMapping[string(stat.Label)]; !ok {\n\t\t\tq.statMapping[string(stat.Label)] = &StatGroup{}\n\t\t}\n\n\t\tnow := time.Now()\n\n\t\tif stat.IsActual {\n\t\t\tq.movingAverageStat.Push(now, stat.Value)\n\t\t\tq.statMapping[AllQueriesLabel].Push(stat.Value)\n\t\t\tq.statMapping[string(stat.Label)].Push(stat.Value)\n\t\t\ti++\n\t\t}\n\n\t\tq.statPool.Put(stat)\n\n\t\tif lastRefresh.IsZero() || now.Sub(lastRefresh).Seconds() >= 1.0 {\n\t\t\tq.movingAverageStat.UpdateAvg(now, q.workers)\n\t\t\tlastRefresh = now\n\t\t\t// Report telemetry, if applicable:\n\t\t\tif telemetrySink != nil {\n\t\t\t\tp := report.GetPointFromGlobalPool()\n\t\t\t\tp.Init(\"benchmarks_telemetry\", now.UnixNano())\n\t\t\t\tfor _, tagpair := range q.reportTags {\n\t\t\t\t\tp.AddTag(tagpair[0], tagpair[1])\n\t\t\t\t}\n\t\t\t\tp.AddTag(\"client_type\", \"query\")\n\t\t\t\tp.AddFloat64Field(\"query_response_time_mean\", q.statMapping[AllQueriesLabel].Mean)\n\t\t\t\tp.AddFloat64Field(\"query_response_time_moving_mean\", q.movingAverageStat.Avg())\n\t\t\t\tp.AddIntField(\"query_workers\", q.workers)\n\t\t\t\tp.AddInt64Field(\"queries\", int64(i))\n\t\t\t\ttelemetrySink <- p\n\t\t\t}\n\t\t}\n\t\t// print stats to stderr (if printInterval is greater than zero):\n\t\tif q.printInterval > 0 && i > 0 && i%q.printInterval == 0 && (int64(i) < q.limit || q.limit < 0) {\n\t\t\tlog.Printf(\"%s: after %d queries with %d workers:\\n\", time.Now().String(), i-q.burnIn, q.workers)\n\t\t\tfprintStats(os.Stderr, q)\n\t\t\tlog.Printf(\"\\n\")\n\t\t}\n\n\t}\n\n\tlog.Printf(\"run complete after %d queries with %d workers:\\n\", i-q.burnIn, q.workers)\n\tq.totalQueries = int(i)\n\tq.statGroup.Done()\n}", "func collectMetrics(db *sql.DB, populaterWg *sync.WaitGroup, i *integration.Integration, instanceLookUp map[string]string) {\n\tdefer populaterWg.Done()\n\n\tvar collectorWg sync.WaitGroup\n\tmetricChan := make(chan newrelicMetricSender, 100) // large buffer for speed\n\n\t// Create a goroutine for each of the metric groups to collect\n\tcollectorWg.Add(5)\n\tgo oracleReadWriteMetrics.Collect(db, &collectorWg, metricChan)\n\tgo oraclePgaMetrics.Collect(db, &collectorWg, metricChan)\n\tgo oracleSysMetrics.Collect(db, &collectorWg, metricChan)\n\tgo globalNameInstanceMetric.Collect(db, &collectorWg, metricChan)\n\tgo dbIDInstanceMetric.Collect(db, &collectorWg, metricChan)\n\n\t// Separate logic is needed to see if we should even collect tablespaces\n\tcollectTableSpaces(db, &collectorWg, metricChan)\n\n\t// When the metric groups are finished collecting, close the channel\n\tgo func() {\n\t\tcollectorWg.Wait()\n\t\tclose(metricChan)\n\t}()\n\n\t// Create a goroutine to read from the metric channel and insert the metrics\n\tpopulateMetrics(metricChan, i, instanceLookUp)\n}", "func (phStats *passwordHasherStats) accumulateStats() {\n\tphStats.logger.Print(\"Collecting stats...\")\n\tok := true\n\tfor ok {\n\t\tvar ms microseconds\n\t\tif ms, ok = <-phStats.queue; ok {\n\t\t\tphStats.logger.Printf(\"Elapsed time: %dms\", ms)\n\n\t\t\t// block reads while appending/resizing/reallocating\n\t\t\tphStats.lock.Lock()\n\t\t\tphStats.times = append(phStats.times, ms)\n\t\t\tphStats.lock.Unlock()\n\t\t}\n\t}\n\tphStats.logger.Print(\"Done collecting stats\")\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\tif err := e.scrape(ch); err != nil {\n\t\tlog.Infof(\"Error scraping tinystats: %s\", err)\n\t}\n\n\te.ipv4QueryA.Collect(ch)\n\te.ipv4QueryNS.Collect(ch)\n\te.ipv4QueryCNAME.Collect(ch)\n\te.ipv4QuerySOA.Collect(ch)\n\te.ipv4QueryPTR.Collect(ch)\n\te.ipv4QueryHINFO.Collect(ch)\n\te.ipv4QueryMX.Collect(ch)\n\te.ipv4QueryTXT.Collect(ch)\n\te.ipv4QueryRP.Collect(ch)\n\te.ipv4QuerySIG.Collect(ch)\n\te.ipv4QueryKEY.Collect(ch)\n\te.ipv4QueryAAAA.Collect(ch)\n\te.ipv4QueryAXFR.Collect(ch)\n\te.ipv4QueryANY.Collect(ch)\n\te.ipv4QueryTOTAL.Collect(ch)\n\te.ipv4QueryOTHER.Collect(ch)\n\te.ipv4QueryNOTAUTH.Collect(ch)\n\te.ipv4QueryNOTIMPL.Collect(ch)\n\te.ipv4QueryBADCLASS.Collect(ch)\n\te.ipv4QueryNOQUERY.Collect(ch)\n\n\te.ipv6QueryA.Collect(ch)\n\te.ipv6QueryNS.Collect(ch)\n\te.ipv6QueryCNAME.Collect(ch)\n\te.ipv6QuerySOA.Collect(ch)\n\te.ipv6QueryPTR.Collect(ch)\n\te.ipv6QueryHINFO.Collect(ch)\n\te.ipv6QueryMX.Collect(ch)\n\te.ipv6QueryTXT.Collect(ch)\n\te.ipv6QueryRP.Collect(ch)\n\te.ipv6QuerySIG.Collect(ch)\n\te.ipv6QueryKEY.Collect(ch)\n\te.ipv6QueryAAAA.Collect(ch)\n\te.ipv6QueryAXFR.Collect(ch)\n\te.ipv6QueryANY.Collect(ch)\n\te.ipv6QueryTOTAL.Collect(ch)\n\te.ipv6QueryOTHER.Collect(ch)\n\te.ipv6QueryNOTAUTH.Collect(ch)\n\te.ipv6QueryNOTIMPL.Collect(ch)\n\te.ipv6QueryBADCLASS.Collect(ch)\n\te.ipv6QueryNOQUERY.Collect(ch)\n}", "func (p *Kafka) CollectMetrics(mts []plugin.MetricType) ([]plugin.MetricType, error) {\n\tmetrics := []plugin.MetricType{}\n\n\terr := p.loadMetricAPI(mts[0].Config())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, m := range mts {\n\t\tresults := []nodeData{}\n\t\tsearch := strings.Split(replaceUnderscoreToDot(strings.TrimLeft(m.Namespace().String(), \"/\")), \"/\")\n\t\tif len(search) > 3 {\n\t\t\tp.client.Root.Get(p.client.client.GetUrl(), search[4:], 0, &results)\n\t\t}\n\n\t\tfor _, result := range results {\n\t\t\tns := append([]string{\"hyperpilot\", \"kafka\", \"node\", p.client.host}, strings.Split(result.Path, Slash)...)\n\t\t\tmetrics = append(metrics, plugin.MetricType{\n\t\t\t\tNamespace_: core.NewNamespace(ns...),\n\t\t\t\tTimestamp_: time.Now(),\n\t\t\t\tData_: result.Data,\n\t\t\t\tUnit_: reflect.TypeOf(result.Data).String(),\n\t\t\t})\n\t\t}\n\n\t}\n\n\treturn metrics, nil\n}", "func rcMemStats(ctx context.Context, in Params) (out Params, err error) {\n\tout = make(Params)\n\tvar m runtime.MemStats\n\truntime.ReadMemStats(&m)\n\tout[\"Alloc\"] = m.Alloc\n\tout[\"TotalAlloc\"] = m.TotalAlloc\n\tout[\"Sys\"] = m.Sys\n\tout[\"Mallocs\"] = m.Mallocs\n\tout[\"Frees\"] = m.Frees\n\tout[\"HeapAlloc\"] = m.HeapAlloc\n\tout[\"HeapSys\"] = m.HeapSys\n\tout[\"HeapIdle\"] = m.HeapIdle\n\tout[\"HeapInuse\"] = m.HeapInuse\n\tout[\"HeapReleased\"] = m.HeapReleased\n\tout[\"HeapObjects\"] = m.HeapObjects\n\tout[\"StackInuse\"] = m.StackInuse\n\tout[\"StackSys\"] = m.StackSys\n\tout[\"MSpanInuse\"] = m.MSpanInuse\n\tout[\"MSpanSys\"] = m.MSpanSys\n\tout[\"MCacheInuse\"] = m.MCacheInuse\n\tout[\"MCacheSys\"] = m.MCacheSys\n\tout[\"BuckHashSys\"] = m.BuckHashSys\n\tout[\"GCSys\"] = m.GCSys\n\tout[\"OtherSys\"] = m.OtherSys\n\treturn out, nil\n}", "func Collect(ctx context.Context) error {\n\tif !singleton.enabled {\n\t\treturn nil\n\t}\n\n\tif singleton.darkstatAddr == \"\" {\n\t\treturn fmt.Errorf(\"Darkstat address is empty\")\n\t}\n\n\tstartTime := time.Now()\n\n\tinventoryHosts := inventory.Get()\n\n\tlocalAddr, err := network.DefaultLocalAddr()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// To label source traffic that we need to build dependency graph\n\tlocalHostgroup := localAddr.String()\n\tlocalDomain := localAddr.String()\n\tlocalInventory, ok := 
inventoryHosts[localAddr.String()]\n\tif ok {\n\t\tlocalHostgroup = localInventory.Hostgroup\n\t\tlocalDomain = localInventory.Domain\n\t} else {\n\t\tlog.Debugf(\"Local address doesn't exist in inventory: %v\", localAddr.String())\n\t}\n\n\t// Scrape darkstat prometheus endpoint for host_bytes_total\n\tvar darkstatHostBytesTotal *prom2json.Family\n\tdarkstatScrape, err := prometheus.Scrape(singleton.darkstatAddr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, v := range darkstatScrape {\n\t\tif v.Name == \"host_bytes_total\" {\n\t\t\tdarkstatHostBytesTotal = v\n\t\t\tbreak\n\t\t}\n\t}\n\tif darkstatHostBytesTotal == nil {\n\t\treturn fmt.Errorf(\"Metric host_bytes_total doesn't exist\")\n\t}\n\n\t// Extract relevant data out of host_bytes_total\n\tvar hosts []Metric\n\tfor _, m := range darkstatHostBytesTotal.Metrics {\n\t\tmetric := m.(prom2json.Metric)\n\n\t\tip := net.ParseIP(metric.Labels[\"ip\"])\n\n\t\t// Skip its own IP as we don't need it\n\t\tif ip.Equal(localAddr) {\n\t\t\tcontinue\n\t\t}\n\n\t\tinventoryHostInfo := inventoryHosts[metric.Labels[\"ip\"]]\n\n\t\tbandwidth, err := strconv.ParseFloat(metric.Value, 64)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to parse 'host_bytes_total' value: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tdirection := \"\"\n\t\t// Reversed from netfilter perspective\n\t\tswitch metric.Labels[\"dir\"] {\n\t\tcase \"out\":\n\t\t\tdirection = \"ingress\"\n\t\tcase \"in\":\n\t\t\tdirection = \"egress\"\n\t\t}\n\n\t\thosts = append(hosts, Metric{\n\t\t\tLocalHostgroup: localHostgroup,\n\t\t\tRemoteHostgroup: inventoryHostInfo.Hostgroup,\n\t\t\tRemoteIPAddr: metric.Labels[\"ip\"],\n\t\t\tLocalDomain: localDomain,\n\t\t\tRemoteDomain: inventoryHostInfo.Domain,\n\t\t\tDirection: direction,\n\t\t\tBandwidth: bandwidth,\n\t\t})\n\t}\n\n\tsingleton.mu.Lock()\n\tsingleton.hosts = hosts\n\tsingleton.mu.Unlock()\n\n\tlog.Debugf(\"taskdarkstat.Collect retrieved %v downstreams metrics\", len(hosts))\n\tlog.Debugf(\"taskdarkstat.Collect process took %v\", time.Since(startTime))\n\treturn nil\n}", "func (p *Libvirt) CollectMetrics(mts []plugin.MetricType) ([]plugin.MetricType, error) {\n\tmetrics := []plugin.MetricType{}\n\tconn, err := libvirt.NewVirConnection(getHypervisorURI(mts[0].Config().Table()))\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer conn.CloseConnection()\n\n\tfor _, p := range mts {\n\n\t\tns := p.Namespace()\n\t\tif ns.Strings()[1] == \"*\" {\n\t\t\tdomains, err := conn.ListDomains()\n\t\t\tif err != nil {\n\t\t\t\treturn metrics, err\n\t\t\t}\n\t\t\tfor j := 0; j < domainCount(domains); j++ {\n\t\t\t\tdom, err := conn.LookupDomainById(domains[j])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn metrics, err\n\t\t\t\t}\n\t\t\t\tdefer dom.Free()\n\t\t\t\tmetric, err := processMetric(ns.String(), dom, p)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn metrics, err\n\t\t\t\t}\n\t\t\t\tif !metricReported(metrics, metric.Namespace().String()) {\n\t\t\t\t\thostname, err := conn.GetHostname()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn metrics, err\n\t\t\t\t\t}\n\t\t\t\t\tdomainname := p.Namespace()[1]\n\t\t\t\t\tmetric.Tags_ = addTags(hostname, domainname.Value)\n\t\t\t\t\tmetrics = append(metrics, metric)\n\t\t\t\t}\n\n\t\t\t}\n\t\t} else {\n\n\t\t\tif !metricReported(metrics, p.Namespace().String()) {\n\t\t\t\tdomainName, err := namespacetoDomain(p.Namespace().Strings())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tdom, err := conn.LookupDomainByName(domainName)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, 
err\n\t\t\t\t}\n\t\t\t\tdefer dom.Free()\n\t\t\t\tmetric, err := processMetric(ns.String(), dom, p)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn metrics, err\n\t\t\t\t}\n\t\t\t\thostname, err := conn.GetHostname()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn metrics, err\n\t\t\t\t}\n\t\t\t\tdomainname := p.Namespace()[1]\n\t\t\t\tmetric.Tags_ = addTags(hostname, domainname.Value)\n\t\t\t\tmetrics = append(metrics, metric)\n\t\t\t}\n\t\t}\n\n\t}\n\treturn metrics, err\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock()\n\tdefer e.mutex.Unlock()\n\n\tfor _, vec := range e.gauges {\n\t\tvec.Reset()\n\t}\n\n\tdefer func() { ch <- e.up }()\n\n\t// If we fail at any point in retrieving GPU status, up is reported as 0\n\te.up.Set(1)\n\n\te.GetTelemetryFromNVML()\n\n\tfor _, vec := range e.gauges {\n\t\tvec.Collect(ch)\n\t}\n}", "func measureSpammerMetrics() {\n\tif spammerStartTime.IsZero() {\n\t\t// Spammer not started yet\n\t\treturn\n\t}\n\n\tsentSpamMsgsCnt := deps.ServerMetrics.SentSpamMessages.Load()\n\tnewMsgs := utils.GetUint32Diff(sentSpamMsgsCnt, lastSentSpamMsgsCnt)\n\tlastSentSpamMsgsCnt = sentSpamMsgsCnt\n\n\tspammerAvgHeap.Add(uint64(newMsgs))\n\n\ttimeDiff := time.Since(spammerStartTime)\n\tif timeDiff > 60*time.Second {\n\t\t// Only filter over one minute maximum\n\t\ttimeDiff = 60 * time.Second\n\t}\n\n\t// trigger events for outside listeners\n\tEvents.AvgSpamMetricsUpdated.Trigger(&spammer.AvgSpamMetrics{\n\t\tNewMessages: newMsgs,\n\t\tAverageMessagesPerSecond: spammerAvgHeap.GetAveragePerSecond(timeDiff),\n\t})\n}", "func (cg *CGroup) GetMemoryStats() (map[string]uint64, error) {\n\tvar (\n\t\terr error\n\t\tstats string\n\t)\n\n\tout := make(map[string]uint64)\n\n\tversion := cgControllers[\"memory\"]\n\tswitch version {\n\tcase Unavailable:\n\t\treturn nil, ErrControllerMissing\n\tcase V1, V2:\n\t\tstats, err = cg.rw.Get(version, \"memory\", \"memory.stat\")\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, stat := range strings.Split(stats, \"\\n\") {\n\t\tfield := strings.Split(stat, \" \")\n\n\t\tswitch field[0] {\n\t\tcase \"total_active_anon\", \"active_anon\":\n\t\t\tout[\"active_anon\"], _ = strconv.ParseUint(field[1], 10, 64)\n\t\tcase \"total_active_file\", \"active_file\":\n\t\t\tout[\"active_file\"], _ = strconv.ParseUint(field[1], 10, 64)\n\t\tcase \"total_inactive_anon\", \"inactive_anon\":\n\t\t\tout[\"inactive_anon\"], _ = strconv.ParseUint(field[1], 10, 64)\n\t\tcase \"total_inactive_file\", \"inactive_file\":\n\t\t\tout[\"inactive_file\"], _ = strconv.ParseUint(field[1], 10, 64)\n\t\tcase \"total_unevictable\", \"unevictable\":\n\t\t\tout[\"unevictable\"], _ = strconv.ParseUint(field[1], 10, 64)\n\t\tcase \"total_writeback\", \"file_writeback\":\n\t\t\tout[\"writeback\"], _ = strconv.ParseUint(field[1], 10, 64)\n\t\tcase \"total_dirty\", \"file_dirty\":\n\t\t\tout[\"dirty\"], _ = strconv.ParseUint(field[1], 10, 64)\n\t\tcase \"total_mapped_file\", \"file_mapped\":\n\t\t\tout[\"mapped\"], _ = strconv.ParseUint(field[1], 10, 64)\n\t\tcase \"total_rss\": // v1 only\n\t\t\tout[\"rss\"], _ = strconv.ParseUint(field[1], 10, 64)\n\t\tcase \"total_shmem\", \"shmem\":\n\t\t\tout[\"shmem\"], _ = strconv.ParseUint(field[1], 10, 64)\n\t\tcase \"total_cache\", \"file\":\n\t\t\tout[\"cache\"], _ = strconv.ParseUint(field[1], 10, 64)\n\t\t}\n\t}\n\n\t// Calculated values\n\tout[\"active\"] = out[\"active_anon\"] + out[\"active_file\"]\n\tout[\"inactive\"] = out[\"inactive_anon\"] + out[\"inactive_file\"]\n\n\treturn out, nil\n}", "func (h BryanMemoryStatsHook) printMemStats() {\n\th.Log.Debug(\"Reading memory 
statistics\")\n\tvar m runtime.MemStats\n\truntime.ReadMemStats(&m)\n\t\n\th.Log.Debug(\"Printing memory statistics\")\n\th.printHeader(&m)\n\th.printMemAlloc(&m)\n\th.printMemTotalAlloc(&m)\n\th.printMemSys(&m)\n\th.printMemFrees(&m)\n\th.printMemNumGC(&m)\n\th.printFooter(&m)\n}", "func getServerStatsMemory(printDetails bool){\n\n\t/*\n\tresident, virtual, err := getServerStatsMemory(true)\n\tif err == nil {\n\t\tfmt.Printf(\"phys. Memory is %v - of data stock size of %v (useful only on mongod)\\n\",resident, virtual)\n\t} else {\n\t\tfmt.Println(\"no mem info\", err )\n\t}\n\t*/\n\t\n}", "func (logstash *Logstash) gatherPluginsStats(\n\tplugins []Plugin,\n\tpluginType string,\n\ttags map[string]string,\n\taccumulator telegraf.Accumulator,\n) error {\n\tfor _, plugin := range plugins {\n\t\tpluginTags := map[string]string{\n\t\t\t\"plugin_name\": plugin.Name,\n\t\t\t\"plugin_id\": plugin.ID,\n\t\t\t\"plugin_type\": pluginType,\n\t\t}\n\t\tfor tag, value := range tags {\n\t\t\tpluginTags[tag] = value\n\t\t}\n\t\tflattener := jsonParser.JSONFlattener{}\n\t\terr := flattener.FlattenJSON(\"\", plugin.Events)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\taccumulator.AddFields(\"logstash_plugins\", flattener.Fields, pluginTags)\n\t\tif plugin.Failures != nil {\n\t\t\tfailuresFields := map[string]interface{}{\"failures\": *plugin.Failures}\n\t\t\taccumulator.AddFields(\"logstash_plugins\", failuresFields, pluginTags)\n\t\t}\n\t\t/*\n\t\t\tThe elasticsearch & opensearch output produces additional stats\n\t\t\taround bulk requests and document writes (that are elasticsearch\n\t\t\tand opensearch specific). Collect those below:\n\t\t*/\n\t\tif pluginType == \"output\" && (plugin.Name == \"elasticsearch\" || plugin.Name == \"opensearch\") {\n\t\t\t/*\n\t\t\t\tThe \"bulk_requests\" section has details about batch writes\n\t\t\t\tinto Elasticsearch\n\n\t\t\t\t \"bulk_requests\" : {\n\t\t\t\t\t\"successes\" : 2870,\n\t\t\t\t\t\"responses\" : {\n\t\t\t\t\t \"200\" : 2870\n\t\t\t\t\t},\n\t\t\t\t\t\"failures\": 262,\n\t\t\t\t\t\"with_errors\": 9089\n\t\t\t\t },\n\t\t\t*/\n\t\t\tflattener := jsonParser.JSONFlattener{}\n\t\t\terr := flattener.FlattenJSON(\"\", plugin.BulkRequests)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfor k, v := range flattener.Fields {\n\t\t\t\tif strings.HasPrefix(k, \"bulk_requests\") {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tnewKey := fmt.Sprintf(\"bulk_requests_%s\", k)\n\t\t\t\tflattener.Fields[newKey] = v\n\t\t\t\tdelete(flattener.Fields, k)\n\t\t\t}\n\t\t\taccumulator.AddFields(\"logstash_plugins\", flattener.Fields, pluginTags)\n\n\t\t\t/*\n\t\t\t\tThe \"documents\" section has counts of individual documents\n\t\t\t\twritten/retried/etc.\n\t\t\t\t \"documents\" : {\n\t\t\t\t\t\"successes\" : 2665549,\n\t\t\t\t\t\"retryable_failures\": 13733\n\t\t\t\t }\n\t\t\t*/\n\t\t\tflattener = jsonParser.JSONFlattener{}\n\t\t\terr = flattener.FlattenJSON(\"\", plugin.Documents)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfor k, v := range flattener.Fields {\n\t\t\t\tif strings.HasPrefix(k, \"documents\") {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tnewKey := fmt.Sprintf(\"documents_%s\", k)\n\t\t\t\tflattener.Fields[newKey] = v\n\t\t\t\tdelete(flattener.Fields, k)\n\t\t\t}\n\t\t\taccumulator.AddFields(\"logstash_plugins\", flattener.Fields, pluginTags)\n\t\t}\n\t}\n\n\treturn nil\n}", "func ReadMemStats(ms *MemStats)", "func reduceJavaAgentYaml(m *mainDefinitionParser) (map[string]*domainReducer, error) {\n\tthisDomainMap := 
make(map[string]*domainReducer)\n\tfor _, jmxObject := range m.JMX {\n\t\tvar thisDomain *domainReducer\n\t\tvar thisBean *beanReducer\n\t\tvar domainAndQuery = strings.Split(jmxObject.ObjectName, \":\")\n\t\tif _, ok := thisDomainMap[domainAndQuery[0]]; ok {\n\t\t\tthisDomain = thisDomainMap[domainAndQuery[0]]\n\t\t\tif _, ok := thisDomain.BeansMap[domainAndQuery[1]]; ok {\n\t\t\t\tthisBean = thisDomain.BeansMap[domainAndQuery[1]]\n\t\t\t}\n\t\t}\n\t\tfor _, thisMetric := range jmxObject.Metrics {\n\t\t\tvar inAttrs = strings.Split(thisMetric.Attributes, \",\")\n\t\t\tfor _, thisAttr := range inAttrs {\n\t\t\t\tthisAttr = strings.TrimSpace(thisAttr)\n\t\t\t\tif thisBean != nil {\n\t\t\t\t\tif _, ok := thisBean.AttributesMap[thisAttr]; !ok {\n\t\t\t\t\t\tthisBean.AttributesMap[thisAttr] = &attributeReducer{MetricType: convertMetricType(thisMetric.Type), MetricName: getMetricName(thisAttr, jmxObject.RootMetricName, domainAndQuery[1])}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tthisAttrMap := make(map[string]*attributeReducer)\n\t\t\t\t\tthisAttrMap[thisAttr] = &attributeReducer{MetricType: convertMetricType(thisMetric.Type), MetricName: getMetricName(thisAttr, jmxObject.RootMetricName, domainAndQuery[1])}\n\t\t\t\t\tthisBean = &beanReducer{AttributesMap: thisAttrMap}\n\t\t\t\t\tif thisDomain == nil {\n\t\t\t\t\t\tvar outEventType = getEventType(m.Name, domainAndQuery[0])\n\t\t\t\t\t\tthisBeanMap := make(map[string]*beanReducer)\n\t\t\t\t\t\tthisBeanMap[domainAndQuery[1]] = thisBean\n\t\t\t\t\t\tthisDomainMap[domainAndQuery[0]] = &domainReducer{EventType: outEventType, BeansMap: thisBeanMap}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tthisDomain.BeansMap[domainAndQuery[1]] = thisBean\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn thisDomainMap, nil\n}", "func (s *Stats) GetAllCPUInfo() {\n s.GetCPUInfo()\n s.GetCPUTimes()\n}", "func (c *HostMetricCollector) Run() (HostMetrics, error) {\n\tcpuTimes, err := cpu.Times(false)\n\tif err != nil {\n\t\t// note: can't happen on Linux. 
gopsutil doesn't\n\t\t// return an error\n\t\treturn HostMetrics{}, fmt.Errorf(\"cpu.Times() failed: %s\", err)\n\t}\n\tif len(cpuTimes) == 0 {\n\t\t// possible with hardware failure\n\t\treturn HostMetrics{}, fmt.Errorf(\"cpu.Times() returns no cpus\")\n\t}\n\tt := cpuTimes[0]\n\tjiffy := t.Total()\n\ttoPercent := 100 / (jiffy - c.lastJiffy)\n\n\tlastTimes := c.lastTimes\n\tc.lastJiffy = jiffy\n\tc.lastTimes = t\n\n\tconst mbSize float64 = 1024 * 1024\n\tvmem, err := mem.VirtualMemory()\n\tif err != nil {\n\t\t// only possible if can't parse numbers in /proc/meminfo\n\t\t// that would be massive failure\n\t\treturn HostMetrics{}, fmt.Errorf(\"mem.VirtualMemory() failed: %s\", err)\n\t}\n\n\treturn HostMetrics{\n\t\tCPUUser: ((t.User + t.Nice) - (lastTimes.User + lastTimes.Nice)) * toPercent,\n\t\tCPUSystem: ((t.System + t.Irq + t.Softirq) - (lastTimes.System + lastTimes.Irq + lastTimes.Softirq)) * toPercent,\n\t\tCPUIowait: (t.Iowait - lastTimes.Iowait) * toPercent,\n\t\tCPUIdle: (t.Idle - lastTimes.Idle) * toPercent,\n\t\tCPUStolen: (t.Steal - lastTimes.Steal) * toPercent,\n\t\tCPUGuest: (t.Guest - lastTimes.Guest) * toPercent,\n\t\tMemTotal: float64(vmem.Total) / mbSize,\n\t\tMemFree: float64(vmem.Free) / mbSize,\n\t\tMemUsed: float64(vmem.Total-vmem.Free) / mbSize,\n\t\tMemUsable: float64(vmem.Available) / mbSize,\n\t\tMemPctUsable: float64(100-vmem.UsedPercent) / 100,\n\t}, nil\n}", "func (s *SystemdTimings) Gather(acc telegraf.Accumulator) error {\n\tif !bootIsFinished() {\n\t\t// We are not ready to collect yet, telegraf will call us later to\n\t\t// try again.\n\t\treturn nil\n\t}\n\n\tif !s.Periodic {\n\t\t// We only want to run once.\n\t\tif collectionDone {\n\t\t\t// By default we only collect once since these are generally boot\n\t\t\t// time metrics.\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// Connect to the systemd dbus.\n\tdbusConn, err := dbus.NewSystemConnection()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer dbusConn.Close()\n\n\terr = postAllManagerProps(dbusConn, acc)\n\tif err != nil {\n\t\tacc.AddError(err)\n\t\treturn err\n\t}\n\n\t// Read all unit timing data.\n\terr = postAllUnitTimingData(dbusConn, acc, s)\n\tif err != nil {\n\t\tacc.AddError(err)\n\t\treturn err\n\t}\n\n\tcollectionDone = true\n\n\treturn nil\n}", "func (c *VM) Collect(ctx context.Context) error {\n\tmetrics := cgm.Metrics{}\n\n\tc.Lock()\n\n\tif c.runTTL > time.Duration(0) {\n\t\tif time.Since(c.lastEnd) < c.runTTL {\n\t\t\tc.logger.Warn().Msg(collector.ErrTTLNotExpired.Error())\n\t\t\tc.Unlock()\n\t\t\treturn collector.ErrTTLNotExpired\n\t\t}\n\t}\n\tif c.running {\n\t\tc.logger.Warn().Msg(collector.ErrAlreadyRunning.Error())\n\t\tc.Unlock()\n\t\treturn collector.ErrAlreadyRunning\n\t}\n\n\tc.running = true\n\tc.lastStart = time.Now()\n\tc.Unlock()\n\n\tif err := c.parseMemstats(ctx, &metrics); err != nil {\n\t\tc.setStatus(metrics, err)\n\t\treturn fmt.Errorf(\"%s parseMemstats: %w\", c.pkgID, err)\n\t}\n\n\tif err := c.parseVMstats(ctx, &metrics); err != nil {\n\t\tc.setStatus(metrics, err)\n\t\treturn fmt.Errorf(\"%s parseVMstats: %w\", c.pkgID, err)\n\t}\n\n\tc.setStatus(metrics, nil)\n\treturn nil\n}", "func (pc *NginxProcessesMetricsCollector) Collect(ch chan<- prometheus.Metric) {\n\tpc.updateWorkerProcessCount()\n\tpc.workerProcessTotal.Collect(ch)\n}", "func (h *GrayLog) gatherServer(\n\tacc telegraf.Accumulator,\n\tserverURL string,\n) error {\n\tresp, _, err := h.sendRequest(serverURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\trequestURL, 
err := url.Parse(serverURL)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to parse address %q: %w\", serverURL, err)\n\t}\n\n\thost, port, _ := net.SplitHostPort(requestURL.Host)\n\tvar dat ResponseMetrics\n\tif err := json.Unmarshal([]byte(resp), &dat); err != nil {\n\t\treturn err\n\t}\n\tfor _, mItem := range dat.Metrics {\n\t\tfields := make(map[string]interface{})\n\t\ttags := map[string]string{\n\t\t\t\"server\": host,\n\t\t\t\"port\": port,\n\t\t\t\"name\": mItem.Name,\n\t\t\t\"type\": mItem.Type,\n\t\t}\n\t\th.flatten(mItem.Fields, fields, \"\")\n\t\tacc.AddFields(mItem.FullName, fields, tags)\n\t}\n\treturn nil\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\tif err := e.scrape(); err != nil {\n\t\tlog.Error(err)\n\t\tnomad_up.Set(0)\n\t\tch <- nomad_up\n\t\treturn\n\t}\n\n\tch <- nomad_up\n\tch <- metric_uptime\n\tch <- metric_request_response_time_total\n\tch <- metric_request_response_time_avg\n\n\tfor _, metric := range metric_request_status_count_current {\n\t\tch <- metric\n\t}\n\tfor _, metric := range metric_request_status_count_total {\n\t\tch <- metric\n\t}\n}", "func stats(stats elastic.BulkProcessorStats) {\n\t// build the JSON text for Workers\n\tvar workersStr string\n\tvar workers Workers\n\tif err := workers.InitWorkers(stats.Workers); err == nil {\n\t\tworkersStr = workers.String()\n\t}\n\n\t// print the stats info\n\tlog.Logger.WithFields(logrus.Fields{\n\t\t\"Flushed\": stats.Flushed,\n\t\t\"Committed\": stats.Committed,\n\t\t\"Indexed\": stats.Indexed,\n\t\t\"Created\": stats.Created,\n\t\t\"Updated\": stats.Updated,\n\t\t\"Deleted\": stats.Deleted,\n\t\t\"Succeeded\": stats.Succeeded,\n\t\t\"Failed\": stats.Failed,\n\t\t\"Workers\": workersStr,\n\t}).Info(\"stats info detail\")\n}", "func (w *HotCache) CollectMetrics() {\n\tw.writeFlow.CollectMetrics(\"write\")\n\tw.readFlow.CollectMetrics(\"read\")\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\t// Protect metrics from concurrent collects.\n\te.mutex.Lock()\n\tdefer e.mutex.Unlock()\n\n\t// Scrape metrics from Tankerkoenig API.\n\tif err := e.scrape(ch); err != nil {\n\t\te.logger.Printf(\"error: cannot scrape tankerkoenig api: %v\", err)\n\t}\n\n\t// Collect metrics.\n\te.up.Collect(ch)\n\te.scrapeDuration.Collect(ch)\n\te.failedScrapes.Collect(ch)\n\te.totalScrapes.Collect(ch)\n}", "func (c *VM) Collect() error {\n\tmetrics := cgm.Metrics{}\n\n\tc.Lock()\n\n\tif c.runTTL > time.Duration(0) {\n\t\tif time.Since(c.lastEnd) < c.runTTL {\n\t\t\tc.logger.Warn().Msg(collector.ErrTTLNotExpired.Error())\n\t\t\tc.Unlock()\n\t\t\treturn collector.ErrTTLNotExpired\n\t\t}\n\t}\n\tif c.running {\n\t\tc.logger.Warn().Msg(collector.ErrAlreadyRunning.Error())\n\t\tc.Unlock()\n\t\treturn collector.ErrAlreadyRunning\n\t}\n\n\tc.running = true\n\tc.lastStart = time.Now()\n\tc.Unlock()\n\n\tif err := c.parseMemstats(&metrics); err != nil {\n\t\tc.setStatus(metrics, err)\n\t\treturn errors.Wrap(err, c.pkgID)\n\t}\n\n\tif err := c.parseVMstats(&metrics); err != nil {\n\t\tc.setStatus(metrics, err)\n\t\treturn errors.Wrap(err, c.pkgID)\n\t}\n\n\tc.setStatus(metrics, nil)\n\treturn nil\n}", "func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {\n\topt := defaultGoCollectorOptions()\n\tfor _, o := range opts {\n\t\to(&opt)\n\t}\n\n\texposedDescriptions := matchRuntimeMetricsRules(opt.RuntimeMetricRules)\n\n\t// Collect all histogram samples so that we can get their buckets.\n\t// The API guarantees 
that the buckets are always fixed for the lifetime\n\t// of the process.\n\tvar histograms []metrics.Sample\n\tfor _, d := range exposedDescriptions {\n\t\tif d.Kind == metrics.KindFloat64Histogram {\n\t\t\thistograms = append(histograms, metrics.Sample{Name: d.Name})\n\t\t}\n\t}\n\n\tif len(histograms) > 0 {\n\t\tmetrics.Read(histograms)\n\t}\n\n\tbucketsMap := make(map[string][]float64)\n\tfor i := range histograms {\n\t\tbucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets\n\t}\n\n\t// Generate a collector for each exposed runtime/metrics metric.\n\tmetricSet := make([]collectorMetric, 0, len(exposedDescriptions))\n\t// SampleBuf is used for reading from runtime/metrics.\n\t// We are assuming the largest case to have stable pointers for sampleMap purposes.\n\tsampleBuf := make([]metrics.Sample, 0, len(exposedDescriptions)+len(opt.RuntimeMetricSumForHist)+len(rmNamesForMemStatsMetrics))\n\tsampleMap := make(map[string]*metrics.Sample, len(exposedDescriptions))\n\tfor _, d := range exposedDescriptions {\n\t\tnamespace, subsystem, name, ok := internal.RuntimeMetricsToProm(&d.Description)\n\t\tif !ok {\n\t\t\t// Just ignore this metric; we can't do anything with it here.\n\t\t\t// If a user decides to use the latest version of Go, we don't want\n\t\t\t// to fail here. This condition is tested in TestExpectedRuntimeMetrics.\n\t\t\tcontinue\n\t\t}\n\n\t\tsampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})\n\t\tsampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]\n\n\t\tvar m collectorMetric\n\t\tif d.Kind == metrics.KindFloat64Histogram {\n\t\t\t_, hasSum := opt.RuntimeMetricSumForHist[d.Name]\n\t\t\tunit := d.Name[strings.IndexRune(d.Name, ':')+1:]\n\t\t\tm = newBatchHistogram(\n\t\t\t\tNewDesc(\n\t\t\t\t\tBuildFQName(namespace, subsystem, name),\n\t\t\t\t\td.Description.Description,\n\t\t\t\t\tnil,\n\t\t\t\t\tnil,\n\t\t\t\t),\n\t\t\t\tinternal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit),\n\t\t\t\thasSum,\n\t\t\t)\n\t\t} else if d.Cumulative {\n\t\t\tm = NewCounter(CounterOpts{\n\t\t\t\tNamespace: namespace,\n\t\t\t\tSubsystem: subsystem,\n\t\t\t\tName: name,\n\t\t\t\tHelp: d.Description.Description,\n\t\t\t},\n\t\t\t)\n\t\t} else {\n\t\t\tm = NewGauge(GaugeOpts{\n\t\t\t\tNamespace: namespace,\n\t\t\t\tSubsystem: subsystem,\n\t\t\t\tName: name,\n\t\t\t\tHelp: d.Description.Description,\n\t\t\t})\n\t\t}\n\t\tmetricSet = append(metricSet, m)\n\t}\n\n\t// Add exact sum metrics to sampleBuf if not added before.\n\tfor _, h := range histograms {\n\t\tsumMetric, ok := opt.RuntimeMetricSumForHist[h.Name]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tif _, ok := sampleMap[sumMetric]; ok {\n\t\t\tcontinue\n\t\t}\n\t\tsampleBuf = append(sampleBuf, metrics.Sample{Name: sumMetric})\n\t\tsampleMap[sumMetric] = &sampleBuf[len(sampleBuf)-1]\n\t}\n\n\tvar (\n\t\tmsMetrics memStatsMetrics\n\t\tmsDescriptions []metrics.Description\n\t)\n\n\tif !opt.DisableMemStatsLikeMetrics {\n\t\tmsMetrics = goRuntimeMemStats()\n\t\tmsDescriptions = bestEffortLookupRM(rmNamesForMemStatsMetrics)\n\n\t\t// Check if metric was not exposed before and if not, add to sampleBuf.\n\t\tfor _, mdDesc := range msDescriptions {\n\t\t\tif _, ok := sampleMap[mdDesc.Name]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsampleBuf = append(sampleBuf, metrics.Sample{Name: mdDesc.Name})\n\t\t\tsampleMap[mdDesc.Name] = &sampleBuf[len(sampleBuf)-1]\n\t\t}\n\t}\n\n\treturn &goCollector{\n\t\tbase: newBaseGoCollector(),\n\t\tsampleBuf: sampleBuf,\n\t\tsampleMap: sampleMap,\n\t\trmExposedMetrics: 
metricSet,\n\t\trmExactSumMapForHist: opt.RuntimeMetricSumForHist,\n\t\tmsMetrics: msMetrics,\n\t\tmsMetricsEnabled: !opt.DisableMemStatsLikeMetrics,\n\t}\n}", "func (a *app) gatherStat() {\n\tabout, err := a.srv.About.Get().Fields(\"storageQuota\").Do()\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to execute an about request: %v\", err)\n\t}\n\n\ta.sq = about.StorageQuota\n}", "func systemMemoryMonitor(logger *logrus.Logger, wg *sync.WaitGroup, done chan struct{}, kill chan struct{}) {\n\tdefer wg.Done()\n\tdefer close(kill)\n\n\tvar swapUsedBaseline uint64 = math.MaxUint64\n\n\tfor {\n\t\tselect {\n\t\tcase <-done:\n\t\t\treturn\n\t\tcase <-time.After(1 * time.Second):\n\t\t}\n\n\t\tctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)\n\t\tmemStat, err := mem.VirtualMemoryWithContext(ctx)\n\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Debugf(\"Failed to retrieve memory usage.\")\n\t\t}\n\n\t\tcancel()\n\n\t\tctx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond)\n\t\tswapStat, err := mem.SwapMemoryWithContext(ctx)\n\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Debugf(\"Failed to retrieve swap usage.\")\n\t\t}\n\n\t\tcancel()\n\n\t\tswapUsed := uint64(0)\n\t\tif swapStat.Used < swapUsedBaseline {\n\t\t\tswapUsed = swapStat.Used\n\t\t} else {\n\t\t\tswapUsed = swapStat.Used - swapUsedBaseline\n\t\t}\n\n\t\tused := float64(memStat.Used+swapUsed) / float64(memStat.Total)\n\t\tlogger.Debugf(\n\t\t\t\"Memory usage: %.2f%% - RAM %s / Swap %s.\",\n\t\t\tused*100,\n\t\t\thumanBytes(memStat.Used),\n\t\t\thumanBytes(swapUsed),\n\t\t)\n\n\t\tif used > 0.9 {\n\t\t\treturn\n\t\t}\n\t}\n}", "func (ld *loader) CollectMetrics() (writes metrics.RequestsSummary, reads metrics.RequestsSummary, err error) {\n\t// https://pkg.go.dev/github.com/prometheus/client_golang/prometheus?tab=doc#Gatherer\n\tmfs, err := prometheus.DefaultGatherer.Gather()\n\tif err != nil {\n\t\tld.cfg.Logger.Warn(\"failed to gather prometheus metrics\", zap.Error(err))\n\t\treturn metrics.RequestsSummary{}, metrics.RequestsSummary{}, err\n\t}\n\tfor _, mf := range mfs {\n\t\tif mf == nil {\n\t\t\tcontinue\n\t\t}\n\t\tswitch *mf.Name {\n\t\tcase \"secrets_client_write_requests_success_total\":\n\t\t\tgg := mf.Metric[0].GetGauge()\n\t\t\twrites.SuccessTotal = gg.GetValue()\n\t\tcase \"secrets_client_write_requests_failure_total\":\n\t\t\tgg := mf.Metric[0].GetGauge()\n\t\t\twrites.FailureTotal = gg.GetValue()\n\t\tcase \"secrets_client_write_request_latency_milliseconds\":\n\t\t\twrites.LatencyHistogram, err = metrics.ParseHistogram(\"milliseconds\", mf.Metric[0].GetHistogram())\n\t\t\tif err != nil {\n\t\t\t\treturn metrics.RequestsSummary{}, metrics.RequestsSummary{}, err\n\t\t\t}\n\n\t\tcase \"secrets_client_read_requests_success_total\":\n\t\t\tgg := mf.Metric[0].GetGauge()\n\t\t\treads.SuccessTotal = gg.GetValue()\n\t\tcase \"secrets_client_read_requests_failure_total\":\n\t\t\tgg := mf.Metric[0].GetGauge()\n\t\t\treads.FailureTotal = gg.GetValue()\n\t\tcase \"secrets_client_read_request_latency_milliseconds\":\n\t\t\treads.LatencyHistogram, err = metrics.ParseHistogram(\"milliseconds\", mf.Metric[0].GetHistogram())\n\t\t\tif err != nil {\n\t\t\t\treturn metrics.RequestsSummary{}, metrics.RequestsSummary{}, err\n\t\t\t}\n\t\t}\n\t}\n\n\tld.cfg.Logger.Info(\"sorting write latency results\", zap.Int(\"total-data-points\", ld.writeLatencies.Len()))\n\tnow := time.Now()\n\tsort.Sort(ld.writeLatencies)\n\tld.cfg.Logger.Info(\"sorted write latency results\", 
zap.Int(\"total-data-points\", ld.writeLatencies.Len()), zap.String(\"took\", time.Since(now).String()))\n\twrites.LantencyP50 = ld.writeLatencies.PickLantencyP50()\n\twrites.LantencyP90 = ld.writeLatencies.PickLantencyP90()\n\twrites.LantencyP99 = ld.writeLatencies.PickLantencyP99()\n\twrites.LantencyP999 = ld.writeLatencies.PickLantencyP999()\n\twrites.LantencyP9999 = ld.writeLatencies.PickLantencyP9999()\n\n\tld.cfg.Logger.Info(\"writing latency results in JSON to disk\", zap.String(\"path\", ld.cfg.WritesJSONPath))\n\twb, err := json.Marshal(ld.writeLatencies)\n\tif err != nil {\n\t\tld.cfg.Logger.Warn(\"failed to encode latency results in JSON\", zap.Error(err))\n\t\treturn metrics.RequestsSummary{}, metrics.RequestsSummary{}, err\n\t}\n\tif err = ioutil.WriteFile(ld.cfg.WritesJSONPath, wb, 0600); err != nil {\n\t\tld.cfg.Logger.Warn(\"failed to write latency results in JSON to disk\", zap.String(\"path\", ld.cfg.WritesJSONPath), zap.Error(err))\n\t\treturn metrics.RequestsSummary{}, metrics.RequestsSummary{}, err\n\t}\n\tld.cfg.Logger.Info(\"wrote latency results in JSON to disk\", zap.String(\"path\", ld.cfg.WritesJSONPath))\n\n\tld.cfg.Logger.Info(\"sorting read latency results\", zap.Int(\"total-data-points\", ld.readLatencies.Len()))\n\tnow = time.Now()\n\tsort.Sort(ld.readLatencies)\n\tld.cfg.Logger.Info(\"sorted read latency results\", zap.Int(\"total-data-points\", ld.readLatencies.Len()), zap.String(\"took\", time.Since(now).String()))\n\treads.LantencyP50 = ld.readLatencies.PickLantencyP50()\n\treads.LantencyP90 = ld.readLatencies.PickLantencyP90()\n\treads.LantencyP99 = ld.readLatencies.PickLantencyP99()\n\treads.LantencyP999 = ld.readLatencies.PickLantencyP999()\n\treads.LantencyP9999 = ld.readLatencies.PickLantencyP9999()\n\n\tld.cfg.Logger.Info(\"writing latency results in JSON to disk\", zap.String(\"path\", ld.cfg.ReadsJSONPath))\n\twb, err = json.Marshal(ld.readLatencies)\n\tif err != nil {\n\t\tld.cfg.Logger.Warn(\"failed to encode latency results in JSON\", zap.Error(err))\n\t\treturn metrics.RequestsSummary{}, metrics.RequestsSummary{}, err\n\t}\n\tif err = ioutil.WriteFile(ld.cfg.ReadsJSONPath, wb, 0600); err != nil {\n\t\tld.cfg.Logger.Warn(\"failed to write latency results in JSON to disk\", zap.String(\"path\", ld.cfg.ReadsJSONPath), zap.Error(err))\n\t\treturn metrics.RequestsSummary{}, metrics.RequestsSummary{}, err\n\t}\n\tld.cfg.Logger.Info(\"wrote latency results in JSON to disk\", zap.String(\"path\", ld.cfg.ReadsJSONPath))\n\n\treturn writes, reads, nil\n}", "func (s *SystemMetrics) GetVirtualMemoryStats(c chan *MemoryStats) {\n\tstats, err := gomem.VirtualMemory()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tc <- &MemoryStats{\n\t\t// default is always in bytes. 
hence, convert into the required format.\n\t\tTotal: stats.Total / 1000000,\n\t\tAvailable: stats.Available / 1000000,\n\t\tUsed: stats.Used / 1000000,\n\t\tUsedPercent: stats.UsedPercent,\n\t\tFree: stats.Free / 1000000,\n\t}\n}", "func CaptureMemStats(d time.Duration) {\n\tcaptureMemStats(metrics.DefaultRegistry, d)\n}", "func vgCollect(ch chan<- prometheus.Metric, vgs []map[string]string) {\n    for _, vg := range vgs {\n        vgSizeF, err := strconv.ParseFloat(strings.Trim(vg[\"vg_size\"], \"B\"), 64)\n        if err != nil {\n            log.Print(err)\n            return\n        }\n        ch <- prometheus.MustNewConstMetric(vgSizeMetric, prometheus.GaugeValue, vgSizeF, vg[\"vg_name\"], vg[\"vg_uuid\"])\n\n        vgFreeF, err := strconv.ParseFloat(strings.Trim(vg[\"vg_free\"], \"B\"), 64)\n        if err != nil {\n            log.Print(err)\n            return\n        }\n        ch <- prometheus.MustNewConstMetric(vgFreeMetric, prometheus.GaugeValue, vgFreeF, vg[\"vg_name\"], vg[\"vg_uuid\"])\n    }\n}", "func (c Collector) CollectMetrics(mts []plugin.Metric) ([]plugin.Metric, error) {\n\tmetrics := []plugin.Metric{}\n\n\tuser, err := mts[0].Config.GetString(\"user\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmp, err := c.client.getMetricData(user)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, mt := range mts {\n\t\tns := mt.Namespace.Strings()\n\t\tns[3] = user\n\t\tnss := strings.Join(ns, \"/\")\n\t\tmetrics = append(metrics, mp[nss])\n\t}\n\n\treturn metrics, nil\n}", "func (m *KubeletMonitor) parseNodeStats(nodeStats stats.NodeStats) {\n\t// cpu\n\tcpuUsageCore := float64(*nodeStats.CPU.UsageNanoCores) / util.NanoToUnit\n\tglog.V(4).Infof(\"Cpu usage of node %s is %f core\", nodeStats.NodeName, cpuUsageCore)\n\tnodeCpuUsageCoreMetrics := metrics.NewEntityResourceMetric(task.NodeType, util.NodeStatsKeyFunc(nodeStats),\n\t\tmetrics.CPU, metrics.Used, cpuUsageCore)\n\n\t// memory\n\tmemoryUsageKiloBytes := float64(*nodeStats.Memory.UsageBytes) / util.KilobytesToBytes\n\tglog.V(4).Infof(\"Memory usage of node %s is %f Kb\", nodeStats.NodeName, memoryUsageKiloBytes)\n\tnodeMemoryUsageKiloBytesMetrics := metrics.NewEntityResourceMetric(task.NodeType,\n\t\tutil.NodeStatsKeyFunc(nodeStats), metrics.Memory, metrics.Used, memoryUsageKiloBytes)\n\n\tm.metricSink.AddNewMetricEntries(nodeCpuUsageCoreMetrics, nodeMemoryUsageKiloBytesMetrics)\n\n}", "func (s *Stats) CalculateTotalCPUTimes() []CPUPercentages {\n\n    percentages := make([]CPUPercentages, len(s.CPUInfo.TotalTimes))\n\n    if len(s.CPUInfo.PrevTotalTimes) == 0 || len(s.CPUInfo.TotalTimes) == 0 {\n        return percentages\n    }\n\n    var diff float64\n    var total float64\n    var prevTotal float64\n    var prevStat cpu.TimesStat\n    var cpuStat *CPUPercentages\n\n    for i, t := range s.CPUInfo.TotalTimes {\n        cpuStat = &percentages[i]\n        prevStat = s.CPUInfo.PrevTotalTimes[i]\n\n        total = t.User + t.System + t.Idle + t.Nice + t.Iowait + t.Irq + t.Softirq + t.Steal + t.Guest + t.GuestNice + t.Stolen\n        prevTotal = prevStat.User + prevStat.System + prevStat.Idle + prevStat.Nice + prevStat.Iowait + prevStat.Irq + prevStat.Softirq + prevStat.Steal + prevStat.Guest + prevStat.GuestNice + prevStat.Stolen\n\n        diff = total - prevTotal\n\n        cpuStat.CPU = t.CPU\n        cpuStat.User = (t.User - prevStat.User) / diff * 100\n        cpuStat.System = (t.System - prevStat.System) / diff * 100\n        cpuStat.Idle = (t.Idle - prevStat.Idle) / diff * 100\n        cpuStat.Nice = (t.Nice - prevStat.Nice) / diff * 100\n        cpuStat.IOWait = (t.Iowait - prevStat.Iowait) / diff * 100\n        cpuStat.IRQ = (t.Irq - prevStat.Irq) / diff * 100\n        cpuStat.SoftIRQ = (t.Softirq - prevStat.Softirq) / diff * 100\n        cpuStat.Steal = (t.Steal - 
prevStat.Steal) / diff * 100\n cpuStat.Guest = (t.Guest - prevStat.Guest) / diff * 100\n cpuStat.GuestNice = (t.GuestNice - prevStat.GuestNice) / diff * 100\n cpuStat.Stolen = (t.Stolen - prevStat.Stolen) / diff * 100\n cpuStat.Total = 100 * (diff - (t.Idle - prevStat.Idle)) / diff\n }\n\n return percentages\n}", "func PrintMemStats(m *runtime.MemStats, mstats, ostats, astats, gc bool, pauses int) {\n\tif mstats {\n\t\tfmt.Printf(\"Alloc=%h, TotalAlloc=%h, Sys=%h, Lookups=%h, Mallocs=%h, Frees=%h\\n\",\n\t\t\thu(m.Alloc, \"B\"), hu(m.TotalAlloc, \"B\"), hu(m.Sys, \"B\"), hu(m.Lookups, \"\"), hu(m.Mallocs, \"\"), hu(m.Frees, \"\"))\n\t\tfmt.Printf(\"HeapAlloc=%h, HeapSys=%h, HeapIdle=%h, HeapInuse=%h, HeapReleased=%h, HeapObjects=%h, StackInuse=%h, StackSys=%h\\n\",\n\t\t\thu(m.HeapAlloc, \"B\"), hu(m.HeapSys, \"B\"), hu(m.HeapIdle, \"B\"), hu(m.HeapInuse, \"B\"), hu(m.HeapReleased, \"B\"),\n\t\t\thu(m.HeapObjects, \"\"), hu(m.StackInuse, \"B\"), hu(m.StackSys, \"B\"))\n\t\tif ostats {\n\t\t\tfmt.Printf(\"MSpanInuse=%d, MSpanSys=%d, m.MCacheInuse=%d, MCacheSys=%d, BuckHashSys=%d, GCSys=%d, OtherSys=%d\\n\",\n\t\t\t\tm.MSpanInuse, m.MSpanSys, m.MCacheInuse, m.MCacheSys, m.BuckHashSys, m.GCSys, m.OtherSys)\n\t\t}\n\n\t\tt1 := time.Unix(0, int64(m.LastGC))\n\t\t//t2 := time.Now()\n\t\t//t3 := time.Unix(int64(0), int64(m.PauseTotalNs))\n\t\tet := time.Duration(int64(m.PauseTotalNs)) // Since(t3)\n\t\tfmt.Printf(\"NextGC=%h, NumGC=%d, LastGC=%s, PauseTotalNs=%v, NumForcedGC=%d, GCCPUFraction=%0.2f\\n\",\n\t\t\thu(m.NextGC, \"B\"), m.NumGC, t1.Format(\"15:04:05.99\"), et, m.NumForcedGC, m.GCCPUFraction)\n\t}\n\tfmt.Printf(\"\\n\")\n\n\tif astats {\n\t\tfor i, b := range m.BySize {\n\t\t\tif b.Mallocs == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfmt.Printf(\"BySize[%d]: Size=%d, Malloc=%d, Frees=%d\\n\", i, b.Size, b.Mallocs, b.Frees)\n\t\t}\n\t\tfmt.Printf(\"\\n\")\n\t}\n\n\tif gc {\n\t\tPrintCircularBuffer(\"PauseNs\", pauses, int(m.NumGC), true, m.PauseNs)\n\t\tPrintCircularBuffer(\"PauseEnd\", pauses, int(m.NumGC), false, m.PauseEnd)\n\t\tfmt.Printf(\"\\n\")\n\t}\n}", "func (n *NetworkNode) GatherMetrics() {\n\tn.Lock()\n\tdefer n.Unlock()\n\tlog.Debugf(\"%s: GatherMetrics() locked for %s\", n.UUID, n.Name)\n\tif time.Now().Unix() < n.nextCollectionTicker {\n\t\treturn\n\t}\n\tstart := time.Now()\n\tif len(n.metrics) > 0 {\n\t\tn.metrics = n.metrics[:0]\n\t\tlog.Debugf(\"%s: GatherMetrics() cleared metrics\", n.UUID)\n\t}\n\tupValue := 1\n\n\tcli := api.NewClient()\n\tcli.SetHost(n.target)\n\tif n.port != 0 {\n\t\tcli.SetPort(n.port)\n\t}\n\tif n.proto != \"\" {\n\t\tcli.SetProtocol(n.proto)\n\t}\n\n\tvar info *api.SysInfo\n\tvar workingCredential *credential\n\t// test all available credentials\n\ttryFailed := false\n\tfailedCredentials := make(map[int]bool)\n\tfor {\n\t\tfor i, c := range n.credentials {\n\t\t\tif c.Failed && !tryFailed {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif _, exists := failedCredentials[i]; exists {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcli.SetUsername(c.Username)\n\t\t\tcli.SetPassword(c.Password)\n\t\t\tdata, err := cli.GetSystemInfo()\n\t\t\tif err != nil {\n\t\t\t\tfailedCredentials[i] = true\n\t\t\t\tlog.Debugf(\"%s: GetSystemInfo() failed (host: %s, target: %s, username: %s): %s\", n.UUID, n.Name, n.target, c.Username, err)\n\t\t\t\tc.Failed = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tc.Failed = false\n\t\t\tinfo = data\n\t\t\tworkingCredential = c\n\t\t\tbreak\n\t\t}\n\t\tif workingCredential == nil {\n\t\t\tif tryFailed {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ttryFailed = 
true\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\n\tif workingCredential == nil || info == nil {\n\t\tn.IncrementErrorCounter()\n\t\tupValue = 0\n\t} else {\n\t\tlog.Debugf(\"%s: hostname: %s, chassis id: %s\", n.UUID, info.Hostname, info.ChassisID)\n\t\t// General Metrics\n\t\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\t\tnodeSystemHostname,\n\t\t\tprometheus.GaugeValue,\n\t\t\t1,\n\t\t\tn.UUID,\n\t\t\tinfo.Hostname,\n\t\t))\n\t\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\t\tnodeSystemIdentifier,\n\t\t\tprometheus.GaugeValue,\n\t\t\t1,\n\t\t\tn.UUID,\n\t\t\tinfo.ProcessorBoardID,\n\t\t))\n\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(6)\n\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tn.GetInterfaces(cli)\n\t\t}()\n\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tn.GetVlans(cli)\n\t\t}()\n\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tn.GetSystemEnvironment(cli)\n\t\t}()\n\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tn.GetSystemResources(cli)\n\t\t}()\n\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tn.GetTransceivers(cli)\n\t\t}()\n\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tn.GetRoutingBgp(cli)\n\t\t}()\n\n\t\twg.Wait()\n\t}\n\n\t// Generic Metrics\n\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\tnodeUp,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(upValue),\n\t\tn.UUID,\n\t))\n\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\tnodeHostname,\n\t\tprometheus.GaugeValue,\n\t\t1,\n\t\tn.UUID,\n\t\tn.Name,\n\t))\n\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\tnodeErrors,\n\t\tprometheus.CounterValue,\n\t\tfloat64(n.errors),\n\t\tn.UUID,\n\t))\n\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\tnodeNextScrape,\n\t\tprometheus.CounterValue,\n\t\tfloat64(n.nextCollectionTicker),\n\t\tn.UUID,\n\t))\n\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\tnodeScrapeTime,\n\t\tprometheus.GaugeValue,\n\t\ttime.Since(start).Seconds(),\n\t\tn.UUID,\n\t))\n\n\tn.nextCollectionTicker = time.Now().Add(time.Duration(n.pollInterval) * time.Second).Unix()\n\n\tif upValue > 0 {\n\t\tn.result = \"success\"\n\t} else {\n\t\tn.result = \"failure\"\n\t}\n\tn.timestamp = time.Now().Format(time.RFC3339)\n\n\tlog.Debugf(\"%s: GatherMetrics() returns\", n.UUID)\n\treturn\n}", "func ReadMemStats() *MemStats {\n\tvar m runtime.MemStats\n\truntime.ReadMemStats(&m)\n\treturn &MemStats{\n\t\tAllocKB: bytesToKB(m.Alloc),\n\t\tSysKB: bytesToKB(m.Sys),\n\n\t\tHeapAllocKB: bytesToKB(m.HeapAlloc),\n\t\tHeapSysKB: bytesToKB(m.HeapSys),\n\t\tHeapIdleKB: bytesToKB(m.HeapIdle),\n\t\tHeapInuseKB: bytesToKB(m.HeapInuse),\n\t\tHeapReleasedKB: bytesToKB(m.HeapReleased),\n\t\tHeapObjects: m.HeapObjects,\n\n\t\tNextGCKB: bytesToKB(m.NextGC),\n\t\tLastGC: time.Unix(int64(m.LastGC/(1000000000)), 0),\n\t}\n}", "func (s *Stats) StatsMonitor() {\n\tlog.Printf(\"Initialized stats goroutine\")\n\ts.startTime = time.Now()\n\tfor {\n\t\telapsed := time.Since(s.startTime)\n\t\tlog.Printf(\"[%10.4f] cases %10d | fcps %8.4f | cov %2.1f%% (hit: %3d, tot: %3d) | corpus: %d | crashes: %d\", elapsed.Seconds(), s.IterationCount, float64(s.IterationCount)/elapsed.Seconds(), float64(s.FoundBreakpoints)/float64(s.TotalBreakpoints)*100.0, s.FoundBreakpoints, s.TotalBreakpoints, s.CorpusLength, s.Crashes)\n\t\ttime.Sleep(time.Second)\n\t}\n}", "func (s *Stats) CalculateCPUTimes() []CPUPercentages {\n\n percentages := make([]CPUPercentages, len(s.CPUInfo.PerCPUTimes))\n\n if len(s.CPUInfo.PrevCPUTimes) == 0 || 
len(s.CPUInfo.PerCPUTimes) == 0 {\n return percentages\n }\n\n var diff float64\n var total float64\n var prevTotal float64\n var prevStat cpu.TimesStat\n var cpuStat *CPUPercentages\n\n for i, t := range s.CPUInfo.PerCPUTimes {\n cpuStat = &percentages[i]\n prevStat = s.CPUInfo.PrevCPUTimes[i]\n\n total = t.User + t.System + t.Idle + t.Nice + t.Iowait + t.Irq + t.Softirq + t.Steal + t.Guest + t.GuestNice + t.Stolen\n prevTotal = prevStat.User + prevStat.System + prevStat.Idle + prevStat.Nice + prevStat.Iowait + prevStat.Irq + prevStat.Softirq + prevStat.Steal + prevStat.Guest + prevStat.GuestNice + prevStat.Stolen\n\n diff = total - prevTotal\n\n cpuStat.CPU = t.CPU\n cpuStat.User = (t.User - prevStat.User) / diff * 100\n cpuStat.System = (t.System - prevStat.System) / diff * 100\n cpuStat.Idle = (t.Idle - prevStat.Idle) / diff * 100\n cpuStat.Nice = (t.Nice - prevStat.Nice) / diff * 100\n cpuStat.IOWait = (t.Iowait - prevStat.Iowait) / diff * 100\n cpuStat.IRQ = (t.Irq - prevStat.Irq) / diff * 100\n cpuStat.SoftIRQ = (t.Softirq - prevStat.Softirq) / diff * 100\n cpuStat.Steal = (t.Steal - prevStat.Steal) / diff * 100\n cpuStat.Guest = (t.Guest - prevStat.Guest) / diff * 100\n cpuStat.GuestNice = (t.GuestNice - prevStat.GuestNice) / diff * 100\n cpuStat.Stolen = (t.Stolen - prevStat.Stolen) / diff * 100\n cpuStat.Total = 100 * (diff - (t.Idle - prevStat.Idle)) / diff\n }\n\n return percentages\n}", "func (s SolrPlugin) FetchMetrics() (map[string]interface{}, error) {\n\tstat := make(map[string]interface{})\n\tfor core, stats := range s.Stats {\n\t\tfor k, v := range stats {\n\t\t\tstat[core+\"_\"+k] = v\n\t\t}\n\t}\n\treturn stat, nil\n}" ]
[ "0.6065083", "0.60630685", "0.58699715", "0.5825579", "0.57826567", "0.5757542", "0.5711092", "0.5698597", "0.55576074", "0.555303", "0.5470345", "0.54669636", "0.5441396", "0.54269797", "0.5400085", "0.539636", "0.53745866", "0.5287389", "0.5285424", "0.5222219", "0.5189093", "0.5158422", "0.5134228", "0.5102943", "0.50989157", "0.50758225", "0.5059003", "0.5055005", "0.5047196", "0.50383323", "0.5037925", "0.5032591", "0.498779", "0.4949011", "0.49304295", "0.4908746", "0.48955795", "0.48925066", "0.48881665", "0.4885445", "0.48824403", "0.4871647", "0.48662156", "0.4858663", "0.48574352", "0.4855776", "0.48550737", "0.4844171", "0.48344865", "0.4827119", "0.4820032", "0.481447", "0.4814469", "0.4791942", "0.47868463", "0.47769985", "0.47720274", "0.47688323", "0.47668216", "0.47607517", "0.47589096", "0.47564673", "0.47543648", "0.47495216", "0.4741596", "0.4738733", "0.4728976", "0.47231537", "0.471735", "0.47106788", "0.47045448", "0.47037014", "0.47028857", "0.46998358", "0.46860492", "0.46759397", "0.46572664", "0.46461913", "0.4640049", "0.46304017", "0.462926", "0.4628518", "0.4626826", "0.46250513", "0.4598814", "0.45868498", "0.45808497", "0.45783675", "0.4577372", "0.4563498", "0.45615122", "0.45613277", "0.4554008", "0.4553482", "0.45520326", "0.45392215", "0.45368093", "0.45336726", "0.450413", "0.45021054" ]
0.8012715
0
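The CalculateTotalCPUTimes and CalculateCPUTimes snippets in the negatives above both rest on one delta technique: subtract the previous cumulative cpu.TimesStat totals from the current ones, then express each component as a share of the elapsed total. Below is a minimal self-contained sketch of the shared busy-percentage formula, with hypothetical sample values standing in for real gopsutil readings:

package main

import "fmt"

// sample mirrors the handful of cpu.TimesStat fields the snippets above use.
type sample struct {
	User, System, Idle float64
}

// busyPercent applies the delta formula from the snippets:
// 100 * (totalDelta - idleDelta) / totalDelta.
func busyPercent(prev, cur sample) float64 {
	prevTotal := prev.User + prev.System + prev.Idle
	total := cur.User + cur.System + cur.Idle
	diff := total - prevTotal
	if diff <= 0 {
		return 0 // no time elapsed between samples; avoid division by zero
	}
	return 100 * (diff - (cur.Idle - prev.Idle)) / diff
}

func main() {
	// Hypothetical cumulative readings taken one interval apart.
	prev := sample{User: 100, System: 50, Idle: 850}
	cur := sample{User: 104, System: 52, Idle: 894}
	fmt.Printf("busy: %.1f%%\n", busyPercent(prev, cur)) // busy: 12.0%
}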
gatherProcessStats gathers the Process metrics and adds the results to the accumulator
func (logstash *Logstash) gatherProcessStats(address string, accumulator telegraf.Accumulator) error {\n\tprocessStats := &ProcessStats{}\n\n\terr := logstash.gatherJSONData(address, processStats)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttags := map[string]string{\n\t\t"node_id": processStats.ID,\n\t\t"node_name": processStats.Name,\n\t\t"node_version": processStats.Version,\n\t\t"source": processStats.Host,\n\t}\n\n\tflattener := jsonParser.JSONFlattener{}\n\terr = flattener.FlattenJSON("", processStats.Process)\n\tif err != nil {\n\t\treturn err\n\t}\n\taccumulator.AddFields("logstash_process", flattener.Fields, tags)\n\n\treturn nil\n}
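The document above decodes the node stats endpoint into a ProcessStats value, then flattens the nested process object into flat metric fields before calling AddFields. The helper below is a simplified stand-in for telegraf's JSONFlattener, written only to illustrate the assumed underscore-joined key behavior; the payload is hypothetical and the plugin's real struct definitions are not shown in this snippet:

package main

import "fmt"

// flatten is a simplified stand-in for telegraf's JSONFlattener: it joins
// nested keys with underscores, which is the behavior gatherProcessStats
// relies on when it turns processStats.Process into flat metric fields.
func flatten(prefix string, v interface{}, out map[string]interface{}) {
	switch t := v.(type) {
	case map[string]interface{}:
		for k, child := range t {
			key := k
			if prefix != "" {
				key = prefix + "_" + k
			}
			flatten(key, child, out)
		}
	default:
		out[prefix] = v
	}
}

func main() {
	// Hypothetical payload shaped like the "process" object the endpoint returns.
	process := map[string]interface{}{
		"cpu": map[string]interface{}{"percent": 3},
		"mem": map[string]interface{}{"total_virtual_in_bytes": int64(4294967296)},
	}
	fields := map[string]interface{}{}
	flatten("", process, fields)
	fmt.Println(fields) // map[cpu_percent:3 mem_total_virtual_in_bytes:4294967296]
}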
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (logstash *Logstash) gatherJVMStats(address string, accumulator telegraf.Accumulator) error {\n\tjvmStats := &JVMStats{}\n\n\terr := logstash.gatherJSONData(address, jvmStats)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttags := map[string]string{\n\t\t\"node_id\": jvmStats.ID,\n\t\t\"node_name\": jvmStats.Name,\n\t\t\"node_version\": jvmStats.Version,\n\t\t\"source\": jvmStats.Host,\n\t}\n\n\tflattener := jsonParser.JSONFlattener{}\n\terr = flattener.FlattenJSON(\"\", jvmStats.JVM)\n\tif err != nil {\n\t\treturn err\n\t}\n\taccumulator.AddFields(\"logstash_jvm\", flattener.Fields, tags)\n\n\treturn nil\n}", "func CollectProcessMetrics(refresh time.Duration) {\n\t// Short circuit if the metics system is disabled\n\tif !Enabled {\n\t\treturn\n\t}\n\t// Create the various data collectors\n\tmemstates := make([]*runtime.MemStats, 2)\n\tdiskstates := make([]*DiskStats, 2)\n\tfor i := 0; i < len(memstates); i++ {\n\t\tmemstates[i] = new(runtime.MemStats)\n\t\tdiskstates[i] = new(DiskStats)\n\t}\n\t// Define the various metics to collect\n\tmemAllocs := metics.GetOrRegisterMeter(\"system/memory/allocs\", metics.DefaultRegistry)\n\tmemFrees := metics.GetOrRegisterMeter(\"system/memory/frees\", metics.DefaultRegistry)\n\tmemInuse := metics.GetOrRegisterMeter(\"system/memory/inuse\", metics.DefaultRegistry)\n\tmemPauses := metics.GetOrRegisterMeter(\"system/memory/pauses\", metics.DefaultRegistry)\n\n\tvar diskReads, diskReadBytes, diskWrites, diskWriteBytes metics.Meter\n\tif err := ReadDiskStats(diskstates[0]); err == nil {\n\t\tdiskReads = metics.GetOrRegisterMeter(\"system/disk/readcount\", metics.DefaultRegistry)\n\t\tdiskReadBytes = metics.GetOrRegisterMeter(\"system/disk/readdata\", metics.DefaultRegistry)\n\t\tdiskWrites = metics.GetOrRegisterMeter(\"system/disk/writecount\", metics.DefaultRegistry)\n\t\tdiskWriteBytes = metics.GetOrRegisterMeter(\"system/disk/writedata\", metics.DefaultRegistry)\n\t} else {\n\t\tbgmlogs.Debug(\"Failed to read disk metics\", \"err\", err)\n\t}\n\t// Iterate loading the different states and updating the meters\n\tfor i := 1; ; i++ {\n\t\truntime.ReadMemStats(memstates[i%2])\n\t\tmemAllocs.Mark(int64(memstates[i%2].Mallocs - memstates[(i-1)%2].Mallocs))\n\t\tmemFrees.Mark(int64(memstates[i%2].Frees - memstates[(i-1)%2].Frees))\n\t\tmemInuse.Mark(int64(memstates[i%2].Alloc - memstates[(i-1)%2].Alloc))\n\t\tmemPauses.Mark(int64(memstates[i%2].PauseTotalNs - memstates[(i-1)%2].PauseTotalNs))\n\n\t\tif ReadDiskStats(diskstates[i%2]) == nil {\n\t\t\tdiskReads.Mark(diskstates[i%2].ReadCount - diskstates[(i-1)%2].ReadCount)\n\t\t\tdiskReadBytes.Mark(diskstates[i%2].ReadBytes - diskstates[(i-1)%2].ReadBytes)\n\t\t\tdiskWrites.Mark(diskstates[i%2].WriteCount - diskstates[(i-1)%2].WriteCount)\n\t\t\tdiskWriteBytes.Mark(diskstates[i%2].WriteBytes - diskstates[(i-1)%2].WriteBytes)\n\t\t}\n\t\ttime.Sleep(refresh)\n\t}\n}", "func (p *ProcMetrics) Collect() {\n\tif m, err := CollectProcInfo(p.pid); err == nil {\n\t\tnow := time.Now()\n\n\t\tif !p.lastTime.IsZero() {\n\t\t\tratio := 1.0\n\t\t\tswitch {\n\t\t\tcase m.CPU.Period > 0 && m.CPU.Quota > 0:\n\t\t\t\tratio = float64(m.CPU.Quota) / float64(m.CPU.Period)\n\t\t\tcase m.CPU.Shares > 0:\n\t\t\t\tratio = float64(m.CPU.Shares) / 1024\n\t\t\tdefault:\n\t\t\t\tratio = 1 / float64(runtime.NumCPU())\n\t\t\t}\n\n\t\t\tinterval := ratio * float64(now.Sub(p.lastTime))\n\n\t\t\tp.cpu.user.time = m.CPU.User - p.last.CPU.User\n\t\t\tp.cpu.user.percent = 100 * float64(p.cpu.user.time) / interval\n\n\t\t\tp.cpu.system.time = 
m.CPU.Sys - p.last.CPU.Sys\n\t\t\tp.cpu.system.percent = 100 * float64(p.cpu.system.time) / interval\n\n\t\t\tp.cpu.total.time = (m.CPU.User + m.CPU.Sys) - (p.last.CPU.User + p.last.CPU.Sys)\n\t\t\tp.cpu.total.percent = 100 * float64(p.cpu.total.time) / interval\n\t\t}\n\n\t\tp.memory.available = m.Memory.Available\n\t\tp.memory.size = m.Memory.Size\n\t\tp.memory.resident.usage = m.Memory.Resident\n\t\tp.memory.resident.percent = 100 * float64(p.memory.resident.usage) / float64(p.memory.available)\n\t\tp.memory.shared.usage = m.Memory.Shared\n\t\tp.memory.text.usage = m.Memory.Text\n\t\tp.memory.data.usage = m.Memory.Data\n\t\tp.memory.pagefault.major.count = m.Memory.MajorPageFaults - p.last.Memory.MajorPageFaults\n\t\tp.memory.pagefault.minor.count = m.Memory.MinorPageFaults - p.last.Memory.MinorPageFaults\n\n\t\tp.files.open = m.Files.Open\n\t\tp.files.max = m.Files.Max\n\n\t\tp.threads.num = m.Threads.Num\n\t\tp.threads.switches.voluntary.count = m.Threads.VoluntaryContextSwitches - p.last.Threads.VoluntaryContextSwitches\n\t\tp.threads.switches.involuntary.count = m.Threads.InvoluntaryContextSwitches - p.last.Threads.InvoluntaryContextSwitches\n\n\t\tp.last = m\n\t\tp.lastTime = now\n\t\tp.engine.Report(p)\n\t}\n}", "func CollectRuntimeMemStats(statsd scopedstatsd.Client, memstatsCurrent *runtime.MemStats, memstatsPrev *runtime.MemStats, tags []string) {\n\t// Collect number of bytes obtained from system.\n\tstatsd.Gauge(\"mem.sys_bytes\", float64(memstatsCurrent.Sys), tags, 1)\n\n\t// Collect number of pointer lookups.\n\tstatsd.Gauge(\"mem.pointer_lookups\", float64(memstatsCurrent.Lookups), tags, 1)\n\n\t// Collect increased heap objects allocated compared to last flush.\n\tstatsd.Count(\"mem.mallocs_total\", int64(memstatsCurrent.Mallocs-memstatsPrev.Mallocs), tags, 1)\n\n\t// Collect increased heap objects freed compared to last flush.\n\tstatsd.Count(\"mem.frees_total\", int64(memstatsCurrent.Frees-memstatsPrev.Frees), tags, 1)\n\n\t// Collect number of mallocs.\n\tstatsd.Gauge(\"mem.mallocs_count\", float64(memstatsCurrent.Mallocs-memstatsCurrent.Frees), tags, 1)\n\n\t// Collect number of bytes newly allocated for heap objects compared to last flush.\n\tstatsd.Count(\"mem.heap_alloc_bytes_total\", int64(memstatsCurrent.TotalAlloc-memstatsPrev.TotalAlloc), tags, 1)\n\n\t// Collect number of heap bytes allocated and still in use.\n\tstatsd.Gauge(\"mem.heap_alloc_bytes\", float64(memstatsCurrent.HeapAlloc), tags, 1)\n\n\t// Collect number of heap bytes obtained from system.\n\tstatsd.Gauge(\"mem.heap_sys_bytes\", float64(memstatsCurrent.HeapSys), tags, 1)\n\n\t// Collect number of heap bytes waiting to be used.\n\tstatsd.Gauge(\"mem.heap_idle_bytes\", float64(memstatsCurrent.HeapIdle), tags, 1)\n\n\t// Collect number of heap bytes that are in use.\n\tstatsd.Gauge(\"mem.heap_inuse_bytes\", float64(memstatsCurrent.HeapInuse), tags, 1)\n\n\t// Collect number of heap bytes released to OS.\n\tstatsd.Gauge(\"mem.heap_released_bytes\", float64(memstatsCurrent.HeapReleased), tags, 1)\n\n\t// Collect number of allocated objects.\n\tstatsd.Gauge(\"mem.heap_objects_count\", float64(memstatsCurrent.HeapObjects), tags, 1)\n\n\t// Collect number of bytes in use by the stack allocator.\n\tstatsd.Gauge(\"mem.stack_inuse_bytes\", float64(memstatsCurrent.StackInuse), tags, 1)\n\n\t// Collect number of bytes obtained from system for stack allocator.\n\tstatsd.Gauge(\"mem.stack_sys_bytes\", float64(memstatsCurrent.StackSys), tags, 1)\n\n\t// Collect number of bytes in use by mspan 
structures.\n\tstatsd.Gauge(\"mem.mspan_inuse_bytes\", float64(memstatsCurrent.MSpanInuse), tags, 1)\n\n\t// Collect number of bytes used for mspan structures obtained from system.\n\tstatsd.Gauge(\"mem.mspan_sys_bytes\", float64(memstatsCurrent.MSpanSys), tags, 1)\n\n\t// Collect number of bytes in use by mcache structures.\n\tstatsd.Gauge(\"mem.mcache_inuse_bytes\", float64(memstatsCurrent.MCacheInuse), tags, 1)\n\n\t// Collect number of bytes used for mcache structures obtained from system.\n\tstatsd.Gauge(\"mem.mcache_sys_bytes\", float64(memstatsCurrent.MCacheSys), tags, 1)\n\n\t// Collect number of bytes used by the profiling bucket hash table.\n\tstatsd.Gauge(\"mem.buck_hash_sys_bytes\", float64(memstatsCurrent.BuckHashSys), tags, 1)\n\n\t// Collect number of bytes used for garbage collection system metadata.\n\tstatsd.Gauge(\"mem.gc_sys_bytes\", float64(memstatsCurrent.GCSys), tags, 1)\n\n\t// Collect number of bytes used for other system allocations.\n\tstatsd.Gauge(\"mem.other_sys_bytes\", float64(memstatsCurrent.OtherSys), tags, 1)\n\n\t// Collect number of heap bytes when next garbage collection will take pace.\n\tstatsd.Gauge(\"mem.next_gc_bytes\", float64(memstatsCurrent.NextGC), tags, 1)\n}", "func (c *Collector) Transform(allStats *NodeStatsResponse) (metrics []*exportertools.Metric) {\n for _, stats := range allStats.Nodes {\n // GC Stats\n for _, gcstats := range stats.JVM.GC.Collectors {\n metrics = append(metrics, c.ConvertToMetric(\"jvm_gc_collection_seconds_count\",\n float64(gcstats.CollectionCount),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_gc_collection_seconds_sum\",\n float64(gcstats.CollectionTime / 1000),\n \"COUNTER\",\n nil))\n }\n\n // Breaker stats\n for _, bstats := range stats.Breakers {\n metrics = append(metrics, c.ConvertToMetric(\"breakers_estimated_size_bytes\",\n float64(bstats.EstimatedSize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"breakers_limit_size_bytes\",\n float64(bstats.LimitSize),\n \"GAUGE\",\n nil))\n }\n\n // Thread Pool stats\n for pool, pstats := range stats.ThreadPool {\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_completed_count\",\n float64(pstats.Completed),\n \"COUNTER\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_rejected_count\",\n float64(pstats.Rejected),\n \"COUNTER\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_active_count\",\n float64(pstats.Active),\n \"GAUGE\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_threads_count\",\n float64(pstats.Threads),\n \"GAUGE\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_largest_count\",\n float64(pstats.Largest),\n \"GAUGE\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_queue_count\",\n float64(pstats.Queue),\n \"GAUGE\",\n map[string]string{\"type\": pool}))\n }\n\n // JVM Memory Stats\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_committed_bytes\",\n float64(stats.JVM.Mem.HeapCommitted),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_used_bytes\",\n float64(stats.JVM.Mem.HeapUsed),\n \"GAUGE\",\n nil))\n\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_max_bytes\",\n float64(stats.JVM.Mem.HeapMax),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, 
c.ConvertToMetric(\"jvm_memory_committed_bytes\",\n float64(stats.JVM.Mem.NonHeapCommitted),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_used_bytes\",\n float64(stats.JVM.Mem.NonHeapUsed),\n \"GAUGE\",\n nil))\n\n // Indices Stats)\n metrics = append(metrics, c.ConvertToMetric(\"indices_fielddata_memory_size_bytes\",\n float64(stats.Indices.FieldData.MemorySize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_fielddata_evictions\",\n float64(stats.Indices.FieldData.Evictions),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_filter_cache_memory_size_bytes\",\n float64(stats.Indices.FilterCache.MemorySize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_filter_cache_evictions\",\n float64(stats.Indices.FilterCache.Evictions),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_query_cache_memory_size_bytes\",\n float64(stats.Indices.QueryCache.MemorySize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_query_cache_evictions\",\n float64(stats.Indices.QueryCache.Evictions),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_request_cache_memory_size_bytes\",\n float64(stats.Indices.QueryCache.MemorySize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_request_cache_evictions\",\n float64(stats.Indices.QueryCache.Evictions),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_docs\",\n float64(stats.Indices.Docs.Count),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_docs_deleted\",\n float64(stats.Indices.Docs.Deleted),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_segments_memory_bytes\",\n float64(stats.Indices.Segments.Memory),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_segments_count\",\n float64(stats.Indices.Segments.Count),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_store_size_bytes\",\n float64(stats.Indices.Store.Size),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_store_throttle_time_ms_total\",\n float64(stats.Indices.Store.ThrottleTime),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_flush_total\",\n float64(stats.Indices.Flush.Total),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_flush_time_ms_total\",\n float64(stats.Indices.Flush.Time),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_indexing_index_time_ms_total\",\n float64(stats.Indices.Indexing.IndexTime),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_indexing_index_total\",\n float64(stats.Indices.Indexing.IndexTotal),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_merges_total_time_ms_total\",\n float64(stats.Indices.Merges.TotalTime),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_merges_total_size_bytes_total\",\n float64(stats.Indices.Merges.TotalSize),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_merges_total\",\n float64(stats.Indices.Merges.Total),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_refresh_total_time_ms_total\",\n float64(stats.Indices.Refresh.TotalTime),\n \"COUNTER\",\n nil))\n\n 
metrics = append(metrics, c.ConvertToMetric(\"indices_refresh_total\",\n float64(stats.Indices.Refresh.Total),\n \"COUNTER\",\n nil))\n\n // Transport Stats)\n metrics = append(metrics, c.ConvertToMetric(\"transport_rx_packets_total\",\n float64(stats.Transport.RxCount),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"transport_rx_size_bytes_total\",\n float64(stats.Transport.RxSize),\n \"COUNTER\",\n nil))\n\n\n metrics = append(metrics, c.ConvertToMetric(\"transport_tx_packets_total\",\n float64(stats.Transport.TxCount),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"transport_tx_size_bytes_total\",\n float64(stats.Transport.TxSize),\n \"COUNTER\",\n nil))\n\n // Process Stats)\n metrics = append(metrics, c.ConvertToMetric(\"process_cpu_percent\",\n float64(stats.Process.CPU.Percent),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_mem_resident_size_bytes\",\n float64(stats.Process.Memory.Resident),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_mem_share_size_bytes\",\n float64(stats.Process.Memory.Share),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_mem_virtual_size_bytes\",\n float64(stats.Process.Memory.TotalVirtual),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_open_files_count\",\n float64(stats.Process.OpenFD),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_cpu_time_seconds_sum\",\n float64(stats.Process.CPU.Total / 1000),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_cpu_time_seconds_sum\",\n float64(stats.Process.CPU.Sys / 1000),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_cpu_time_seconds_sum\",\n float64(stats.Process.CPU.User / 1000),\n \"COUNTER\",\n nil))\n\n }\n\n return metrics\n}", "func (e *Exporter) collect(ch chan<- prometheus.Metric) error {\n\tvar mempct, memtot, memfree float64\n\tif v, e := mem.VirtualMemory(); e == nil {\n\t\tmempct = v.UsedPercent\n\t\tmemtot = float64(v.Total)\n\t\tmemfree = float64(v.Free)\n\t}\n\tvar swappct, swaptot, swapfree float64\n\tif v, e := mem.SwapMemory(); e == nil {\n\t\tswappct = v.UsedPercent\n\t\tswaptot = float64(v.Total)\n\t\tswapfree = float64(v.Free)\n\t}\n\tvar cpupct float64\n\tif c, e := cpu.Percent(time.Millisecond, false); e == nil {\n\t\tcpupct = c[0] // one value since we didn't ask per cpu\n\t}\n\tvar load1, load5, load15 float64\n\tif l, e := load.Avg(); e == nil {\n\t\tload1 = l.Load1\n\t\tload5 = l.Load5\n\t\tload15 = l.Load15\n\t}\n\n\tvar cpuTotal, vsize, rss, openFDs, maxFDs, maxVsize float64\n\tif proc, err := procfs.NewProc(int(*pid)); err == nil {\n\t\tif stat, err := proc.NewStat(); err == nil {\n\t\t\tcpuTotal = float64(stat.CPUTime())\n\t\t\tvsize = float64(stat.VirtualMemory())\n\t\t\trss = float64(stat.ResidentMemory())\n\t\t}\n\t\tif fds, err := proc.FileDescriptorsLen(); err == nil {\n\t\t\topenFDs = float64(fds)\n\t\t}\n\t\tif limits, err := proc.NewLimits(); err == nil {\n\t\t\tmaxFDs = float64(limits.OpenFiles)\n\t\t\tmaxVsize = float64(limits.AddressSpace)\n\t\t}\n\t}\n\n\tvar procCpu, procMem float64\n\tvar estCon, lisCon, othCon, totCon, closeCon, timeCon, openFiles float64\n\tvar nThreads float64\n\tif proc, err := process.NewProcess(int32(*pid)); err == nil {\n\t\tif v, e := proc.CPUPercent(); e == nil {\n\t\t\tprocCpu = float64(v)\n\t\t}\n\t\tif v, e := proc.MemoryPercent(); e == nil {\n\t\t\tprocMem = 
float64(v)\n\t\t}\n\n\t\tif v, e := proc.NumThreads(); e == nil {\n\t\t\tnThreads = float64(v)\n\t\t}\n\t\tif connections, e := proc.Connections(); e == nil {\n\t\t\tfor _, v := range connections {\n\t\t\t\tif v.Status == \"LISTEN\" {\n\t\t\t\t\tlisCon += 1\n\t\t\t\t} else if v.Status == \"ESTABLISHED\" {\n\t\t\t\t\testCon += 1\n\t\t\t\t} else if v.Status == \"TIME_WAIT\" {\n\t\t\t\t\ttimeCon += 1\n\t\t\t\t} else if v.Status == \"CLOSE_WAIT\" {\n\t\t\t\t\tcloseCon += 1\n\t\t\t\t} else {\n\t\t\t\t\tothCon += 1\n\t\t\t\t}\n\t\t\t}\n\t\t\ttotCon = lisCon + estCon + timeCon + closeCon + othCon\n\t\t}\n\t\tif oFiles, e := proc.OpenFiles(); e == nil {\n\t\t\topenFiles = float64(len(oFiles))\n\t\t}\n\t}\n\n\t// metrics from process collector\n\tch <- prometheus.MustNewConstMetric(e.cpuTotal, prometheus.CounterValue, cpuTotal)\n\tch <- prometheus.MustNewConstMetric(e.openFDs, prometheus.CounterValue, openFDs)\n\tch <- prometheus.MustNewConstMetric(e.maxFDs, prometheus.CounterValue, maxFDs)\n\tch <- prometheus.MustNewConstMetric(e.vsize, prometheus.CounterValue, vsize)\n\tch <- prometheus.MustNewConstMetric(e.maxVsize, prometheus.CounterValue, maxVsize)\n\tch <- prometheus.MustNewConstMetric(e.rss, prometheus.CounterValue, rss)\n\t// node specific metrics\n\tch <- prometheus.MustNewConstMetric(e.memPercent, prometheus.CounterValue, mempct)\n\tch <- prometheus.MustNewConstMetric(e.memTotal, prometheus.CounterValue, memtot)\n\tch <- prometheus.MustNewConstMetric(e.memFree, prometheus.CounterValue, memfree)\n\tch <- prometheus.MustNewConstMetric(e.swapPercent, prometheus.CounterValue, swappct)\n\tch <- prometheus.MustNewConstMetric(e.swapTotal, prometheus.CounterValue, swaptot)\n\tch <- prometheus.MustNewConstMetric(e.swapFree, prometheus.CounterValue, swapfree)\n\tch <- prometheus.MustNewConstMetric(e.numCpus, prometheus.CounterValue, float64(runtime.NumCPU()))\n\tch <- prometheus.MustNewConstMetric(e.load1, prometheus.CounterValue, load1)\n\tch <- prometheus.MustNewConstMetric(e.load5, prometheus.CounterValue, load5)\n\tch <- prometheus.MustNewConstMetric(e.load15, prometheus.CounterValue, load15)\n\t// process specific metrics\n\tch <- prometheus.MustNewConstMetric(e.procCpu, prometheus.CounterValue, procCpu)\n\tch <- prometheus.MustNewConstMetric(e.procMem, prometheus.CounterValue, procMem)\n\tch <- prometheus.MustNewConstMetric(e.numThreads, prometheus.CounterValue, nThreads)\n\tch <- prometheus.MustNewConstMetric(e.cpuPercent, prometheus.CounterValue, cpupct)\n\tch <- prometheus.MustNewConstMetric(e.openFiles, prometheus.CounterValue, openFiles)\n\tch <- prometheus.MustNewConstMetric(e.totCon, prometheus.CounterValue, totCon)\n\tch <- prometheus.MustNewConstMetric(e.lisCon, prometheus.CounterValue, lisCon)\n\tch <- prometheus.MustNewConstMetric(e.estCon, prometheus.CounterValue, estCon)\n\tch <- prometheus.MustNewConstMetric(e.closeCon, prometheus.CounterValue, closeCon)\n\tch <- prometheus.MustNewConstMetric(e.timeCon, prometheus.CounterValue, timeCon)\n\treturn nil\n}", "func CollectRuntimeMetrics(registry *Registry) {\n\tCollectMemStats(registry)\n\tCollectSysStats(registry)\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\t// Reset metrics.\n\tfor _, vec := range e.gauges {\n\t\tvec.Reset()\n\t}\n\n\tfor _, vec := range e.counters {\n\t\tvec.Reset()\n\t}\n\n\tresp, err := e.client.Get(e.URI)\n\tif err != nil {\n\t\te.up.Set(0)\n\t\tlog.Printf(\"Error while querying Elasticsearch: 
%v\", err)\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\tlog.Printf(\"Failed to read ES response body: %v\", err)\n\t\te.up.Set(0)\n\t\treturn\n\t}\n\n\te.up.Set(1)\n\n\tvar all_stats NodeStatsResponse\n\terr = json.Unmarshal(body, &all_stats)\n\n\tif err != nil {\n\t\tlog.Printf(\"Failed to unmarshal JSON into struct: %v\", err)\n\t\treturn\n\t}\n\n\t// Regardless of whether we're querying the local host or the whole\n\t// cluster, here we can just iterate through all nodes found.\n\n\tfor node, stats := range all_stats.Nodes {\n\t\tlog.Printf(\"Processing node %v\", node)\n\t\t// GC Stats\n\t\tfor collector, gcstats := range stats.JVM.GC.Collectors {\n\t\t\te.counters[\"jvm_gc_collection_count\"].WithLabelValues(all_stats.ClusterName, stats.Name, collector).Set(float64(gcstats.CollectionCount))\n\t\t\te.counters[\"jvm_gc_collection_time_in_millis\"].WithLabelValues(all_stats.ClusterName, stats.Name, collector).Set(float64(gcstats.CollectionTime))\n\t\t}\n\n\t\t// Breaker stats\n\t\tfor breaker, bstats := range stats.Breakers {\n\t\t\te.gauges[\"breakers_estimated_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name, breaker).Set(float64(bstats.EstimatedSize))\n\t\t\te.gauges[\"breakers_limit_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name, breaker).Set(float64(bstats.LimitSize))\n\t\t}\n\n\t\t// JVM Memory Stats\n\t\te.gauges[\"jvm_mem_heap_committed_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.HeapCommitted))\n\t\te.gauges[\"jvm_mem_heap_used_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.HeapUsed))\n\t\te.gauges[\"jvm_mem_heap_max_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.HeapMax))\n\t\te.gauges[\"jvm_mem_non_heap_committed_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.NonHeapCommitted))\n\t\te.gauges[\"jvm_mem_non_heap_used_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.NonHeapUsed))\n\n\t\t// Indices Stats\n\t\te.gauges[\"indices_fielddata_evictions\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FieldData.Evictions))\n\t\te.gauges[\"indices_fielddata_memory_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FieldData.MemorySize))\n\t\te.gauges[\"indices_filter_cache_evictions\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FilterCache.Evictions))\n\t\te.gauges[\"indices_filter_cache_memory_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FilterCache.MemorySize))\n\n\t\te.gauges[\"indices_docs_count\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Docs.Count))\n\t\te.gauges[\"indices_docs_deleted\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Docs.Deleted))\n\n\t\te.gauges[\"indices_segments_memory_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Segments.Memory))\n\n\t\te.gauges[\"indices_store_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Store.Size))\n\t\te.counters[\"indices_store_throttle_time_in_millis\"].WithLabelValues(all_stats.ClusterName, 
stats.Name).Set(float64(stats.Indices.Store.ThrottleTime))\n\n\t\te.counters[\"indices_flush_total\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Flush.Total))\n\t\te.counters[\"indices_flush_time_in_millis\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Flush.Time))\n\n\t\t// Transport Stats\n\t\te.counters[\"transport_rx_count\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.RxCount))\n\t\te.counters[\"transport_rx_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.RxSize))\n\t\te.counters[\"transport_tx_count\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.TxCount))\n\t\te.counters[\"transport_tx_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.TxSize))\n\t}\n\n\t// Report metrics.\n\tch <- e.up\n\n\tfor _, vec := range e.counters {\n\t\tvec.Collect(ch)\n\t}\n\n\tfor _, vec := range e.gauges {\n\t\tvec.Collect(ch)\n\t}\n}", "func (s *Server) agentMemoryStats(metrics cgm.Metrics, mtags []string) {\n\t// var mem syscall.Rusage\n\t// if err := syscall.Getrusage(syscall.RUSAGE_SELF, &mem); err == nil {\n\t// \tmetrics[tags.MetricNameWithStreamTags(\"agent_max_rss\", tags.FromList(ctags))] = cgm.Metric{Value: uint64(mem.Maxrss * 1024), Type: \"L\"} // maximum resident set size used (in kilobytes)\n\t// } else {\n\t// \ts.logger.Warn().Err(err).Msg(\"collecting rss from system\")\n\t// }\n}", "func (ps *linuxHarvester) populateGauges(sample *types.ProcessSample, process Snapshot) error {\n\tvar err error\n\n\tcpuTimes, err := process.CPUTimes()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsample.CPUPercent = cpuTimes.Percent\n\n\ttotalCPU := cpuTimes.User + cpuTimes.System\n\n\tif totalCPU > 0 {\n\t\tsample.CPUUserPercent = (cpuTimes.User / totalCPU) * sample.CPUPercent\n\t\tsample.CPUSystemPercent = (cpuTimes.System / totalCPU) * sample.CPUPercent\n\t} else {\n\t\tsample.CPUUserPercent = 0\n\t\tsample.CPUSystemPercent = 0\n\t}\n\n\tif ps.privileged {\n\t\tfds, err := process.NumFDs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif fds >= 0 {\n\t\t\tsample.FdCount = &fds\n\t\t}\n\t}\n\n\t// Extra status data\n\tsample.Status = process.Status()\n\tsample.ThreadCount = process.NumThreads()\n\tsample.MemoryVMSBytes = process.VmSize()\n\tsample.MemoryRSSBytes = process.VmRSS()\n\n\treturn nil\n}", "func (h *Hugepages) gatherStatsPerNode(acc telegraf.Accumulator) error {\n\tnodeDirs, err := os.ReadDir(h.numaNodePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// read metrics from: node*/hugepages/hugepages-*/*\n\tfor _, nodeDir := range nodeDirs {\n\t\tif !nodeDir.IsDir() || !strings.HasPrefix(nodeDir.Name(), \"node\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tnodeNumber := strings.TrimPrefix(nodeDir.Name(), \"node\")\n\t\t_, err := strconv.Atoi(nodeNumber)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tperNodeTags := map[string]string{\n\t\t\t\"node\": nodeNumber,\n\t\t}\n\t\thugepagesPath := filepath.Join(h.numaNodePath, nodeDir.Name(), \"hugepages\")\n\t\terr = h.gatherFromHugepagePath(acc, \"hugepages_\"+perNodeHugepages, hugepagesPath, hugepagesMetricsPerNUMANode, perNodeTags)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (p *Psutil) CollectMetrics(mts []plugin.MetricType) ([]plugin.MetricType, error) {\n\tloadReqs := []core.Namespace{}\n\tcpuReqs := []core.Namespace{}\n\tmemReqs := []core.Namespace{}\n\tnetReqs := 
[]core.Namespace{}\n\tdiskReqs := []core.Namespace{}\n\n\tfor _, m := range mts {\n\t\tns := m.Namespace()\n\t\tswitch ns[2].Value {\n\t\tcase \"load\":\n\t\t\tloadReqs = append(loadReqs, ns)\n\t\tcase \"cpu\":\n\t\t\tcpuReqs = append(cpuReqs, ns)\n\t\tcase \"vm\":\n\t\t\tmemReqs = append(memReqs, ns)\n\t\tcase \"net\":\n\t\t\tnetReqs = append(netReqs, ns)\n\t\tcase \"disk\":\n\t\t\tdiskReqs = append(diskReqs, ns)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"Requested metric %s does not match any known psutil metric\", m.Namespace().String())\n\t\t}\n\t}\n\n\tmetrics := []plugin.MetricType{}\n\n\tloadMts, err := loadAvg(loadReqs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetrics = append(metrics, loadMts...)\n\n\tcpuMts, err := cpuTimes(cpuReqs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetrics = append(metrics, cpuMts...)\n\n\tmemMts, err := virtualMemory(memReqs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetrics = append(metrics, memMts...)\n\n\tnetMts, err := netIOCounters(netReqs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetrics = append(metrics, netMts...)\n\tmounts := getMountpoints(mts[0].Config().Table())\n\tdiskMts, err := getDiskUsageMetrics(diskReqs, mounts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetrics = append(metrics, diskMts...)\n\n\treturn metrics, nil\n}", "func (h *Hugepages) gatherStatsFromMeminfo(acc telegraf.Accumulator) error {\n\tmeminfo, err := os.ReadFile(h.meminfoPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmetrics := make(map[string]interface{})\n\tlines := bytes.Split(meminfo, newlineByte)\n\tfor _, line := range lines {\n\t\tfields := bytes.Fields(line)\n\t\tif len(fields) < 2 {\n\t\t\tcontinue\n\t\t}\n\t\tfieldName := string(bytes.TrimSuffix(fields[0], colonByte))\n\t\tmetricName, ok := hugepagesMetricsFromMeminfo[fieldName]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tfieldValue, err := strconv.Atoi(string(fields[1]))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to convert content of %q: %w\", fieldName, err)\n\t\t}\n\n\t\tmetrics[metricName] = fieldValue\n\t}\n\n\tacc.AddFields(\"hugepages_\"+meminfoHugepages, metrics, map[string]string{})\n\treturn nil\n}", "func (dh *darwinHarvester) populateGauges(sample *types.ProcessSample, process Snapshot) error {\n\tvar err error\n\n\tcpuTimes, err := process.CPUTimes()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsample.CPUPercent = cpuTimes.Percent\n\n\ttotalCPU := cpuTimes.User + cpuTimes.System\n\n\tif totalCPU > 0 {\n\t\tsample.CPUUserPercent = (cpuTimes.User / totalCPU) * sample.CPUPercent\n\t\tsample.CPUSystemPercent = (cpuTimes.System / totalCPU) * sample.CPUPercent\n\t} else {\n\t\tsample.CPUUserPercent = 0\n\t\tsample.CPUSystemPercent = 0\n\t}\n\n\t// Extra status data\n\tsample.Status = process.Status()\n\tsample.ThreadCount = process.NumThreads()\n\tsample.MemoryVMSBytes = process.VmSize()\n\tsample.MemoryRSSBytes = process.VmRSS()\n\n\treturn nil\n}", "func (pc *NginxProcessesMetricsCollector) Collect(ch chan<- prometheus.Metric) {\n\tpc.updateWorkerProcessCount()\n\tpc.workerProcessTotal.Collect(ch)\n}", "func (logstash *Logstash) gatherPipelineStats(address string, accumulator telegraf.Accumulator) error {\n\tpipelineStats := &PipelineStats{}\n\n\terr := logstash.gatherJSONData(address, pipelineStats)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttags := map[string]string{\n\t\t\"node_id\": pipelineStats.ID,\n\t\t\"node_name\": pipelineStats.Name,\n\t\t\"node_version\": pipelineStats.Version,\n\t\t\"source\": pipelineStats.Host,\n\t}\n\n\tflattener := 
jsonParser.JSONFlattener{}\n\terr = flattener.FlattenJSON(\"\", pipelineStats.Pipeline.Events)\n\tif err != nil {\n\t\treturn err\n\t}\n\taccumulator.AddFields(\"logstash_events\", flattener.Fields, tags)\n\n\terr = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Inputs, \"input\", tags, accumulator)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Filters, \"filter\", tags, accumulator)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Outputs, \"output\", tags, accumulator)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = logstash.gatherQueueStats(&pipelineStats.Pipeline.Queue, tags, accumulator)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (logstash *Logstash) Gather(accumulator telegraf.Accumulator) error {\n\tif logstash.client == nil {\n\t\tclient, err := logstash.createHTTPClient()\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlogstash.client = client\n\t}\n\n\tif choice.Contains(\"jvm\", logstash.Collect) {\n\t\tjvmURL, err := url.Parse(logstash.URL + jvmStats)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := logstash.gatherJVMStats(jvmURL.String(), accumulator); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif choice.Contains(\"process\", logstash.Collect) {\n\t\tprocessURL, err := url.Parse(logstash.URL + processStats)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := logstash.gatherProcessStats(processURL.String(), accumulator); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif choice.Contains(\"pipelines\", logstash.Collect) {\n\t\tif logstash.SinglePipeline {\n\t\t\tpipelineURL, err := url.Parse(logstash.URL + pipelineStats)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := logstash.gatherPipelineStats(pipelineURL.String(), accumulator); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tpipelinesURL, err := url.Parse(logstash.URL + pipelinesStats)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := logstash.gatherPipelinesStats(pipelinesURL.String(), accumulator); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func FetchAppServerMemStats(r Result) []float32 {\n\treturn r.AppServerStats().Mem\n}", "func (p *Process) ExternalStats() error {\n\tproc := gopsutil.Process{\n\t\tPid: int32(p.Pid),\n\t}\n\tcpup, err := proc.CPUPercent()\n\tp.Cpup = cpup\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tt, err := proc.Times()\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.Cput = t.Total()\n\n\tp.Memp, err = proc.MemoryPercent()\n\treturn err\n}", "func (sr *ServicedStatsReporter) gatherStats(t time.Time) []Sample {\n\tstats := []Sample{}\n\t// Handle the host metrics.\n\treg, _ := sr.hostRegistry.(*metrics.StandardRegistry)\n\treg.Each(func(name string, i interface{}) {\n\t\ttagmap := map[string]string{\n\t\t\t\"controlplane_host_id\": sr.hostID,\n\t\t}\n\t\tswitch metric := i.(type) {\n\t\tcase metrics.Gauge:\n\t\t\tstats = append(stats, Sample{name, strconv.FormatInt(metric.Value(), 10), t.Unix(), tagmap})\n\t\tcase metrics.GaugeFloat64:\n\t\t\tstats = append(stats, Sample{name, strconv.FormatFloat(metric.Value(), 'f', -1, 32), t.Unix(), tagmap})\n\t\t}\n\t})\n\t// Handle each container's metrics.\n\tfor key, registry := range sr.containerRegistries {\n\t\treg, _ := registry.(*metrics.StandardRegistry)\n\t\treg.Each(func(name string, i interface{}) {\n\t\t\ttagmap := map[string]string{\n\t\t\t\t\"controlplane_host_id\": 
sr.hostID,\n\t\t\t\t\"controlplane_service_id\": key.serviceID,\n\t\t\t\t\"controlplane_instance_id\": strconv.FormatInt(int64(key.instanceID), 10),\n\t\t\t}\n\t\t\tswitch metric := i.(type) {\n\t\t\tcase metrics.Gauge:\n\t\t\t\tstats = append(stats, Sample{name, strconv.FormatInt(metric.Value(), 10), t.Unix(), tagmap})\n\t\t\tcase metrics.GaugeFloat64:\n\t\t\t\tstats = append(stats, Sample{name, strconv.FormatFloat(metric.Value(), 'f', -1, 32), t.Unix(), tagmap})\n\t\t\t}\n\t\t})\n\t}\n\treturn stats\n}", "func (h *Hugepages) gatherRootStats(acc telegraf.Accumulator) error {\n\treturn h.gatherFromHugepagePath(acc, \"hugepages_\"+rootHugepages, h.rootHugepagePath, hugepagesMetricsRoot, nil)\n}", "func ProcMeminfo(c *gin.Context) {\n\tres := CmdExec(\"cat /proc/meminfo | head -n 2| awk '{print $2}'\")\n\ttotalMem, _ := strconv.Atoi(res[0])\n\tfreeMem, _ := strconv.Atoi(res[1])\n\tusedMem := totalMem - freeMem\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"totalMem\": totalMem,\n\t\t\"usedMem\": usedMem,\n\t})\n}", "func ProcStat(c *gin.Context) {\n\tres := CmdExec(\"cat /proc/stat | head -n 1 | awk '{$1=\\\"\\\";print}'\")\n\tresArray := strings.Split(res[0], \" \")\n\tvar cpu []int64\n\tvar totalcpu, idlecpu int64\n\tfor _, v := range resArray {\n\t\ttemp, err := strconv.ParseInt(v, 10, 64)\n\t\tif err == nil {\n\t\t\tcpu = append(cpu, temp)\n\t\t\ttotalcpu = totalcpu + temp\n\t\t}\n\t}\n\tidlecpu = cpu[3]\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"totalcpu\": totalcpu,\n\t\t\"idlecpu\": idlecpu,\n\t})\n}", "func (m *KubeletMonitor) parsePodStats(podStats []stats.PodStats) {\n\tfor _, podStat := range podStats {\n\t\tvar cpuUsageNanoCoreSum uint64\n\t\tvar memoryUsageBytesSum uint64\n\t\tfor _, containerStat := range podStat.Containers {\n\t\t\tif containerStat.CPU != nil && containerStat.CPU.UsageNanoCores != nil {\n\t\t\t\tcpuUsageNanoCoreSum += *containerStat.CPU.UsageNanoCores\n\t\t\t}\n\t\t\tif containerStat.Memory != nil && containerStat.Memory.UsageBytes != nil {\n\t\t\t\tmemoryUsageBytesSum += *containerStat.Memory.UsageBytes\n\t\t\t}\n\t\t}\n\t\tglog.V(4).Infof(\"Cpu usage of pod %s is %f core\", util.PodStatsKeyFunc(podStat),\n\t\t\tfloat64(cpuUsageNanoCoreSum)/util.NanoToUnit)\n\t\tpodCpuUsageCoreMetrics := metrics.NewEntityResourceMetric(task.PodType, util.PodStatsKeyFunc(podStat),\n\t\t\tmetrics.CPU, metrics.Used, float64(cpuUsageNanoCoreSum)/util.NanoToUnit)\n\n\t\tglog.V(4).Infof(\"Memory usage of pod %s is %f Kb\", util.PodStatsKeyFunc(podStat),\n\t\t\tfloat64(memoryUsageBytesSum)/util.KilobytesToBytes)\n\t\tpodMemoryUsageCoreMetrics := metrics.NewEntityResourceMetric(task.PodType, util.PodStatsKeyFunc(podStat),\n\t\t\tmetrics.Memory, metrics.Used, float64(memoryUsageBytesSum)/util.KilobytesToBytes)\n\n\t\t// application cpu and mem used are the same as pod's.\n\t\tapplicationCpuUsageCoreMetrics := metrics.NewEntityResourceMetric(task.ApplicationType,\n\t\t\tutil.PodStatsKeyFunc(podStat), metrics.CPU, metrics.Used,\n\t\t\tfloat64(cpuUsageNanoCoreSum)/util.NanoToUnit)\n\t\tapplicationMemoryUsageCoreMetrics := metrics.NewEntityResourceMetric(task.ApplicationType,\n\t\t\tutil.PodStatsKeyFunc(podStat), metrics.Memory, metrics.Used,\n\t\t\tfloat64(memoryUsageBytesSum)/util.KilobytesToBytes)\n\n\t\tm.metricSink.AddNewMetricEntries(podCpuUsageCoreMetrics,\n\t\t\tpodMemoryUsageCoreMetrics,\n\t\t\tapplicationCpuUsageCoreMetrics,\n\t\t\tapplicationMemoryUsageCoreMetrics)\n\t}\n}", "func CollectSysStats(registry *Registry) {\n\tvar s sysStatsCollector\n\ts.registry = registry\n\ts.maxOpen = 
registry.Gauge(\"fh.allocated\", nil)\n\ts.curOpen = registry.Gauge(\"fh.max\", nil)\n\ts.numGoroutines = registry.Gauge(\"go.numGoroutines\", nil)\n\n\tticker := time.NewTicker(30 * time.Second)\n\tgo func() {\n\t\tlog := registry.log\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tlog.Debugf(\"Collecting system stats\")\n\t\t\t\tfdStats(&s)\n\t\t\t\tgoRuntimeStats(&s)\n\t\t\t}\n\t\t}\n\t}()\n}", "func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) {\n\toomCountByHost, ramUsageByHost := c.ReallyExpensiveAssessmentOfTheSystemState()\n\tfor host, oomCount := range oomCountByHost {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.OOMCountDesc,\n\t\t\tprometheus.CounterValue,\n\t\t\tfloat64(oomCount),\n\t\t\thost,\n\t\t)\n\t}\n\tfor host, ramUsage := range ramUsageByHost {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.RAMUsageDesc,\n\t\t\tprometheus.GaugeValue,\n\t\t\tramUsage,\n\t\t\thost,\n\t\t)\n\t}\n}", "func (m VarnishPlugin) FetchMetrics() (map[string]interface{}, error) {\n\tvar out []byte\n\tvar err error\n\n\tif m.VarnishName == \"\" {\n\t\tout, err = exec.Command(m.VarnishStatPath, \"-1\").CombinedOutput()\n\t} else {\n\t\tout, err = exec.Command(m.VarnishStatPath, \"-1\", \"-n\", m.VarnishName).CombinedOutput()\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%s: %s\", err, out)\n\t}\n\n\tlineexp := regexp.MustCompile(`^([^ ]+) +(\\d+)`)\n\tsmaexp := regexp.MustCompile(`^SMA\\.([^\\.]+)\\.(.+)$`)\n\n\tstat := map[string]interface{}{\n\t\t\"requests\": float64(0),\n\t}\n\n\tvar tmpv float64\n\tfor _, line := range strings.Split(string(out), \"\\n\") {\n\t\tmatch := lineexp.FindStringSubmatch(line)\n\t\tif match == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\ttmpv, err = strconv.ParseFloat(match[2], 64)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch match[1] {\n\t\tcase \"cache_hit\", \"MAIN.cache_hit\":\n\t\t\tstat[\"cache_hits\"] = tmpv\n\t\t\tstat[\"requests\"] = stat[\"requests\"].(float64) + tmpv\n\t\tcase \"cache_miss\", \"MAIN.cache_miss\":\n\t\t\tstat[\"requests\"] = stat[\"requests\"].(float64) + tmpv\n\t\tcase \"cache_hitpass\", \"MAIN.cache_hitpass\":\n\t\t\tstat[\"requests\"] = stat[\"requests\"].(float64) + tmpv\n\t\tcase \"MAIN.backend_req\":\n\t\t\tstat[\"backend_req\"] = tmpv\n\t\tcase \"MAIN.backend_conn\":\n\t\t\tstat[\"backend_conn\"] = tmpv\n\t\tcase \"MAIN.backend_fail\":\n\t\t\tstat[\"backend_fail\"] = tmpv\n\t\tcase \"MAIN.backend_reuse\":\n\t\t\tstat[\"backend_reuse\"] = tmpv\n\t\tcase \"MAIN.backend_recycle\":\n\t\t\tstat[\"backend_recycle\"] = tmpv\n\t\tcase \"MAIN.n_object\":\n\t\t\tstat[\"n_object\"] = tmpv\n\t\tcase \"MAIN.n_objectcore\":\n\t\t\tstat[\"n_objectcore\"] = tmpv\n\t\tcase \"MAIN.n_expired\":\n\t\t\tstat[\"n_expired\"] = tmpv\n\t\tcase \"MAIN.n_objecthead\":\n\t\t\tstat[\"n_objecthead\"] = tmpv\n\t\tcase \"MAIN.busy_sleep\":\n\t\t\tstat[\"busy_sleep\"] = tmpv\n\t\tcase \"MAIN.busy_wakeup\":\n\t\t\tstat[\"busy_wakeup\"] = tmpv\n\t\tdefault:\n\t\t\tsmamatch := smaexp.FindStringSubmatch(match[1])\n\t\t\tif smamatch == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif smamatch[2] == \"g_alloc\" {\n\t\t\t\tstat[\"varnish.sma.g_alloc.\"+smamatch[1]+\".g_alloc\"] = tmpv\n\t\t\t} else if smamatch[2] == \"g_bytes\" {\n\t\t\t\tstat[\"varnish.sma.memory.\"+smamatch[1]+\".allocated\"] = tmpv\n\t\t\t} else if smamatch[2] == \"g_space\" {\n\t\t\t\tstat[\"varnish.sma.memory.\"+smamatch[1]+\".available\"] = tmpv\n\t\t\t}\n\t\t}\n\t}\n\n\treturn stat, err\n}", "func runCPUUsageStats(){\n\tnbCPU := 
float64(runtime.NumCPU())\n\tparams := fmt.Sprintf(\"(Get-process -Id %d).CPU\",os.Getpid())\n\tfor {\n\t\tcmd := exec.Command(\"powershell\", params)\n\t\tdata, _ := cmd.Output()\n\t\tcurrent,_ := strconv.ParseFloat(strings.Replace(string(data),\"\\r\\n\",\"\",-1),32)\n\t\tif previous == 0 {\n\t\t\tprevious = current\n\t\t}\n\t\tcurrentUsage = int(((current - previous)*float64(100))/(waitTime*nbCPU) )\n\t\tprevious = current\n\t\ttime.Sleep(time.Duration(waitTime )*time.Second)\n\t}\n}", "func initProcessMetrics(app *App) {\n\tapp.prometheusRegistry.MustRegister(\n\t\tprometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}),\n\t)\n}", "func getProcessStats(fs procfs.FS, pid int) (*ProcessStats, error) {\n\tvar stats []*ProcessStats\n\tif pid >= 0 { // one process, maybe self\n\t\tvar p procfs.Proc\n\t\tvar err error\n\t\tif pid == 0 {\n\t\t\tp, err = fs.Self()\n\t\t} else {\n\t\t\tp, err = fs.Proc(pid)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"Proc(%v)\", pid)\n\t\t}\n\t\tstat, err := p.Stat()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"Stat(%v)\", p.PID)\n\t\t}\n\t\tpstat, err := getOneProcessStats(p, stat)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"getProcessStats(%v)\", p.PID)\n\t\t}\n\t\tstats = append(stats, pstat)\n\t} else { // process group\n\t\tpgid := -pid\n\t\tall, err := fs.AllProcs()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"AllProcs\")\n\t\t}\n\t\tvar errs error\n\t\tfor _, p := range all {\n\t\t\tstat, err := p.Stat()\n\t\t\tif err != nil {\n\t\t\t\terrors.JoinInto(&errs, errors.Wrapf(err, \"Stat(%v)\", p.PID))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif stat.PGRP != pgid {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tpstat, err := getOneProcessStats(p, stat)\n\t\t\tif err != nil {\n\t\t\t\terrors.JoinInto(&errs, errors.Wrapf(err, \"getProcessStats(%v)\", p.PID))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tstats = append(stats, pstat)\n\t\t}\n\t\tif errs != nil {\n\t\t\treturn nil, errs\n\t\t}\n\t}\n\tvar result ProcessStats\n\tfor _, s := range stats {\n\t\tresult.CPUTime += s.CPUTime\n\t\tresult.CanceledWBytes += s.CanceledWBytes\n\t\tresult.FDCount += s.FDCount\n\t\tresult.RBytes += s.RBytes\n\t\tresult.WBytes += s.WBytes\n\t\tresult.CanceledWBytes += s.CanceledWBytes\n\t\tresult.RChars += s.RChars\n\t\tresult.WChars += s.WChars\n\t\tresult.ResidentMemory += s.ResidentMemory\n\t\tresult.RSysc += s.RSysc\n\t\tresult.WSysc += s.WSysc\n\t\tresult.OOMScore = miscutil.Max(result.OOMScore, s.OOMScore)\n\t}\n\treturn &result, nil\n}", "func CollectAllMetrics(client *statsd.Client, log *li.StandardLogger) {\n\n\tvar metrics []metric\n\tmetrics = append(metrics, metric{name: \"gpu.temperature\", cmd: \"vcgencmd measure_temp | egrep -o '[0-9]*\\\\.[0-9]*'\"})\n\tmetrics = append(metrics, metric{name: \"cpu.temperature\", cmd: \"cat /sys/class/thermal/thermal_zone0/temp | awk 'END {print $1/1000}'\"})\n\tmetrics = append(metrics, metric{name: \"threads\", cmd: \"ps -eo nlwp | tail -n +2 | awk '{ num_threads += $1 } END { print num_threads }'\"})\n\tmetrics = append(metrics, metric{name: \"processes\", cmd: \"ps axu | wc -l\"})\n\n\tfor range time.Tick(15 * time.Second) {\n\t\tlog.Info(\"Starting metric collection\")\n\t\tfor _, m := range metrics {\n\t\t\terr := collectMetric(m, client, log)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t}\n\t\t}\n\t}\n}", "func ReadMemStats(m *MemStats) {\n\tm.HeapIdle = 0\n\tm.HeapInuse = 0\n\tfor block := gcBlock(0); block < endBlock; block++ {\n\t\tbstate := block.state()\n\t\tif 
bstate == blockStateFree {\n\t\t\tm.HeapIdle += uint64(bytesPerBlock)\n\t\t} else {\n\t\t\tm.HeapInuse += uint64(bytesPerBlock)\n\t\t}\n\t}\n\tm.HeapReleased = 0 // always 0, we don't currently release memory back to the OS.\n\tm.HeapSys = m.HeapInuse + m.HeapIdle\n\tm.GCSys = uint64(heapEnd - uintptr(metadataStart))\n\tm.Sys = uint64(heapEnd - heapStart)\n}", "func GetProcessStats(pid int) (ProcessStats, error) {\n\t// Open the process.\n\tprocess, err := syscall.OpenProcess(processQueryLimitedInformation, false, uint32(pid))\n\tif err != nil {\n\t\treturn ProcessStats{}, nil\n\t}\n\tdefer syscall.CloseHandle(process)\n\n\t// Get memory info.\n\tpsapi := syscall.NewLazyDLL(\"psapi.dll\")\n\tgetProcessMemoryInfo := psapi.NewProc(\"GetProcessMemoryInfo\")\n\tmemoryInfo := processMemoryCounters{\n\t\tcb: 72,\n\t}\n\tres, _, _ := getProcessMemoryInfo.Call(uintptr(process), uintptr(unsafe.Pointer(&memoryInfo)), uintptr(memoryInfo.cb))\n\tif res == 0 {\n\t\treturn ProcessStats{}, nil\n\t}\n\n\t// Get CPU info.\n\tcreationTime1 := &syscall.Filetime{}\n\texitTime1 := &syscall.Filetime{}\n\tkernelTime1 := &syscall.Filetime{}\n\tuserTime1 := &syscall.Filetime{}\n\terr = syscall.GetProcessTimes(process, creationTime1, exitTime1, kernelTime1, userTime1)\n\tif err != nil {\n\t\treturn ProcessStats{RSSMemory: float64(memoryInfo.WorkingSetSize)}, nil\n\t}\n\t<-time.After(time.Millisecond * 50) // Not the most accurate, but it'll do.\n\tcreationTime2 := &syscall.Filetime{}\n\texitTime2 := &syscall.Filetime{}\n\tkernelTime2 := &syscall.Filetime{}\n\tuserTime2 := &syscall.Filetime{}\n\terr = syscall.GetProcessTimes(process, creationTime2, exitTime2, kernelTime2, userTime2)\n\tif err != nil {\n\t\treturn ProcessStats{RSSMemory: float64(memoryInfo.WorkingSetSize)}, nil\n\t}\n\tcpuTime := float64((userTime2.Nanoseconds() - userTime1.Nanoseconds()) / int64(runtime.NumCPU()))\n\n\treturn ProcessStats{\n\t\tRSSMemory: float64(memoryInfo.WorkingSetSize),\n\t\tCPUUsage: cpuTime / 500000, // Conversion: (cpuTime / (50*1000*1000)) * 100\n\t}, nil\n}", "func MonitorProcess(interval time.Duration) func() {\n\tif v := view.Find(processViews[0].Name); v == nil {\n\t\tif err := view.Register(processViews...); err != nil {\n\t\t\tpanic(errors.Wrap(err, \"unable to register process view\"))\n\t\t}\n\t}\n\n\tvar stopReport = make(chan struct{}, 1)\n\n\tgo func() {\n\t\ttick := time.NewTicker(interval)\n\t\tdefer tick.Stop()\n\n\t\tctx := context.Background()\n\t\tctx, _ = tag.New(ctx, // nolint: errcheck\n\t\t\ttag.Upsert(tagKeyProcessOS, runtime.GOOS),\n\t\t\ttag.Upsert(tagKeyProcessArch, runtime.GOARCH),\n\t\t\ttag.Upsert(tagKeyProcessGoVersion, runtime.Version()),\n\t\t)\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-stopReport:\n\t\t\t\treturn\n\t\t\tcase <-tick.C:\n\t\t\t\tvar memstats runtime.MemStats\n\t\t\t\truntime.ReadMemStats(&memstats)\n\n\t\t\t\tstats.Record(ctx,\n\t\t\t\t\tmetricProcessGoroutine.M(int64(runtime.NumGoroutine())),\n\t\t\t\t\tmetricProcessMemoryTotalAlloc.M(int64(memstats.TotalAlloc)),\n\t\t\t\t\tmetricProcessMemorySys.M(int64(memstats.Sys)),\n\t\t\t\t\tmetricProcessMemoryMalloc.M(int64(memstats.Mallocs)),\n\t\t\t\t\tmetricProcessMemoryFree.M(int64(memstats.Frees)),\n\t\t\t\t\tmetricProcessMemoryHeapAlloc.M(int64(memstats.HeapAlloc)),\n\t\t\t\t\tmetricProcessMemoryHeapReleased.M(int64(memstats.HeapReleased)),\n\t\t\t\t\tmetricProcessMemoryHeapObject.M(int64(memstats.HeapObjects)),\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t}()\n\treturn func() {\n\t\tclose(stopReport)\n\t}\n}", "func (q *QueryBenchmarker) 
processStats(telemetrySink chan *report.Point) {\n\n\tq.statMapping = StatsMap{\n\t\tAllQueriesLabel: &StatGroup{},\n\t}\n\n\tlastRefresh := time.Time{}\n\ti := uint64(0)\n\tfor stat := range q.statChan {\n\t\tq.isBurnIn = i < q.burnIn\n\t\tif q.isBurnIn {\n\t\t\ti++\n\t\t\tq.statPool.Put(stat)\n\t\t\tcontinue\n\t\t} else if i == q.burnIn && q.burnIn > 0 {\n\t\t\tlog.Printf(\"burn-in complete after %d queries with %d workers\\n\", q.burnIn, q.workers)\n\t\t}\n\n\t\tif _, ok := q.statMapping[string(stat.Label)]; !ok {\n\t\t\tq.statMapping[string(stat.Label)] = &StatGroup{}\n\t\t}\n\n\t\tnow := time.Now()\n\n\t\tif stat.IsActual {\n\t\t\tq.movingAverageStat.Push(now, stat.Value)\n\t\t\tq.statMapping[AllQueriesLabel].Push(stat.Value)\n\t\t\tq.statMapping[string(stat.Label)].Push(stat.Value)\n\t\t\ti++\n\t\t}\n\n\t\tq.statPool.Put(stat)\n\n\t\tif lastRefresh.Nanosecond() == 0 || now.Sub(lastRefresh).Seconds() >= 1.0 {\n\t\t\tq.movingAverageStat.UpdateAvg(now, q.workers)\n\t\t\tlastRefresh = now\n\t\t\t// Report telemetry, if applicable:\n\t\t\tif telemetrySink != nil {\n\t\t\t\tp := report.GetPointFromGlobalPool()\n\t\t\t\tp.Init(\"benchmarks_telemetry\", now.UnixNano())\n\t\t\t\tfor _, tagpair := range q.reportTags {\n\t\t\t\t\tp.AddTag(tagpair[0], tagpair[1])\n\t\t\t\t}\n\t\t\t\tp.AddTag(\"client_type\", \"query\")\n\t\t\t\tp.AddFloat64Field(\"query_response_time_mean\", q.statMapping[AllQueriesLabel].Mean)\n\t\t\t\tp.AddFloat64Field(\"query_response_time_moving_mean\", q.movingAverageStat.Avg())\n\t\t\t\tp.AddIntField(\"query_workers\", q.workers)\n\t\t\t\tp.AddInt64Field(\"queries\", int64(i))\n\t\t\t\ttelemetrySink <- p\n\t\t\t}\n\t\t}\n\t\t// print stats to stderr (if printInterval is greater than zero):\n\t\tif q.printInterval > 0 && i > 0 && i%q.printInterval == 0 && (int64(i) < q.limit || q.limit < 0) {\n\t\t\tlog.Printf(\"%s: after %d queries with %d workers:\\n\", time.Now().String(), i-q.burnIn, q.workers)\n\t\t\tfprintStats(os.Stderr, q)\n\t\t\tlog.Printf(\"\\n\")\n\t\t}\n\n\t}\n\n\tlog.Printf(\"run complete after %d queries with %d workers:\\n\", i-q.burnIn, q.workers)\n\tq.totalQueries = int(i)\n\tq.statGroup.Done()\n}", "func (logstash *Logstash) gatherPipelinesStats(address string, accumulator telegraf.Accumulator) error {\n\tpipelinesStats := &PipelinesStats{}\n\n\terr := logstash.gatherJSONData(address, pipelinesStats)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor pipelineName, pipeline := range pipelinesStats.Pipelines {\n\t\ttags := map[string]string{\n\t\t\t\"node_id\": pipelinesStats.ID,\n\t\t\t\"node_name\": pipelinesStats.Name,\n\t\t\t\"node_version\": pipelinesStats.Version,\n\t\t\t\"pipeline\": pipelineName,\n\t\t\t\"source\": pipelinesStats.Host,\n\t\t}\n\n\t\tflattener := jsonParser.JSONFlattener{}\n\t\terr := flattener.FlattenJSON(\"\", pipeline.Events)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\taccumulator.AddFields(\"logstash_events\", flattener.Fields, tags)\n\n\t\terr = logstash.gatherPluginsStats(pipeline.Plugins.Inputs, \"input\", tags, accumulator)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = logstash.gatherPluginsStats(pipeline.Plugins.Filters, \"filter\", tags, accumulator)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = logstash.gatherPluginsStats(pipeline.Plugins.Outputs, \"output\", tags, accumulator)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = logstash.gatherQueueStats(&pipeline.Queue, tags, accumulator)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c *Consumer) 
VbProcessingStats() map[uint16]map[string]interface{} {\n\tvbstats := make(map[uint16]map[string]interface{})\n\tfor vbno := range c.vbProcessingStats {\n\t\tif _, ok := vbstats[vbno]; !ok {\n\t\t\tvbstats[vbno] = make(map[string]interface{})\n\t\t}\n\t\tassignedWorker := c.vbProcessingStats.getVbStat(vbno, \"assigned_worker\")\n\t\towner := c.vbProcessingStats.getVbStat(vbno, \"current_vb_owner\")\n\t\tstreamStatus := c.vbProcessingStats.getVbStat(vbno, \"dcp_stream_status\")\n\t\tseqNo := c.vbProcessingStats.getVbStat(vbno, \"last_processed_seq_no\")\n\t\tuuid := c.vbProcessingStats.getVbStat(vbno, \"node_uuid\")\n\t\tcurrentProcDocIDTimer := c.vbProcessingStats.getVbStat(vbno, \"currently_processed_doc_id_timer\").(string)\n\t\tcurrentProcCronTimer := c.vbProcessingStats.getVbStat(vbno, \"currently_processed_cron_timer\").(string)\n\t\tlastProcDocIDTimer := c.vbProcessingStats.getVbStat(vbno, \"last_processed_doc_id_timer_event\").(string)\n\t\tnextDocIDTimer := c.vbProcessingStats.getVbStat(vbno, \"next_doc_id_timer_to_process\").(string)\n\t\tnextCronTimer := c.vbProcessingStats.getVbStat(vbno, \"next_cron_timer_to_process\").(string)\n\t\tplasmaLastSeqNoPersist := c.vbProcessingStats.getVbStat(vbno, \"plasma_last_seq_no_persisted\").(uint64)\n\n\t\tvbstats[vbno][\"assigned_worker\"] = assignedWorker\n\t\tvbstats[vbno][\"current_vb_owner\"] = owner\n\t\tvbstats[vbno][\"node_uuid\"] = uuid\n\t\tvbstats[vbno][\"stream_status\"] = streamStatus\n\t\tvbstats[vbno][\"seq_no\"] = seqNo\n\t\tvbstats[vbno][\"currently_processed_doc_id_timer\"] = currentProcDocIDTimer\n\t\tvbstats[vbno][\"currently_processed_cron_timer\"] = currentProcCronTimer\n\t\tvbstats[vbno][\"last_processed_doc_id_timer_event\"] = lastProcDocIDTimer\n\t\tvbstats[vbno][\"next_doc_id_timer_to_process\"] = nextDocIDTimer\n\t\tvbstats[vbno][\"next_cron_timer_to_process\"] = nextCronTimer\n\t\tvbstats[vbno][\"plasma_last_seq_no_persisted\"] = plasmaLastSeqNoPersist\n\t}\n\n\treturn vbstats\n}", "func processHealthMonitor(duration time.Duration) {\n\tfor {\n\t\t<-time.After(duration)\n\t\tvar numOfGoroutines = runtime.NumGoroutine()\n\t\t//var memStats runtime.MemStats\n\t\t//runtime.ReadMemStats(&memStats)\n\t\t//core.Info(\"Number of goroutines: %d\",numOfGoroutines)\n\t\t//core.Info(\"Mem stats: %v\",memStats)\n\t\tcore.CloudWatchClient.PutMetric(\"num_of_goroutines\", \"Count\", float64(numOfGoroutines), \"httshark_health_monitor\")\n\t}\n}", "func (s *Stats) GetMemoryInfo(logMemory, logGoMemory bool) {\n\n if logGoMemory {\n if s.GoInfo == nil {\n s.initGoInfo()\n }\n\n runtime.ReadMemStats(s.GoInfo.Memory.mem)\n s.GoInfo.GoRoutines = runtime.NumGoroutine()\n s.GoInfo.Memory.Alloc = s.GoInfo.Memory.mem.Alloc\n s.GoInfo.Memory.HeapAlloc = s.GoInfo.Memory.mem.HeapAlloc\n s.GoInfo.Memory.HeapSys = s.GoInfo.Memory.mem.HeapSys\n\n if s.GoInfo.Memory.LastGC != s.GoInfo.Memory.mem.LastGC {\n s.GoInfo.Memory.LastGC = s.GoInfo.Memory.mem.LastGC\n s.GoInfo.Memory.NumGC = s.GoInfo.Memory.mem.NumGC - s.GoInfo.Memory.lastNumGC\n s.GoInfo.Memory.lastNumGC = s.GoInfo.Memory.mem.NumGC\n s.GoInfo.Memory.LastGCPauseDuration = s.GoInfo.Memory.mem.PauseNs[(s.GoInfo.Memory.mem.NumGC+255)%256]\n } else {\n s.GoInfo.Memory.NumGC = 0\n s.GoInfo.Memory.LastGCPauseDuration = 0\n }\n }\n\n if logMemory {\n\n if s.MemInfo == nil {\n s.MemInfo = new(MemInfo)\n }\n\n s.MemInfo.Memory, _ = mem.VirtualMemory()\n s.MemInfo.Swap, _ = mem.SwapMemory()\n }\n}", "func (s *ProcessStat) Collect(collectAttributes bool) {\n\n\th := s.Processes\n\tfor _, v := 
range h {\n\t\tv.dead = true\n\t}\n\n\tvar pDefaultSet C.processor_set_name_t\n\tvar pDefaultSetControl C.processor_set_t\n\tvar tasks C.task_array_t\n\tvar taskCount C.mach_msg_type_number_t\n\n\tif C.processor_set_default(s.hport, &pDefaultSet) != C.KERN_SUCCESS {\n\t\treturn\n\t}\n\n\t// get privileged port to get information about all tasks\n\n\tif C.host_processor_set_priv(C.host_priv_t(s.hport),\n\t\tpDefaultSet, &pDefaultSetControl) != C.KERN_SUCCESS {\n\t\treturn\n\t}\n\n\tif C.processor_set_tasks(pDefaultSetControl, &tasks, &taskCount) != C.KERN_SUCCESS {\n\t\treturn\n\t}\n\n\t// convert tasks to a Go slice\n\thdr := reflect.SliceHeader{\n\t\tData: uintptr(unsafe.Pointer(tasks)),\n\t\tLen: int(taskCount),\n\t\tCap: int(taskCount),\n\t}\n\n\tgoTaskList := *(*[]C.task_name_t)(unsafe.Pointer(&hdr))\n\n\t// mach_msg_type_number_t - type natural_t = uint32_t\n\tvar i uint32\n\tfor i = 0; i < uint32(taskCount); i++ {\n\n\t\ttaskID := goTaskList[i]\n\t\tvar pid C.int\n\t\t// var tinfo C.task_info_data_t\n\t\tvar count C.mach_msg_type_number_t\n\t\tvar taskBasicInfo C.mach_task_basic_info_data_t\n\t\tvar taskAbsoluteInfo C.task_absolutetime_info_data_t\n\n\t\tif (C.pid_for_task(C.mach_port_name_t(taskID), &pid) != C.KERN_SUCCESS) ||\n\t\t\t(pid < 0) {\n\t\t\tcontinue\n\t\t}\n\n\t\tcount = C.MACH_TASK_BASIC_INFO_COUNT\n\t\tkr := C.task_info(taskID, C.MACH_TASK_BASIC_INFO,\n\t\t\t(C.task_info_t)(unsafe.Pointer(&taskBasicInfo)),\n\t\t\t&count)\n\t\tif kr != C.KERN_SUCCESS {\n\t\t\tcontinue\n\t\t}\n\n\t\tspid := fmt.Sprintf(\"%v\", pid)\n\t\tpidstat, ok := h[spid]\n\t\tif !ok {\n\t\t\tpidstat = NewPerProcessStat(s.m, spid)\n\t\t\th[spid] = pidstat\n\t\t}\n\n\t\tif collectAttributes || !ok {\n\t\t\tpidstat.collectAttributes(pid)\n\t\t}\n\n\t\tpidstat.Metrics.VirtualSize.Set(float64(taskBasicInfo.virtual_size))\n\t\tpidstat.Metrics.ResidentSize.Set(float64(taskBasicInfo.resident_size))\n\t\tpidstat.Metrics.ResidentSizeMax.Set(float64(taskBasicInfo.resident_size_max))\n\n\t\tcount = C.TASK_ABSOLUTETIME_INFO_COUNT\n\t\tkr = C.task_info(taskID, C.TASK_ABSOLUTETIME_INFO,\n\t\t\t(C.task_info_t)(unsafe.Pointer(&taskAbsoluteInfo)),\n\t\t\t&count)\n\t\tif kr != C.KERN_SUCCESS {\n\t\t\tcontinue\n\t\t}\n\t\tpidstat.Metrics.UserTime.Set(\n\t\t\tuint64(C.absolute_to_nano(taskAbsoluteInfo.total_user)))\n\t\tpidstat.Metrics.SystemTime.Set(\n\t\t\tuint64(C.absolute_to_nano(taskAbsoluteInfo.total_system)))\n\t\tpidstat.dead = false\n\t}\n\n\t// remove dead processes\n\tfor k, v := range h {\n\t\tif v.dead {\n\t\t\tdelete(h, k)\n\t\t}\n\t}\n\n}", "func (g gatherer) GatherMetrics(ctx context.Context, out *apm.Metrics) error {\n\tmetricFamilies, err := g.p.Gather()\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\tfor _, mf := range metricFamilies {\n\t\tname := mf.GetName()\n\t\tswitch mf.GetType() {\n\t\tcase dto.MetricType_COUNTER:\n\t\t\tfor _, m := range mf.GetMetric() {\n\t\t\t\tv := m.GetCounter().GetValue()\n\t\t\t\tout.Add(name, makeLabels(m.GetLabel()), v)\n\t\t\t}\n\t\tcase dto.MetricType_GAUGE:\n\t\t\tmetrics := mf.GetMetric()\n\t\t\tif name == \"go_info\" && len(metrics) == 1 && metrics[0].GetGauge().GetValue() == 1 {\n\t\t\t\t// Ignore the \"go_info\" metric from the\n\t\t\t\t// built-in GoCollector, as we provide\n\t\t\t\t// the same information in the payload.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, m := range metrics {\n\t\t\t\tv := m.GetGauge().GetValue()\n\t\t\t\tout.Add(name, makeLabels(m.GetLabel()), v)\n\t\t\t}\n\t\tcase dto.MetricType_UNTYPED:\n\t\t\tfor _, m := range 
mf.GetMetric() {\n\t\t\t\tv := m.GetUntyped().GetValue()\n\t\t\t\tout.Add(name, makeLabels(m.GetLabel()), v)\n\t\t\t}\n\t\tcase dto.MetricType_SUMMARY:\n\t\t\tfor _, m := range mf.GetMetric() {\n\t\t\t\ts := m.GetSummary()\n\t\t\t\tlabels := makeLabels(m.GetLabel())\n\t\t\t\tout.Add(name+\".count\", labels, float64(s.GetSampleCount()))\n\t\t\t\tout.Add(name+\".total\", labels, float64(s.GetSampleSum()))\n\t\t\t\tfor _, q := range s.GetQuantile() {\n\t\t\t\t\tp := int(q.GetQuantile() * 100)\n\t\t\t\t\tout.Add(name+\".percentile.\"+strconv.Itoa(p), labels, q.GetValue())\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\t// TODO(axw) MetricType_HISTOGRAM\n\t\t}\n\t}\n\treturn nil\n}", "func MeasureProcessUsage(ctx context.Context, duration time.Duration,\n\texitOption ExitOption, ts ...*gtest.GTest) (measurements map[string]float64, retErr error) {\n\tconst (\n\t\tstabilizeTime = 1 * time.Second // time to wait for CPU to stabilize after launching proc.\n\t\tcleanupTime = 5 * time.Second // time reserved for cleanup after measuring.\n\t)\n\n\tfor _, t := range ts {\n\t\t// Start the process asynchronously by calling the provided startup function.\n\t\tcmd, err := t.Start(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to run binary\")\n\t\t}\n\n\t\t// Clean up the process upon exiting the function.\n\t\tdefer func() {\n\t\t\t// If the exit option is 'WaitProcess' wait for the process to terminate.\n\t\t\tif exitOption == WaitProcess {\n\t\t\t\tif err := cmd.Wait(); err != nil {\n\t\t\t\t\tretErr = err\n\t\t\t\t\ttesting.ContextLog(ctx, \"Failed waiting for the command to exit: \", retErr)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// If the exit option is 'KillProcess' we will send a 'SIGKILL' signal\n\t\t\t// to the process after collecting performance metrics.\n\t\t\tif err := cmd.Kill(); err != nil {\n\t\t\t\tretErr = err\n\t\t\t\ttesting.ContextLog(ctx, \"Failed to kill process: \", retErr)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// After sending a 'SIGKILL' signal to the process we need to wait\n\t\t\t// for the process to terminate. 
If Wait() doesn't return any error,\n\t\t\t// we know the process already terminated before we explicitly killed\n\t\t\t// it and the measured performance metrics are invalid.\n\t\t\terr = cmd.Wait()\n\t\t\tif err == nil {\n\t\t\t\tretErr = errors.New(\"process did not run for entire measurement duration\")\n\t\t\t\ttesting.ContextLog(ctx, retErr)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Check whether the process was terminated with a 'SIGKILL' signal.\n\t\t\tws, ok := testexec.GetWaitStatus(err)\n\t\t\tif !ok {\n\t\t\t\tretErr = errors.Wrap(err, \"failed to get wait status\")\n\t\t\t\ttesting.ContextLog(ctx, retErr)\n\t\t\t} else if !ws.Signaled() || ws.Signal() != unix.SIGKILL {\n\t\t\t\tretErr = errors.Wrap(err, \"process did not terminate with SIGKILL signal\")\n\t\t\t\ttesting.ContextLog(ctx, retErr)\n\t\t\t}\n\t\t}()\n\t}\n\n\t// Use a shorter context to leave time for cleanup upon failure.\n\tctx, cancel := ctxutil.Shorten(ctx, cleanupTime)\n\tdefer cancel()\n\n\tif err := testing.Sleep(ctx, stabilizeTime); err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed waiting for CPU usage to stabilize\")\n\t}\n\n\ttesting.ContextLog(ctx, \"Measuring CPU usage and power consumption for \", duration.Round(time.Second))\n\treturn MeasureUsage(ctx, duration)\n}", "func GetRuntimeStats() (result map[string]float64) {\n\truntime.ReadMemStats(memStats)\n\n\tnow = time.Now()\n\tdiffTime = now.Sub(lastSampleTime).Seconds()\n\n\tresult = map[string]float64{\n\t\t\"alloc\": float64(memStats.Alloc),\n\t\t\"frees\": float64(memStats.Frees),\n\t\t\"gc.pause_total\": float64(memStats.PauseTotalNs) / nsInMs,\n\t\t\"heap.alloc\": float64(memStats.HeapAlloc),\n\t\t\"heap.objects\": float64(memStats.HeapObjects),\n\t\t\"mallocs\": float64(memStats.Mallocs),\n\t\t\"stack\": float64(memStats.StackInuse),\n\t}\n\n\tif lastPauseNs > 0 {\n\t\tpauseSinceLastSample = memStats.PauseTotalNs - lastPauseNs\n\t\tresult[\"gc.pause_per_second\"] = float64(pauseSinceLastSample) / nsInMs / diffTime\n\t}\n\n\tlastPauseNs = memStats.PauseTotalNs\n\n\tnbGc = memStats.NumGC - lastNumGc\n\tif lastNumGc > 0 {\n\t\tresult[\"gc.gc_per_second\"] = float64(nbGc) / diffTime\n\t}\n\n\t// Collect GC pauses\n\tif nbGc > 0 {\n\t\tif nbGc > 256 {\n\t\t\tnbGc = 256\n\t\t}\n\n\t\tvar i uint32\n\n\t\tfor i = 0; i < nbGc; i++ {\n\t\t\tidx := int((memStats.NumGC-uint32(i))+255) % 256\n\t\t\tpause := float64(memStats.PauseNs[idx])\n\t\t\tresult[\"gc.pause\"] = pause / nsInMs\n\t\t}\n\t}\n\n\t// Store last values\n\tlastNumGc = memStats.NumGC\n\tlastSampleTime = now\n\n\treturn result\n}", "func (c *VMCollector) Collect(ch chan<- prometheus.Metric) {\n\tfor _, m := range c.getMetrics() {\n\t\tch <- m\n\t}\n}", "func (cg *CGroup) GetMemoryStats() (map[string]uint64, error) {\n\tvar (\n\t\terr error\n\t\tstats string\n\t)\n\n\tout := make(map[string]uint64)\n\n\tversion := cgControllers[\"memory\"]\n\tswitch version {\n\tcase Unavailable:\n\t\treturn nil, ErrControllerMissing\n\tcase V1, V2:\n\t\tstats, err = cg.rw.Get(version, \"memory\", \"memory.stat\")\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, stat := range strings.Split(stats, \"\\n\") {\n\t\tfield := strings.Split(stat, \" \")\n\n\t\tswitch field[0] {\n\t\tcase \"total_active_anon\", \"active_anon\":\n\t\t\tout[\"active_anon\"], _ = strconv.ParseUint(field[1], 10, 64)\n\t\tcase \"total_active_file\", \"active_file\":\n\t\t\tout[\"active_file\"], _ = strconv.ParseUint(field[1], 10, 64)\n\t\tcase \"total_inactive_anon\", \"inactive_anon\":\n\t\t\tout[\"inactive_anon\"], _ = 
strconv.ParseUint(field[1], 10, 64)\n\t\tcase \"total_inactive_file\", \"inactive_file\":\n\t\t\tout[\"inactive_file\"], _ = strconv.ParseUint(field[1], 10, 64)\n\t\tcase \"total_unevictable\", \"unevictable\":\n\t\t\tout[\"unevictable\"], _ = strconv.ParseUint(field[1], 10, 64)\n\t\tcase \"total_writeback\", \"file_writeback\":\n\t\t\tout[\"writeback\"], _ = strconv.ParseUint(field[1], 10, 64)\n\t\tcase \"total_dirty\", \"file_dirty\":\n\t\t\tout[\"dirty\"], _ = strconv.ParseUint(field[1], 10, 64)\n\t\tcase \"total_mapped_file\", \"file_mapped\":\n\t\t\tout[\"mapped\"], _ = strconv.ParseUint(field[1], 10, 64)\n\t\tcase \"total_rss\": // v1 only\n\t\t\tout[\"rss\"], _ = strconv.ParseUint(field[1], 10, 64)\n\t\tcase \"total_shmem\", \"shmem\":\n\t\t\tout[\"shmem\"], _ = strconv.ParseUint(field[1], 10, 64)\n\t\tcase \"total_cache\", \"file\":\n\t\t\tout[\"cache\"], _ = strconv.ParseUint(field[1], 10, 64)\n\t\t}\n\t}\n\n\t// Calculated values\n\tout[\"active\"] = out[\"active_anon\"] + out[\"active_file\"]\n\tout[\"inactive\"] = out[\"inactive_anon\"] + out[\"inactive_file\"]\n\n\treturn out, nil\n}", "func (pc *NginxProcessesMetricsCollector) updateWorkerProcessCount() {\n\tcurrWorkerProcesses, prevWorkerProcesses, err := getWorkerProcesses()\n\tif err != nil {\n\t\tglog.Errorf(\"unable to collect process metrics : %v\", err)\n\t\treturn\n\t}\n\tpc.workerProcessTotal.WithLabelValues(\"current\").Set(float64(currWorkerProcesses))\n\tpc.workerProcessTotal.WithLabelValues(\"old\").Set(float64(prevWorkerProcesses))\n}", "func CaptureRuntimeMemStats(registry RootRegistry, collectionFreq time.Duration) {\n\truntimeMemStats.Do(func() {\n\t\tif reg, ok := registry.(*rootRegistry); ok {\n\t\t\tgoRegistry := metrics.NewPrefixedChildRegistry(reg.registry, \"go.\")\n\t\t\tmetrics.RegisterRuntimeMemStats(goRegistry)\n\t\t\tgo metrics.CaptureRuntimeMemStats(goRegistry, collectionFreq)\n\t\t}\n\t})\n}", "func NewVMCollector(cfgBaseName string) (collector.Collector, error) {\n\tprocFile := \"meminfo\"\n\n\tc := VM{}\n\tc.id = \"vm\"\n\tc.pkgID = \"builtins.linux.procfs.\" + c.id\n\tc.procFSPath = \"/proc\"\n\tc.file = filepath.Join(c.procFSPath, procFile)\n\tc.logger = log.With().Str(\"pkg\", c.pkgID).Logger()\n\tc.metricStatus = map[string]bool{}\n\tc.metricDefaultActive = true\n\n\tif cfgBaseName == \"\" {\n\t\tif _, err := os.Stat(c.file); err != nil {\n\t\t\treturn nil, errors.Wrap(err, c.pkgID)\n\t\t}\n\t\treturn &c, nil\n\t}\n\n\tvar opts vmOptions\n\terr := config.LoadConfigFile(cfgBaseName, &opts)\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), \"no config found matching\") {\n\t\t\treturn &c, nil\n\t\t}\n\t\tc.logger.Warn().Err(err).Str(\"file\", cfgBaseName).Msg(\"loading config file\")\n\t\treturn nil, errors.Wrapf(err, \"%s config\", c.pkgID)\n\t}\n\n\tc.logger.Debug().Str(\"base\", cfgBaseName).Interface(\"config\", opts).Msg(\"loaded config\")\n\n\tif opts.ID != \"\" {\n\t\tc.id = opts.ID\n\t}\n\n\tif opts.ProcFSPath != \"\" {\n\t\tc.procFSPath = opts.ProcFSPath\n\t\tc.file = filepath.Join(c.procFSPath, procFile)\n\t}\n\n\tif len(opts.MetricsEnabled) > 0 {\n\t\tfor _, name := range opts.MetricsEnabled {\n\t\t\tc.metricStatus[name] = true\n\t\t}\n\t}\n\tif len(opts.MetricsDisabled) > 0 {\n\t\tfor _, name := range opts.MetricsDisabled {\n\t\t\tc.metricStatus[name] = false\n\t\t}\n\t}\n\n\tif opts.MetricsDefaultStatus != \"\" {\n\t\tif ok, _ := regexp.MatchString(`^(enabled|disabled)$`, strings.ToLower(opts.MetricsDefaultStatus)); ok {\n\t\t\tc.metricDefaultActive = 
strings.ToLower(opts.MetricsDefaultStatus) == metricStatusEnabled\n\t\t} else {\n\t\t\treturn nil, errors.Errorf(\"%s invalid metric default status (%s)\", c.pkgID, opts.MetricsDefaultStatus)\n\t\t}\n\t}\n\n\tif opts.RunTTL != \"\" {\n\t\tdur, err := time.ParseDuration(opts.RunTTL)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"%s parsing run_ttl\", c.pkgID)\n\t\t}\n\t\tc.runTTL = dur\n\t}\n\n\tif _, err := os.Stat(c.file); os.IsNotExist(err) {\n\t\treturn nil, errors.Wrap(err, c.pkgID)\n\t}\n\n\treturn &c, nil\n}", "func (u *Use) CollectMetrics(mts []plugin.Metric) ([]plugin.Metric, error) {\n\tcfg := mts[0].Config\n\tif !u.initialized {\n\t\tu.init(cfg)\n\t}\n\n\tmetrics := make([]plugin.Metric, len(mts))\n\tfor i, p := range mts {\n\t\tns := p.Namespace.String()\n\t\tswitch {\n\t\tcase cpure.MatchString(ns):\n\t\t\tmetric, err := u.computeStat(p.Namespace)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.New(\"Unable to get compute stat: \" + err.Error())\n\t\t\t}\n\t\t\tmetrics[i] = *metric\n\n\t\tcase storre.MatchString(ns):\n\t\t\tmetric, err := u.diskStat(p.Namespace)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.New(\"Unable to get disk stat: \" + err.Error())\n\t\t\t}\n\t\t\tmetrics[i] = *metric\n\t\tcase memre.MatchString(ns):\n\t\t\tmetric, err := memStat(p.Namespace, u.VmStatPath, u.MemInfoPath)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.New(\"Unable to get mem stat: \" + err.Error())\n\t\t\t}\n\t\t\tmetrics[i] = *metric\n\t\t}\n\t\ttags, err := hostTags()\n\n\t\tif err == nil {\n\t\t\tmetrics[i].Tags = tags\n\t\t}\n\t\tmetrics[i].Timestamp = time.Now()\n\n\t}\n\treturn metrics, nil\n}", "func logMemstatsSample() {\n\tl := log.WithField(\"process\", \"memstats\")\n\n\truntime.GC() // get up-to-date statistics\n\n\tmemStats := new(runtime.MemStats)\n\truntime.ReadMemStats(memStats)\n\n\tvar gcStats debug.GCStats\n\tdebug.ReadGCStats(&gcStats)\n\n\ts := memStats\n\n\tl.Infof(\"# runtime.MemStats\")\n\tl.Infof(\"# Alloc = %d\", s.Alloc)\n\tl.Infof(\"# TotalAlloc = %d\", s.TotalAlloc)\n\tl.Infof(\"# Sys = %d\", s.Sys)\n\tl.Infof(\"# Lookups = %d\", s.Lookups)\n\tl.Infof(\"# Mallocs = %d\", s.Mallocs)\n\tl.Infof(\"# Frees = %d\", s.Frees)\n\tl.Infof(\"# HeapAlloc = %d\", s.HeapAlloc)\n\tl.Infof(\"# HeapSys = %d\", s.HeapSys)\n\tl.Infof(\"# HeapIdle = %d\", s.HeapIdle)\n\tl.Infof(\"# HeapInuse = %d\", s.HeapInuse)\n\tl.Infof(\"# HeapReleased = %d\", s.HeapReleased)\n\tl.Infof(\"# HeapObjects = %d\", s.HeapObjects)\n\tl.Infof(\"# Stack = %d / %d\", s.StackInuse, s.StackSys)\n\tl.Infof(\"# NumGoroutine = %d\", runtime.NumGoroutine())\n\n\t// Record GC pause history, most recent 5 entries\n\tl.Infof(\"# Stop-the-world Pause time\")\n\n\tfor i, v := range gcStats.Pause {\n\t\tl.Infof(\"# gcStats.Pause[%d] = %d ns\", i, v)\n\n\t\tif i == 5 {\n\t\t\tbreak\n\t\t}\n\t}\n}", "func (c collector) Collect(ch chan<- prometheus.Metric) {\n\tvar wg sync.WaitGroup\n\n\t// We don't bail out on errors because those can happen if there is a race condition between\n\t// the destruction of a container and us getting to read the cgroup data. 
We just don't report\n\t// the values we don't get.\n\n\tcollectors := []func(string, *regexp.Regexp){\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tnuma, err := cgroups.GetNumaStats(cgroupPath(\"memory\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateNumaStatMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], numa)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect NUMA stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tmemory, err := cgroups.GetMemoryUsage(cgroupPath(\"memory\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateMemoryUsageMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], memory)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect memory usage stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tmigrate, err := cgroups.GetCPUSetMemoryMigrate(cgroupPath(\"cpuset\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateMemoryMigrateMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], migrate)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect memory migration stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tcpuAcctUsage, err := cgroups.GetCPUAcctStats(cgroupPath(\"cpuacct\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateCPUAcctUsageMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], cpuAcctUsage)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect CPU accounting stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\thugeTlbUsage, err := cgroups.GetHugetlbUsage(cgroupPath(\"hugetlb\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateHugeTlbUsageMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], hugeTlbUsage)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect hugetlb stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tblkioDeviceUsage, err := cgroups.GetBlkioThrottleBytes(cgroupPath(\"blkio\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateBlkioDeviceUsageMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], blkioDeviceUsage)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect blkio stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t}\n\n\tcontainerIDRegexp := regexp.MustCompile(`[a-z0-9]{64}`)\n\n\tfor _, path := range walkCgroups() {\n\t\twg.Add(len(collectors))\n\t\tfor _, fn := range collectors {\n\t\t\tgo fn(path, containerIDRegexp)\n\t\t}\n\t}\n\n\t// We need to wait so that the response channel doesn't get closed.\n\twg.Wait()\n}", "func (s *CPUStat) Collect() {\n\tfile, err := os.Open(root + \"proc/stat\")\n\tdefer file.Close()\n\n\tif err != nil {\n\t\treturn\n\t}\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tf := regexp.MustCompile(\"\\\\s+\").Split(scanner.Text(), -1)\n\n\t\tisCPU, err := regexp.MatchString(\"^cpu\\\\d*\", f[0])\n\t\tif err == nil && isCPU {\n\t\t\tif f[0] == \"cpu\" {\n\t\t\t\tparseCPUline(s.All, f)\n\t\t\t\tpopulateComputedStats(s.All, float64(len(s.cpus)))\n\t\t\t\ts.All.TotalCount.Set(float64(len(s.cpus)))\n\t\t\t} else {\n\t\t\t\tperCPU, ok := s.cpus[f[0]]\n\t\t\t\tif !ok {\n\t\t\t\t\tperCPU = NewPerCPU(s.m, f[0])\n\t\t\t\t\ts.cpus[f[0]] = perCPU\n\t\t\t\t}\n\t\t\t\tparseCPUline(perCPU, f)\n\t\t\t\tpopulateComputedStats(perCPU, 1.0)\n\t\t\t\tperCPU.TotalCount.Set(1)\n\t\t\t}\n\t\t}\n\t}\n}", "func procMem(_ int) 
(ProcMemStats, error) {\n\treturn ProcMemStats{}, nil\n}", "func Collectmem(serverName string) (Metric, error) {\n\tvalues := Metric{}\n\tvar err error\n\tvalues.Timestamp = time.Now()\n\tvalues.MetricType = \"mem\"\n\tvar output string\n\n\tvar response cpmserverapi.MetricMEMResponse\n\trequest := &cpmserverapi.MetricMEMRequest{}\n\tresponse, err = cpmserverapi.MetricMEMClient(serverName, request)\n\tif err != nil {\n\t\tlogit.Error.Println(\"mem metric error:\" + err.Error())\n\t\treturn values, err\n\t}\n\n\toutput = strings.TrimSpace(response.Output)\n\n\tvalues.Value, err = strconv.ParseFloat(output, 64)\n\tif err != nil {\n\t\tlogit.Error.Println(\"parseFloat error in mem metric \" + err.Error())\n\t}\n\n\treturn values, err\n}", "func hookStats(e *evtx.GoEvtxMap) {\n\t// We do not store stats if process termination is not enabled\n\tif flagProcTermEn {\n\t\tif guid, err := e.GetString(&pathSysmonProcessGUID); err == nil {\n\t\t\t//v, ok := processTracker.Get(guid)\n\t\t\t//if ok {\n\t\t\tif pt := processTracker.GetByGuid(guid); pt != nil {\n\t\t\t\t//pt := v.(*processTrack)\n\t\t\t\tswitch e.EventID() {\n\t\t\t\tcase 1:\n\t\t\t\t\tpt.Stats.CountProcessCreated++\n\t\t\t\tcase 3:\n\t\t\t\t\tpt.Stats.CountNetConn++\n\t\t\t\tcase 11:\n\t\t\t\t\tif target, err := e.GetString(&pathSysmonTargetFilename); err == nil {\n\t\t\t\t\t\text := filepath.Ext(target)\n\t\t\t\t\t\tif pt.Stats.CountFilesCreatedByExt[ext] == nil {\n\t\t\t\t\t\t\ti := int64(0)\n\t\t\t\t\t\t\tpt.Stats.CountFilesCreatedByExt[ext] = &i\n\t\t\t\t\t\t}\n\t\t\t\t\t\t*(pt.Stats.CountFilesCreatedByExt[ext])++\n\t\t\t\t\t}\n\t\t\t\t\tpt.Stats.CountFilesCreated++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (n *RouterNode) GatherMetrics() {\n\tn.Lock()\n\tdefer n.Unlock()\n\n\tlevel.Debug(n.logger).Log(\n\t\t\"msg\", \"GatherMetrics() locked\",\n\t)\n\n\tif time.Now().Unix() < n.nextCollectionTicker {\n\t\treturn\n\t}\n\tstart := time.Now()\n\tif len(n.metrics) > 0 {\n\t\tn.metrics = n.metrics[:0]\n\t\tlevel.Debug(n.logger).Log(\n\t\t\t\"msg\", \"GatherMetrics() cleared metrics\",\n\t\t)\n\t}\n\tupValue := 1\n\n\t// What is RouterID and AS number of this GoBGP server?\n\tserver, err := n.client.GetBgp(context.Background(), &gobgpapi.GetBgpRequest{})\n\tif err != nil {\n\t\tn.IncrementErrorCounter()\n\t\tlevel.Error(n.logger).Log(\n\t\t\t\"msg\", \"failed query gobgp server\",\n\t\t\t\"error\", err.Error(),\n\t\t)\n\t\tif IsConnectionError(err) {\n\t\t\tn.connected = false\n\t\t\tupValue = 0\n\t\t}\n\t} else {\n\t\tn.routerID = server.Global.RouterId\n\t\tn.localAS = server.Global.Asn\n\t\tlevel.Debug(n.logger).Log(\n\t\t\t\"msg\", \"router info\",\n\t\t\t\"router_id\", n.routerID,\n\t\t\t\"local_asn\", n.localAS,\n\t\t)\n\t\tn.connected = true\n\t}\n\n\tif n.connected {\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(2)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tn.GetRibCounters()\n\t\t}()\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tn.GetPeers()\n\t\t}()\n\t\twg.Wait()\n\n\t}\n\n\t// Generic Metrics\n\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\trouterUp,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(upValue),\n\t))\n\n\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\trouterErrors,\n\t\tprometheus.CounterValue,\n\t\tfloat64(n.errors),\n\t))\n\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\trouterNextScrape,\n\t\tprometheus.CounterValue,\n\t\tfloat64(n.nextCollectionTicker),\n\t))\n\tn.metrics = append(n.metrics, 
prometheus.MustNewConstMetric(\n\t\trouterScrapeTime,\n\t\tprometheus.GaugeValue,\n\t\ttime.Since(start).Seconds(),\n\t))\n\n\t// Router ID and ASN\n\tif n.routerID != \"\" {\n\t\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\t\trouterID,\n\t\t\tprometheus.GaugeValue,\n\t\t\t1,\n\t\t\tn.routerID,\n\t\t))\n\t}\n\tif n.localAS > 0 {\n\t\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\t\trouterLocalAS,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(n.localAS),\n\t\t))\n\t}\n\n\tn.nextCollectionTicker = time.Now().Add(time.Duration(n.pollInterval) * time.Second).Unix()\n\n\tif upValue > 0 {\n\t\tn.result = \"success\"\n\t} else {\n\t\tn.result = \"failure\"\n\t}\n\tn.timestamp = time.Now().Format(time.RFC3339)\n\n\tlevel.Debug(n.logger).Log(\n\t\t\"msg\", \"GatherMetrics() returns\",\n\t)\n}", "func (c *metricCollector) collectMemory(service *rrhttp.Service, tick time.Duration) {\n\tstarted := false\n\tfor {\n\t\tserver := service.Server()\n\t\tif server == nil && started {\n\t\t\t// stopped\n\t\t\treturn\n\t\t}\n\n\t\tstarted = true\n\n\t\tif workers, err := util.ServerState(server); err == nil {\n\t\t\tsum := 0.0\n\t\t\tfor _, w := range workers {\n\t\t\t\tsum = sum + float64(w.MemoryUsage)\n\t\t\t}\n\n\t\t\tc.workersMemory.Set(sum)\n\t\t}\n\n\t\ttime.Sleep(tick)\n\t}\n}", "func (c *ProcessStat) ByMemUsage() []*PerProcessStat {\n\tvar v []*PerProcessStat\n\tfor _, o := range c.Processes {\n\t\tif !math.IsNaN(o.MemUsage()) {\n\t\t\tv = append(v, o)\n\t\t}\n\t}\n\tsort.Sort(byMemUsage(v))\n\treturn v\n}", "func (g gatherer) GatherMetrics(ctx context.Context, m *elasticapm.Metrics) error {\n\tg.r.Each(func(name string, v interface{}) {\n\t\tswitch v := v.(type) {\n\t\tcase metrics.Counter:\n\t\t\tm.Add(name, nil, float64(v.Count()))\n\t\tcase metrics.Gauge:\n\t\t\tm.Add(name, nil, float64(v.Value()))\n\t\tcase metrics.GaugeFloat64:\n\t\t\tm.Add(name, nil, v.Value())\n\t\tcase metrics.Histogram:\n\t\t\tm.Add(name+\".count\", nil, float64(v.Count()))\n\t\t\tm.Add(name+\".total\", nil, float64(v.Sum()))\n\t\t\tm.Add(name+\".min\", nil, float64(v.Min()))\n\t\t\tm.Add(name+\".max\", nil, float64(v.Max()))\n\t\t\tm.Add(name+\".stddev\", nil, v.StdDev())\n\t\t\tm.Add(name+\".percentile.50\", nil, v.Percentile(0.5))\n\t\t\tm.Add(name+\".percentile.95\", nil, v.Percentile(0.95))\n\t\t\tm.Add(name+\".percentile.99\", nil, v.Percentile(0.99))\n\t\tdefault:\n\t\t\t// TODO(axw) Meter, Timer, EWMA\n\t\t}\n\t})\n\treturn nil\n}", "func UpdateMetrics(result *Results) {\n\n\t// Publish system variables\n\tupTimeGauge.Set(float64(result.SysMonitorInfo.Uptime))\n\tcpuUsageGauge.Set(float64(result.SysMonitorInfo.CpuUsagePercent))\n\n\t// Memory\n\tmemUsagePercentGauge.Set(result.SysMonitorInfo.MemUsagePercent)\n\tmemTotalGauge.Set(float64(result.SysMonitorInfo.MemTotal))\n\tmemAvailableGauge.Set(float64(result.SysMonitorInfo.MemAvailable))\n\n\t// Bandwidth\n\tbandwidthUsageTotalGauge.Set(float64(result.SysMonitorInfo.BandwidthUsageTotal))\n\tbandwidthUsageSentGauge.Set(float64(result.SysMonitorInfo.BandwidthUsageSent))\n\tbandwidthUsageRecvGauge.Set(float64(result.SysMonitorInfo.BandwidthUsageRecv))\n\n\tfor _, driveUsage := range result.SysMonitorInfo.DriveUsage {\n\t\t// \"drive_path\", \"available\", \"growth_rate\", \"full_in\", \"physical_drive\"\n\n\t\tdays := strconv.FormatFloat(driveUsage.DaysTillFull, 'f', 3, 64)\n\n\t\tif math.IsInf(driveUsage.DaysTillFull, 0) {\n\t\t\tdays = \"10 
years\"\n\t\t}\n\n\t\tdriveSpace.WithLabelValues(driveUsage.Path,\n\t\t\tstrconv.FormatFloat(driveUsage.PercentUsed, 'f', 3, 64),\n\t\t\tstrconv.FormatUint(driveUsage.GrowthPerDayBytes, 10),\n\t\t\tdays,\n\t\t\tdriveUsage.VolumeName).Set(driveUsage.PercentUsed)\n\t}\n\n\t// Publish endpoints being monitored\n\tfor _, uptimeResponse := range result.UptimeList {\n\n\t\tif uptimeResponse.ResponseCode == 200 {\n\t\t\tendpointAvailable.WithLabelValues(uptimeResponse.Endpoint).Set(1)\n\t\t} else {\n\t\t\tendpointAvailable.WithLabelValues(uptimeResponse.Endpoint).Set(0)\n\t\t}\n\n\t\tendpointDuration.WithLabelValues(uptimeResponse.Endpoint).Set(uptimeResponse.ResponseTime.Seconds())\n\t}\n\n\tfor _, backupInfo := range result.BackupInfoList {\n\n\t\t/*\n\t\t\tif backupInfo.WasBackedUp {\n\t\t\t\tbackupsDone.WithLabelValues(backupInfo.Folder).Set(1)\n\t\t\t} else {\n\t\t\t\tbackupsDone.WithLabelValues(backupInfo.Folder).Set(0)\n\t\t\t}\n\t\t*/\n\n\t\t// {\"backup_directory\", \"backup_in_last_24_hours\", \"last_backup_size\", \"last_backup_date\", \"last_backup_time\"})\n\n\t\t// backupsSize.WithLabelValues(backupInfo.Folder).Set(float64(backupInfo.BackupFileSize))\n\n\t\tbackupInfoGauge.WithLabelValues(backupInfo.Folder,\n\t\t\tbtoa(backupInfo.WasBackedUp),\n\t\t\titoa(backupInfo.LastBackupSize),\n\t\t\tttoa(backupInfo.LastBackupTime),\n\t\t\tbackupInfo.LastBackupFile).Set(btof(backupInfo.WasBackedUp))\n\t}\n\n\t// TODO: This loop is not needed, you can build the summary on the first loop\n\tvar too_many_lines = 500\n\tfor _, logLine := range result.LoglineList {\n\n\t\tsummary, ok := result.LogSummary[logLine.LogPath]\n\n\t\tif ok == false {\n\t\t\tsummary = LogSummary{}\n\t\t\tsummary.StatusCount = make(map[string]int64)\n\t\t\tsummary.SeverityLevelCount = make(map[string]int64)\n\t\t}\n\n\t\tsummary.StatusCount[logLine.StatusCode] = summary.StatusCount[logLine.StatusCode] + 1\n\n\t\tif len(logLine.Severity) > 0 {\n\t\t\tsummary.SeverityLevelCount[logLine.Severity] = summary.SeverityLevelCount[logLine.Severity] + 1\n\t\t}\n\n\t\tresult.LogSummary[logLine.LogPath] = summary\n\n\t\tif too_many_lines <= 0 {\n\t\t\t// Pending a better solution, let's not allow the processing\n\t\t\t// of too many lines, to not kill the server\n\t\t\tlLog.Print(\"Too many lines for a single tick to process\")\n\t\t\tbreak\n\t\t}\n\n\t}\n\n\t// Set the values for the logs. 
We use two labels (logpath, code)\n\tfor logFilePath, logSummary := range result.LogSummary {\n\n\t\tfor s, value := range logSummary.StatusCount {\n\t\t\tstatusCodes.WithLabelValues(logFilePath, s).Set(float64(value))\n\t\t}\n\n\t\tfor s, value := range logSummary.SeverityLevelCount {\n\t\t\tseverity.WithLabelValues(logFilePath, s).Set(float64(value))\n\t\t}\n\n\t}\n}", "func report(p *rc.Process, wallTime time.Duration) {\n\tsv, err := p.SystemVersion()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\tss, err := p.SystemStatus()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\tproc, err := p.Stop()\n\tif err != nil {\n\t\treturn\n\t}\n\n\trusage, ok := proc.SysUsage().(*syscall.Rusage)\n\tif !ok {\n\t\treturn\n\t}\n\n\tlog.Println(\"Version:\", sv.Version)\n\tlog.Println(\"Alloc:\", ss.Alloc/1024, \"KiB\")\n\tlog.Println(\"Sys:\", ss.Sys/1024, \"KiB\")\n\tlog.Println(\"Goroutines:\", ss.Goroutines)\n\tlog.Println(\"Wall time:\", wallTime)\n\tlog.Println(\"Utime:\", time.Duration(rusage.Utime.Nano()))\n\tlog.Println(\"Stime:\", time.Duration(rusage.Stime.Nano()))\n\tif runtime.GOOS == \"darwin\" {\n\t\t// Darwin reports in bytes, Linux seems to report in KiB even\n\t\t// though the manpage says otherwise.\n\t\trusage.Maxrss /= 1024\n\t}\n\tlog.Println(\"MaxRSS:\", rusage.Maxrss, \"KiB\")\n\n\tfmt.Printf(\"%s,%d,%d,%d,%.02f,%.02f,%.02f,%d\\n\",\n\t\tsv.Version,\n\t\tss.Alloc/1024,\n\t\tss.Sys/1024,\n\t\tss.Goroutines,\n\t\twallTime.Seconds(),\n\t\ttime.Duration(rusage.Utime.Nano()).Seconds(),\n\t\ttime.Duration(rusage.Stime.Nano()).Seconds(),\n\t\trusage.Maxrss)\n}", "func (agg *aggregator) Process(pageviews []*models.Pageview) *results {\n\tlog.Debugf(\"processing %d pageviews\", len(pageviews))\n\tresults := newResults()\n\n\tfor _, p := range pageviews {\n\t\tsite, err := agg.getSiteStats(results, p.Timestamp)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tsite.Pageviews += 1\n\n\t\tif p.Duration > 0.00 {\n\t\t\tsite.KnownDurations += 1\n\t\t\tsite.AvgDuration = site.AvgDuration + ((float64(p.Duration) - site.AvgDuration) * 1 / float64(site.KnownDurations))\n\t\t}\n\n\t\tif p.IsNewVisitor {\n\t\t\tsite.Visitors += 1\n\t\t}\n\n\t\tif p.IsNewSession {\n\t\t\tsite.Sessions += 1\n\n\t\t\tif p.IsBounce {\n\t\t\t\tsite.BounceRate = ((float64(site.Sessions-1) * site.BounceRate) + 1) / (float64(site.Sessions))\n\t\t\t} else {\n\t\t\t\tsite.BounceRate = ((float64(site.Sessions-1) * site.BounceRate) + 0) / (float64(site.Sessions))\n\t\t\t}\n\t\t}\n\n\t\tpageStats, err := agg.getPageStats(results, p.Timestamp, p.Hostname, p.Pathname)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tpageStats.Pageviews += 1\n\t\tif p.IsUnique {\n\t\t\tpageStats.Visitors += 1\n\t\t}\n\n\t\tif p.Duration > 0.00 {\n\t\t\tpageStats.KnownDurations += 1\n\t\t\tpageStats.AvgDuration = pageStats.AvgDuration + ((float64(p.Duration) - pageStats.AvgDuration) * 1 / float64(pageStats.KnownDurations))\n\t\t}\n\n\t\tif p.IsNewSession {\n\t\t\tpageStats.Entries += 1\n\n\t\t\tif p.IsBounce {\n\t\t\t\tpageStats.BounceRate = ((float64(pageStats.Entries-1) * pageStats.BounceRate) + 1.00) / (float64(pageStats.Entries))\n\t\t\t} else {\n\t\t\t\tpageStats.BounceRate = ((float64(pageStats.Entries-1) * pageStats.BounceRate) + 0.00) / (float64(pageStats.Entries))\n\t\t\t}\n\t\t}\n\n\t\t// referrer stats\n\t\tif p.Referrer != \"\" {\n\t\t\treferrerStats, err := agg.getReferrerStats(results, p.Timestamp, p.Referrer)\n\t\t\tif err != nil 
{\n\t\t\t\tlog.Error(err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\treferrerStats.Pageviews += 1\n\n\t\t\tif p.IsNewVisitor {\n\t\t\t\treferrerStats.Visitors += 1\n\t\t\t}\n\n\t\t\tif p.IsBounce {\n\t\t\t\treferrerStats.BounceRate = ((float64(referrerStats.Pageviews-1) * referrerStats.BounceRate) + 1.00) / (float64(referrerStats.Pageviews))\n\t\t\t} else {\n\t\t\t\treferrerStats.BounceRate = ((float64(referrerStats.Pageviews-1) * referrerStats.BounceRate) + 0.00) / (float64(referrerStats.Pageviews))\n\t\t\t}\n\n\t\t\tif p.Duration > 0.00 {\n\t\t\t\treferrerStats.KnownDurations += 1\n\t\t\t\treferrerStats.AvgDuration = referrerStats.AvgDuration + ((float64(p.Duration) - referrerStats.AvgDuration) * 1 / float64(referrerStats.KnownDurations))\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\treturn results\n}", "func rcMemStats(ctx context.Context, in Params) (out Params, err error) {\n\tout = make(Params)\n\tvar m runtime.MemStats\n\truntime.ReadMemStats(&m)\n\tout[\"Alloc\"] = m.Alloc\n\tout[\"TotalAlloc\"] = m.TotalAlloc\n\tout[\"Sys\"] = m.Sys\n\tout[\"Mallocs\"] = m.Mallocs\n\tout[\"Frees\"] = m.Frees\n\tout[\"HeapAlloc\"] = m.HeapAlloc\n\tout[\"HeapSys\"] = m.HeapSys\n\tout[\"HeapIdle\"] = m.HeapIdle\n\tout[\"HeapInuse\"] = m.HeapInuse\n\tout[\"HeapReleased\"] = m.HeapReleased\n\tout[\"HeapObjects\"] = m.HeapObjects\n\tout[\"StackInuse\"] = m.StackInuse\n\tout[\"StackSys\"] = m.StackSys\n\tout[\"MSpanInuse\"] = m.MSpanInuse\n\tout[\"MSpanSys\"] = m.MSpanSys\n\tout[\"MCacheInuse\"] = m.MCacheInuse\n\tout[\"MCacheSys\"] = m.MCacheSys\n\tout[\"BuckHashSys\"] = m.BuckHashSys\n\tout[\"GCSys\"] = m.GCSys\n\tout[\"OtherSys\"] = m.OtherSys\n\treturn out, nil\n}", "func (s *Stats) GetAllCPUInfo() {\n s.GetCPUInfo()\n s.GetCPUTimes()\n}", "func systemMemoryMonitor(logger *logrus.Logger, wg *sync.WaitGroup, done chan struct{}, kill chan struct{}) {\n\tdefer wg.Done()\n\tdefer close(kill)\n\n\tvar swapUsedBaseline uint64 = math.MaxUint64\n\n\tfor {\n\t\tselect {\n\t\tcase <-done:\n\t\t\treturn\n\t\tcase <-time.After(1 * time.Second):\n\t\t}\n\n\t\tctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)\n\t\tmemStat, err := mem.VirtualMemoryWithContext(ctx)\n\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Debugf(\"Failed to retrieve memory usage.\")\n\t\t}\n\n\t\tcancel()\n\n\t\tctx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond)\n\t\tswapStat, err := mem.SwapMemoryWithContext(ctx)\n\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Debugf(\"Failed to retrieve swap usage.\")\n\t\t}\n\n\t\tcancel()\n\n\t\tswapUsed := uint64(0)\n\t\tif swapStat.Used < swapUsedBaseline {\n\t\t\tswapUsed = swapStat.Used\n\t\t} else {\n\t\t\tswapUsed = swapStat.Used - swapUsedBaseline\n\t\t}\n\n\t\tused := float64(memStat.Used+swapUsed) / float64(memStat.Total)\n\t\tlogger.Debugf(\n\t\t\t\"Memory usage: %.2f%% - RAM %s / Swap %s.\",\n\t\t\tused*100,\n\t\t\thumanBytes(memStat.Used),\n\t\t\thumanBytes(swapUsed),\n\t\t)\n\n\t\tif used > 0.9 {\n\t\t\treturn\n\t\t}\n\t}\n}", "func (c *VM) Collect(ctx context.Context) error {\n\tmetrics := cgm.Metrics{}\n\n\tc.Lock()\n\n\tif c.runTTL > time.Duration(0) {\n\t\tif time.Since(c.lastEnd) < c.runTTL {\n\t\t\tc.logger.Warn().Msg(collector.ErrTTLNotExpired.Error())\n\t\t\tc.Unlock()\n\t\t\treturn collector.ErrTTLNotExpired\n\t\t}\n\t}\n\tif c.running {\n\t\tc.logger.Warn().Msg(collector.ErrAlreadyRunning.Error())\n\t\tc.Unlock()\n\t\treturn collector.ErrAlreadyRunning\n\t}\n\n\tc.running = true\n\tc.lastStart = time.Now()\n\tc.Unlock()\n\n\tif err := 
c.parseMemstats(ctx, &metrics); err != nil {\n\t\tc.setStatus(metrics, err)\n\t\treturn fmt.Errorf(\"%s parseMemstats: %w\", c.pkgID, err)\n\t}\n\n\tif err := c.parseVMstats(ctx, &metrics); err != nil {\n\t\tc.setStatus(metrics, err)\n\t\treturn fmt.Errorf(\"%s parseVMstats: %w\", c.pkgID, err)\n\t}\n\n\tc.setStatus(metrics, nil)\n\treturn nil\n}", "func appStatsCollect(ctx *zedrouterContext) {\n\tlog.Infof(\"appStatsCollect: containerStats, started\")\n\tappStatsCollectTimer := time.NewTimer(time.Duration(ctx.appStatsInterval) * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-appStatsCollectTimer.C:\n\t\t\titems, stopped := checkAppStopStatsCollect(ctx)\n\t\t\tif stopped {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tcollectTime := time.Now() // all apps collection assign the same timestamp\n\t\t\tfor _, st := range items {\n\t\t\t\tstatus := st.(types.AppNetworkStatus)\n\t\t\t\tif status.GetStatsIPAddr != nil {\n\t\t\t\t\tacMetrics, err := appContainerGetStats(status.GetStatsIPAddr)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Errorf(\"appStatsCollect: can't get App %s Container Metrics on %s, %v\",\n\t\t\t\t\t\t\tstatus.UUIDandVersion.UUID.String(), status.GetStatsIPAddr.String(), err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tacMetrics.UUIDandVersion = status.UUIDandVersion\n\t\t\t\t\tacMetrics.CollectTime = collectTime\n\t\t\t\t\tctx.pubAppContainerMetrics.Publish(acMetrics.Key(), acMetrics)\n\t\t\t\t}\n\t\t\t}\n\t\t\tappStatsCollectTimer = time.NewTimer(time.Duration(ctx.appStatsInterval) * time.Second)\n\t\t}\n\t}\n}", "func FetchAppServerCPUStats(r Result) []float32 {\n\treturn r.AppServerStats().CPU\n}", "func (w *windowsResourceUsageGatherer) Gather(executor QueryExecutor, startTime time.Time, config *measurement.MeasurementConfig) ([]measurement.Summary, error) {\n\tcpuSummary, err := getSummary(cpuUsageQueryTop10, convertToCPUPerfData, cpuUsageMetricsName, executor, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmemorySummary, err := getSummary(memoryUsageQueryTop10, convertToMemoryPerfData, memoryUsageMetricsName, executor, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn []measurement.Summary{cpuSummary, memorySummary}, nil\n}", "func (c *HostMetricCollector) Run() (HostMetrics, error) {\n\tcpuTimes, err := cpu.Times(false)\n\tif err != nil {\n\t\t// note: can't happen on Linux. 
gopsutil doesn't\n\t\t// return an error\n\t\treturn HostMetrics{}, fmt.Errorf(\"cpu.Times() failed: %s\", err)\n\t}\n\tif len(cpuTimes) == 0 {\n\t\t// possible with hardware failure\n\t\treturn HostMetrics{}, fmt.Errorf(\"cpu.Times() returns no cpus\")\n\t}\n\tt := cpuTimes[0]\n\tjiffy := t.Total()\n\ttoPercent := 100 / (jiffy - c.lastJiffy)\n\n\tlastTimes := c.lastTimes\n\tc.lastJiffy = jiffy\n\tc.lastTimes = t\n\n\tconst mbSize float64 = 1024 * 1024\n\tvmem, err := mem.VirtualMemory()\n\tif err != nil {\n\t\t// only possible if can't parse numbers in /proc/meminfo\n\t\t// that would be massive failure\n\t\treturn HostMetrics{}, fmt.Errorf(\"mem.VirtualMemory() failed: %s:\", err)\n\t}\n\n\treturn HostMetrics{\n\t\tCPUUser: ((t.User + t.Nice) - (lastTimes.User + lastTimes.Nice)) * toPercent,\n\t\tCPUSystem: ((t.System + t.Irq + t.Softirq) - (lastTimes.System + lastTimes.Irq + lastTimes.Softirq)) * toPercent,\n\t\tCPUIowait: (t.Iowait - lastTimes.Iowait) * toPercent,\n\t\tCPUIdle: (t.Idle - lastTimes.Idle) * toPercent,\n\t\tCPUStolen: (t.Steal - lastTimes.Steal) * toPercent,\n\t\tCPUGuest: (t.Guest - lastTimes.Guest) * toPercent,\n\t\tMemTotal: float64(vmem.Total) / mbSize,\n\t\tMemFree: float64(vmem.Free) / mbSize,\n\t\tMemUsed: float64(vmem.Total-vmem.Free) / mbSize,\n\t\tMemUsable: float64(vmem.Available) / mbSize,\n\t\tMemPctUsable: float64(100-vmem.UsedPercent) / 100,\n\t}, nil\n}", "func hookStats(e *evtx.GoEvtxMap) {\n\t// We do not store stats if process termination is not enabled\n\tif flagProcTermEn {\n\t\tif guid, err := e.GetString(&sysmonProcessGUID); err == nil {\n\t\t\tpt := processTracker[guid]\n\t\t\tif pt != nil {\n\t\t\t\tswitch e.EventID() {\n\t\t\t\tcase 1:\n\t\t\t\t\tpt.Stats.CountProcessCreated++\n\t\t\t\tcase 3:\n\t\t\t\t\tpt.Stats.CountNetConn++\n\t\t\t\tcase 11:\n\t\t\t\t\tif target, err := e.GetString(&sysmonTargetFilename); err == nil {\n\t\t\t\t\t\text := filepath.Ext(target)\n\t\t\t\t\t\tif pt.Stats.CountFilesCreatedByExt[ext] == nil {\n\t\t\t\t\t\t\ti := int64(0)\n\t\t\t\t\t\t\tpt.Stats.CountFilesCreatedByExt[ext] = &i\n\t\t\t\t\t\t}\n\t\t\t\t\t\t*(pt.Stats.CountFilesCreatedByExt[ext])++\n\t\t\t\t\t}\n\t\t\t\t\tpt.Stats.CountFilesCreated++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (s *Stats) CalculateTotalCPUTimes() []CPUPercentages {\n\n percentages := make([]CPUPercentages, len(s.CPUInfo.TotalTimes))\n\n if len(s.CPUInfo.PrevTotalTimes) == 0 || len(s.CPUInfo.TotalTimes) == 0 {\n return percentages\n }\n\n var diff float64\n var total float64\n var prevTotal float64\n var prevStat cpu.TimesStat\n var cpuStat *CPUPercentages\n\n for i, t := range s.CPUInfo.TotalTimes {\n cpuStat = &percentages[i]\n prevStat = s.CPUInfo.PrevTotalTimes[i]\n\n total = t.User + t.System + t.Idle + t.Nice + t.Iowait + t.Irq + t.Softirq + t.Steal + t.Guest + t.GuestNice + t.Stolen\n prevTotal = prevStat.User + prevStat.System + prevStat.Idle + prevStat.Nice + prevStat.Iowait + prevStat.Irq + prevStat.Softirq + prevStat.Steal + prevStat.Guest + prevStat.GuestNice + prevStat.Stolen\n\n diff = total - prevTotal\n\n cpuStat.CPU = t.CPU\n cpuStat.User = (t.User - prevStat.User) / diff * 100\n cpuStat.System = (t.System - prevStat.System) / diff * 100\n cpuStat.Idle = (t.Idle - prevStat.Idle) / diff * 100\n cpuStat.Nice = (t.Nice - prevStat.Nice) / diff * 100\n cpuStat.IOWait = (t.Iowait - prevStat.Iowait) / diff * 100\n cpuStat.IRQ = (t.Irq - prevStat.Irq) / diff * 100\n cpuStat.SoftIRQ = (t.Softirq - prevStat.Softirq) / diff * 100\n cpuStat.Steal = (t.Steal - prevStat.Steal) / diff * 
100\n cpuStat.Guest = (t.Guest - prevStat.Guest) / diff * 100\n cpuStat.GuestNice = (t.GuestNice - prevStat.GuestNice) / diff * 100\n cpuStat.Stolen = (t.Stolen - prevStat.Stolen) / diff * 100\n cpuStat.Total = 100 * (diff - (t.Idle - prevStat.Idle)) / diff\n }\n\n return percentages\n}", "func CollectProcInfo(pid int) (ProcInfo, error) {\n\treturn collectProcInfo(pid)\n}", "func (c *VM) Collect() error {\n\tmetrics := cgm.Metrics{}\n\n\tc.Lock()\n\n\tif c.runTTL > time.Duration(0) {\n\t\tif time.Since(c.lastEnd) < c.runTTL {\n\t\t\tc.logger.Warn().Msg(collector.ErrTTLNotExpired.Error())\n\t\t\tc.Unlock()\n\t\t\treturn collector.ErrTTLNotExpired\n\t\t}\n\t}\n\tif c.running {\n\t\tc.logger.Warn().Msg(collector.ErrAlreadyRunning.Error())\n\t\tc.Unlock()\n\t\treturn collector.ErrAlreadyRunning\n\t}\n\n\tc.running = true\n\tc.lastStart = time.Now()\n\tc.Unlock()\n\n\tif err := c.parseMemstats(&metrics); err != nil {\n\t\tc.setStatus(metrics, err)\n\t\treturn errors.Wrap(err, c.pkgID)\n\t}\n\n\tif err := c.parseVMstats(&metrics); err != nil {\n\t\tc.setStatus(metrics, err)\n\t\treturn errors.Wrap(err, c.pkgID)\n\t}\n\n\tc.setStatus(metrics, nil)\n\treturn nil\n}", "func getProcesses(limit int) ([]interface{}, error) {\n\tprocessGroups, err := gops.TopRSSProcessGroups(limit)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsnapData := make([]ProcessField, len(processGroups))\n\n\tfor i, processGroup := range processGroups {\n\t\tprocessField := ProcessField{\n\t\t\tstrings.Join(processGroup.Usernames(), \",\"),\n\t\t\t0, // pct_cpu, requires two consecutive samples to be computed, so not fetched for now\n\t\t\tprocessGroup.PctMem(),\n\t\t\tprocessGroup.VMS(),\n\t\t\tprocessGroup.RSS(),\n\t\t\tprocessGroup.Name(),\n\t\t\tlen(processGroup.Pids()),\n\t\t}\n\t\tsnapData[i] = processField\n\t}\n\n\treturn []interface{}{time.Now().Unix(), snapData}, nil\n}", "func MonitorProcessGroup(ctx context.Context, pid int) {\n\tif pid > 0 {\n\t\tpid = -pid\n\t}\n\tfs, err := procfs.NewDefaultFS()\n\tif err != nil {\n\t\tlog.Error(ctx, \"unable to monitor process group\", zap.Int(\"pid\", pid), zap.Error(err))\n\t\treturn\n\t}\n\tctx = pctx.Child(ctx, \"\",\n\t\tpctx.WithGauge(\"open_fd_count\", 0, meters.WithFlushInterval(30*time.Second)),\n\t\tpctx.WithDelta(\"rchar_bytes\", int(1e6), meters.Deferred()),\n\t\tpctx.WithDelta(\"wchar_bytes\", int(1e6), meters.Deferred()),\n\t\tpctx.WithDelta(\"bytes_written_bytes\", int(1e6), meters.Deferred()),\n\t\tpctx.WithDelta(\"bytes_read_bytes\", int(1e6), meters.Deferred()),\n\t\tpctx.WithDelta(\"canceled_write_bytes\", int(1e6), meters.Deferred()),\n\t\tpctx.WithDelta(\"read_syscall_count\", int(1e4), meters.Deferred()),\n\t\tpctx.WithDelta(\"write_syscall_count\", int(1e4), meters.Deferred()),\n\t\tpctx.WithDelta(\"cpu_time_seconds\", float64(30), meters.WithFlushInterval(30*time.Second)),\n\t\tpctx.WithDelta(\"resident_memory_bytes\", int(100e6), meters.WithFlushInterval(30*time.Second)),\n\t\tpctx.WithGauge(\"oom_score\", 0, meters.WithFlushInterval(30*time.Minute)),\n\t)\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(2 * time.Second):\n\t\tcase <-ctx.Done():\n\t\t\tlog.Debug(ctx, \"MonitorUserCode exiting\", zap.Int(\"pid\", pid))\n\t\t\treturn\n\t\t}\n\t\tstats, err := getProcessStats(fs, pid)\n\t\tif err != nil {\n\t\t\tlog.Info(ctx, \"problem getting self stats\", zap.Error(err))\n\t\t\tcontinue\n\t\t}\n\t\tmeters.Set(ctx, \"open_fd_count\", stats.FDCount)\n\t\tmeters.Set(ctx, \"rchar_bytes\", int(stats.RChars))\n\t\tmeters.Set(ctx, \"wchar_bytes\", 
int(stats.WChars))\n\t\tmeters.Set(ctx, \"bytes_read_bytes\", int(stats.RBytes))\n\t\tmeters.Set(ctx, \"bytes_written_bytes\", int(stats.WBytes))\n\t\tmeters.Set(ctx, \"canceled_write_bytes\", int(stats.CanceledWBytes))\n\t\tmeters.Set(ctx, \"read_syscall_count\", int(stats.RSysc))\n\t\tmeters.Set(ctx, \"write_syscall_count\", int(stats.WSysc))\n\t\tmeters.Set(ctx, \"cpu_time_seconds\", stats.CPUTime)\n\t\tmeters.Set(ctx, \"resident_memory_bytes\", stats.ResidentMemory)\n\t\tmeters.Set(ctx, \"oom_score\", stats.OOMScore)\n\t}\n}", "func (bot *AwakenBot) CollectGlobalMetrics() {\n\truntime.ReadMemStats(&mem)\n\ttags := map[string]string{\"metric\": \"server_metrics\", \"server\": \"global\"}\n\tfields := map[string]interface{}{\n\t\t\"memAlloc\": int(mem.Alloc),\n\t\t\"memTotalAlloc\": int(mem.TotalAlloc),\n\t\t\"memHeapAlloc\": int(mem.HeapAlloc),\n\t\t\"memHeapSys\": int(mem.HeapSys),\n\t}\n\n\terr := bot.iDB.AddMetric(\"server_metrics\", tags, fields)\n\tif err != nil {\n\t\tlog.Errorln(\"Error adding Metric:\", err)\n\t}\n}", "func (s *Stats) CalculateCPUTimes() []CPUPercentages {\n\n percentages := make([]CPUPercentages, len(s.CPUInfo.PerCPUTimes))\n\n if len(s.CPUInfo.PrevCPUTimes) == 0 || len(s.CPUInfo.PerCPUTimes) == 0 {\n return percentages\n }\n\n var diff float64\n var total float64\n var prevTotal float64\n var prevStat cpu.TimesStat\n var cpuStat *CPUPercentages\n\n for i, t := range s.CPUInfo.PerCPUTimes {\n cpuStat = &percentages[i]\n prevStat = s.CPUInfo.PrevCPUTimes[i]\n\n total = t.User + t.System + t.Idle + t.Nice + t.Iowait + t.Irq + t.Softirq + t.Steal + t.Guest + t.GuestNice + t.Stolen\n prevTotal = prevStat.User + prevStat.System + prevStat.Idle + prevStat.Nice + prevStat.Iowait + prevStat.Irq + prevStat.Softirq + prevStat.Steal + prevStat.Guest + prevStat.GuestNice + prevStat.Stolen\n\n diff = total - prevTotal\n\n cpuStat.CPU = t.CPU\n cpuStat.User = (t.User - prevStat.User) / diff * 100\n cpuStat.System = (t.System - prevStat.System) / diff * 100\n cpuStat.Idle = (t.Idle - prevStat.Idle) / diff * 100\n cpuStat.Nice = (t.Nice - prevStat.Nice) / diff * 100\n cpuStat.IOWait = (t.Iowait - prevStat.Iowait) / diff * 100\n cpuStat.IRQ = (t.Irq - prevStat.Irq) / diff * 100\n cpuStat.SoftIRQ = (t.Softirq - prevStat.Softirq) / diff * 100\n cpuStat.Steal = (t.Steal - prevStat.Steal) / diff * 100\n cpuStat.Guest = (t.Guest - prevStat.Guest) / diff * 100\n cpuStat.GuestNice = (t.GuestNice - prevStat.GuestNice) / diff * 100\n cpuStat.Stolen = (t.Stolen - prevStat.Stolen) / diff * 100\n cpuStat.Total = 100 * (diff - (t.Idle - prevStat.Idle)) / diff\n }\n\n return percentages\n}", "func getServerStatsMemory(printDetails bool){\n\n\t/*\n\tresident, virtual, err := getServerStatsMemory(true)\n\tif err == nil {\n\t\tfmt.Printf(\"phys. 
Memory is %v - of data stock size of %v (useful only on mongod)\\n\",resident, virtual)\n\t} else {\n\t\tfmt.Println(\"no mem info\", err )\n\t}\n\t*/\n\t\n}", "func parseProcStat(content string) (procStats, error) {\n\tstats := procStats{}\n\n\ti := strings.Index(content, \"(\")\n\tif i == -1 {\n\t\treturn stats, fmt.Errorf(\"could not find command name start symbol '(' for stats: %s\", content)\n\t}\n\t// Drop the first first field which is the pid.\n\tcontent = content[i+1:]\n\n\ti = strings.Index(content, \")\")\n\tif i == -1 {\n\t\treturn stats, fmt.Errorf(\"could not find command name end symbol ')' for stats: %s\", content)\n\t}\n\n\t// Command Name found as the second field inside the brackets.\n\tstats.command = content[:i]\n\n\tfields := strings.Fields(content[i+1:])\n\n\t// Process State\n\tstats.state = fields[statState]\n\n\t// Parent PID\n\tppid, err := strconv.ParseInt(fields[statPPID], 10, 32)\n\tif err != nil {\n\t\treturn stats, errors.Wrapf(err, \"for stats: %s\", string(content))\n\t}\n\tstats.ppid = int32(ppid)\n\n\t// User time\n\tutime, err := strconv.ParseInt(fields[statUtime], 10, 64)\n\tif err != nil {\n\t\treturn stats, errors.Wrapf(err, \"for stats: %s\", string(content))\n\t}\n\tstats.cpu.User = float64(utime) / float64(clockTicks)\n\n\t// System time\n\tstime, err := strconv.ParseInt(fields[statStime], 10, 64)\n\tif err != nil {\n\t\treturn stats, errors.Wrapf(err, \"for stats: %s\", string(content))\n\t}\n\tstats.cpu.System = float64(stime) / float64(clockTicks)\n\n\t// Number of threads\n\tnthreads, err := strconv.ParseInt(fields[statNumThreads], 10, 32)\n\tif err != nil {\n\t\treturn stats, errors.Wrapf(err, \"for stats: %s\", string(content))\n\t}\n\tstats.numThreads = int32(nthreads)\n\n\t// VM Memory size\n\tstats.vmSize, err = strconv.ParseInt(fields[statVsize], 10, 64)\n\tif err != nil {\n\t\treturn stats, errors.Wrapf(err, \"for stats: %s\", string(content))\n\t}\n\n\t// VM RSS size\n\tstats.vmRSS, err = strconv.ParseInt(fields[statRss], 10, 64)\n\tif err != nil {\n\t\treturn stats, errors.Wrapf(err, \"for stats: %s\", string(content))\n\t}\n\tstats.vmRSS *= pageSize\n\n\treturn stats, nil\n}", "func GetCpuUsage() ([]float64, error) {\n\n currProcStat := getProcStats()\n usage := make([]float64, len(currProcStat))\n for i := 0; i < len(currProcStat); i++ {\n\n // Get Difference\n dIdle := float64(currProcStat[i].GetIdle() - prevProcStat[i].GetIdle())\n dTotal := float64(currProcStat[i].GetTotal() - prevProcStat[i].GetTotal())\n\n // Get Usage\n usage[i] = (dTotal - dIdle) / dTotal * 100.0\n\n }\n\n // Assign Previous Value\n prevProcStat = currProcStat\n return usage, nil\n}", "func (h BryanMemoryStatsHook) printMemStats() {\n\th.Log.Debug(\"Reading memory statistics\")\n\tvar m runtime.MemStats\n\truntime.ReadMemStats(&m)\n\t\n\th.Log.Debug(\"Printing memory statistics\")\n\th.printHeader(&m)\n\th.printMemAlloc(&m)\n\th.printMemTotalAlloc(&m)\n\th.printMemSys(&m)\n\th.printMemFrees(&m)\n\th.printMemNumGC(&m)\n\th.printFooter(&m)\n}", "func FetchDBServerMemStats(r Result) []float32 {\n\treturn r.DBServerStats().Mem\n}", "func PrintMemStats(m *runtime.MemStats, mstats, ostats, astats, gc bool, pauses int) {\n\tif mstats {\n\t\tfmt.Printf(\"Alloc=%h, TotalAlloc=%h, Sys=%h, Lookups=%h, Mallocs=%h, Frees=%h\\n\",\n\t\t\thu(m.Alloc, \"B\"), hu(m.TotalAlloc, \"B\"), hu(m.Sys, \"B\"), hu(m.Lookups, \"\"), hu(m.Mallocs, \"\"), hu(m.Frees, \"\"))\n\t\tfmt.Printf(\"HeapAlloc=%h, HeapSys=%h, HeapIdle=%h, HeapInuse=%h, HeapReleased=%h, HeapObjects=%h, 
StackInuse=%h, StackSys=%h\\n\",\n\t\t\thu(m.HeapAlloc, \"B\"), hu(m.HeapSys, \"B\"), hu(m.HeapIdle, \"B\"), hu(m.HeapInuse, \"B\"), hu(m.HeapReleased, \"B\"),\n\t\t\thu(m.HeapObjects, \"\"), hu(m.StackInuse, \"B\"), hu(m.StackSys, \"B\"))\n\t\tif ostats {\n\t\t\tfmt.Printf(\"MSpanInuse=%d, MSpanSys=%d, m.MCacheInuse=%d, MCacheSys=%d, BuckHashSys=%d, GCSys=%d, OtherSys=%d\\n\",\n\t\t\t\tm.MSpanInuse, m.MSpanSys, m.MCacheInuse, m.MCacheSys, m.BuckHashSys, m.GCSys, m.OtherSys)\n\t\t}\n\n\t\tt1 := time.Unix(0, int64(m.LastGC))\n\t\t//t2 := time.Now()\n\t\t//t3 := time.Unix(int64(0), int64(m.PauseTotalNs))\n\t\tet := time.Duration(int64(m.PauseTotalNs)) // Since(t3)\n\t\tfmt.Printf(\"NextGC=%h, NumGC=%d, LastGC=%s, PauseTotalNs=%v, NumForcedGC=%d, GCCPUFraction=%0.2f\\n\",\n\t\t\thu(m.NextGC, \"B\"), m.NumGC, t1.Format(\"15:04:05.99\"), et, m.NumForcedGC, m.GCCPUFraction)\n\t}\n\tfmt.Printf(\"\\n\")\n\n\tif astats {\n\t\tfor i, b := range m.BySize {\n\t\t\tif b.Mallocs == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfmt.Printf(\"BySize[%d]: Size=%d, Malloc=%d, Frees=%d\\n\", i, b.Size, b.Mallocs, b.Frees)\n\t\t}\n\t\tfmt.Printf(\"\\n\")\n\t}\n\n\tif gc {\n\t\tPrintCircularBuffer(\"PauseNs\", pauses, int(m.NumGC), true, m.PauseNs)\n\t\tPrintCircularBuffer(\"PauseEnd\", pauses, int(m.NumGC), false, m.PauseEnd)\n\t\tfmt.Printf(\"\\n\")\n\t}\n}", "func stats(stats elastic.BulkProcessorStats) {\n\t//构建Workers的json文本\n\tvar workersStr string\n\tvar workers Workers\n\tif err := workers.InitWorkers(stats.Workers); err == nil {\n\t\tworkersStr = workers.String()\n\t}\n\n\t//打印stats信息\n\tlog.Logger.WithFields(logrus.Fields{\n\t\t\"Flushed\": stats.Flushed,\n\t\t\"Committed\": stats.Committed,\n\t\t\"Indexed\": stats.Indexed,\n\t\t\"Created\": stats.Created,\n\t\t\"Updated\": stats.Updated,\n\t\t\"Deleted\": stats.Deleted,\n\t\t\"Succeeded\": stats.Succeeded,\n\t\t\"Failed\": stats.Failed,\n\t\t\"Workers\": workersStr,\n\t}).Info(\"stats info detail\")\n}", "func (s *SystemMetrics) GetVirtualMemoryStats(c chan *MemoryStats) {\n\tstats, err := gomem.VirtualMemory()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tc <- &MemoryStats{\n\t\t// default is always in bytes. 
hence, convert into the required format.\n\t\tTotal: stats.Total / 1000000,\n\t\tAvailable: stats.Available / 1000000,\n\t\tUsed: stats.Used / 1000000,\n\t\tUsedPercent: stats.UsedPercent,\n\t\tFree: stats.Free / 1000000,\n\t}\n}", "func (p *Libvirt) CollectMetrics(mts []plugin.MetricType) ([]plugin.MetricType, error) {\n\tmetrics := []plugin.MetricType{}\n\tconn, err := libvirt.NewVirConnection(getHypervisorURI(mts[0].Config().Table()))\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer conn.CloseConnection()\n\n\tfor _, p := range mts {\n\n\t\tns := p.Namespace()\n\t\tif ns.Strings()[1] == \"*\" {\n\t\t\tdomains, err := conn.ListDomains()\n\t\t\tif err != nil {\n\t\t\t\treturn metrics, err\n\t\t\t}\n\t\t\tfor j := 0; j < domainCount(domains); j++ {\n\t\t\t\tdom, err := conn.LookupDomainById(domains[j])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn metrics, err\n\t\t\t\t}\n\t\t\t\tdefer dom.Free()\n\t\t\t\tmetric, err := processMetric(ns.String(), dom, p)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn metrics, err\n\t\t\t\t}\n\t\t\t\tif metricReported(metrics, metric.Namespace().String()) == false {\n\t\t\t\t\thostname, err := conn.GetHostname()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn metrics, err\n\t\t\t\t\t}\n\t\t\t\t\tdomainname := p.Namespace()[1]\n\t\t\t\t\tmetric.Tags_ = addTags(hostname, domainname.Value)\n\t\t\t\t\tmetrics = append(metrics, metric)\n\t\t\t\t}\n\n\t\t\t}\n\t\t} else {\n\n\t\t\tif metricReported(metrics, p.Namespace().String()) == false {\n\t\t\t\tdomainName, err := namespacetoDomain(p.Namespace().Strings())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tdom, err := conn.LookupDomainByName(domainName)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tdefer dom.Free()\n\t\t\t\tmetric, err := processMetric(ns.String(), dom, p)\n\t\t\t\thostname, err := conn.GetHostname()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn metrics, err\n\t\t\t\t}\n\t\t\t\tdomainname := p.Namespace()[1]\n\t\t\t\tmetric.Tags_ = addTags(hostname, domainname.Value)\n\t\t\t\tmetrics = append(metrics, metric)\n\t\t\t}\n\t\t}\n\n\t}\n\treturn metrics, err\n}", "func NewProcessStat(m *metrics.MetricContext, Step time.Duration) *ProcessStat {\n\tc := new(ProcessStat)\n\tc.m = m\n\n\tc.Processes = make(map[string]*PerProcessStat, 1024)\n\tc.hport = C.host_t(C.mach_host_self())\n\n\tvar n int\n\tticker := time.NewTicker(Step)\n\tgo func() {\n\t\tfor _ = range ticker.C {\n\t\t\tp := int(len(c.Processes) / 1024)\n\t\t\tif n == 0 {\n\t\t\t\tc.Collect(true)\n\t\t\t}\n\t\t\t// always collect all metrics for first two samples\n\t\t\t// and if number of processes < 1024\n\t\t\tif p < 1 || n%p == 0 {\n\t\t\t\tc.Collect(false)\n\t\t\t}\n\t\t\tn++\n\t\t}\n\t}()\n\n\treturn c\n}", "func ReadMemStats(ms *MemStats)", "func (phStats *passwordHasherStats) accumulateStats() {\n\tphStats.logger.Print(\"Collecting stats...\")\n\tok := true\n\tfor ok {\n\t\tvar ms microseconds\n\t\tif ms, ok = <-phStats.queue; ok {\n\t\t\tphStats.logger.Printf(\"Elapsed time: %dms\", ms)\n\n\t\t\t// block reads while appending/resizing/reallocating\n\t\t\tphStats.lock.Lock()\n\t\t\tphStats.times = append(phStats.times, ms)\n\t\t\tphStats.lock.Unlock()\n\t\t}\n\t}\n\tphStats.logger.Print(\"Done collecting stats\")\n}", "func Collect(ctx context.Context) error {\n\tif !singleton.enabled {\n\t\treturn nil\n\t}\n\n\tif singleton.darkstatAddr == \"\" {\n\t\treturn fmt.Errorf(\"Darkstat address is empty\")\n\t}\n\n\tstartTime := time.Now()\n\n\tinventoryHosts := inventory.Get()\n\n\tlocalAddr, 
err := network.DefaultLocalAddr()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// To label source traffic that we need to build dependency graph\n\tlocalHostgroup := localAddr.String()\n\tlocalDomain := localAddr.String()\n\tlocalInventory, ok := inventoryHosts[localAddr.String()]\n\tif ok {\n\t\tlocalHostgroup = localInventory.Hostgroup\n\t\tlocalDomain = localInventory.Domain\n\t}\n\tlog.Debugf(\"Local address don't exist in inventory: %v\", localAddr.String())\n\n\t// Scrape darkstat prometheus endpoint for host_bytes_total\n\tvar darkstatHostBytesTotal *prom2json.Family\n\tdarkstatScrape, err := prometheus.Scrape(singleton.darkstatAddr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, v := range darkstatScrape {\n\t\tif v.Name == \"host_bytes_total\" {\n\t\t\tdarkstatHostBytesTotal = v\n\t\t\tbreak\n\t\t}\n\t}\n\tif darkstatHostBytesTotal == nil {\n\t\treturn fmt.Errorf(\"Metric host_bytes_total doesn't exist\")\n\t}\n\n\t// Extract relevant data out of host_bytes_total\n\tvar hosts []Metric\n\tfor _, m := range darkstatHostBytesTotal.Metrics {\n\t\tmetric := m.(prom2json.Metric)\n\n\t\tip := net.ParseIP(metric.Labels[\"ip\"])\n\n\t\t// Skip its own IP as we don't need it\n\t\tif ip.Equal(localAddr) {\n\t\t\tcontinue\n\t\t}\n\n\t\tinventoryHostInfo := inventoryHosts[metric.Labels[\"ip\"]]\n\n\t\tbandwidth, err := strconv.ParseFloat(metric.Value, 64)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to parse 'host_bytes_total' value: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tdirection := \"\"\n\t\t// Reversed from netfilter perspective\n\t\tswitch metric.Labels[\"dir\"] {\n\t\tcase \"out\":\n\t\t\tdirection = \"ingress\"\n\t\tcase \"in\":\n\t\t\tdirection = \"egress\"\n\t\t}\n\n\t\thosts = append(hosts, Metric{\n\t\t\tLocalHostgroup: localHostgroup,\n\t\t\tRemoteHostgroup: inventoryHostInfo.Hostgroup,\n\t\t\tRemoteIPAddr: metric.Labels[\"ip\"],\n\t\t\tLocalDomain: localDomain,\n\t\t\tRemoteDomain: inventoryHostInfo.Domain,\n\t\t\tDirection: direction,\n\t\t\tBandwidth: bandwidth,\n\t\t})\n\t}\n\n\tsingleton.mu.Lock()\n\tsingleton.hosts = hosts\n\tsingleton.mu.Unlock()\n\n\tlog.Debugf(\"taskdarkstat.Collect retrieved %v downstreams metrics\", len(hosts))\n\tlog.Debugf(\"taskdarkstat.Collect process took %v\", time.Since(startTime))\n\treturn nil\n}", "func getCpuMem() (float64, float64) {\n\tvar sumCPU, sumMEM float64\n\tcmd := exec.Command(\"ps\", \"aux\") // ps aux is the command used to get cpu and ram usage\n\tvar out bytes.Buffer\n\tcmd.Stdout = &out //catching the command output\n\terr := cmd.Run() //running the command\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfor {\n\t\tline, err := out.ReadString('\\n') //breaking the output in lines\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\ttokens := strings.Split(line, \" \") //spliting each output line\n\t\tft := make([]string, 0)\n\t\tfor _, t := range tokens {\n\t\t\tif t != \"\" && t != \"\\t\" {\n\t\t\t\tft = append(ft, t) //for each line there is a buffer (ft) that keeps all the parameters\n\t\t\t}\n\t\t}\n\t\tif cpu, err := strconv.ParseFloat(ft[2], 32); err == nil { // parsing the cpu variable, as string, to float\n\t\t\tsumCPU += cpu //all the cpu's used capacity at the instant\n\t\t}\n\t\tif mem, err := strconv.ParseFloat(ft[3], 32); err == nil { // parsing the ram variable, as string, to float\n\t\t\tsumMEM += mem //all the ram's used capacity at the instant\n\t\t}\n\t}\n\tlog.Println(\"Used CPU\", sumCPU/8, \"%\", \" Used Memory RAM\", sumMEM, \"%\")\n\treturn sumCPU / 8, sumMEM //the cpu's total used capacity is splitted by 8 
because its the total number of my PC's cores\n\t//otherwise, we would see outputs bigger than 100%\n}", "func (cpuCollector *CPUCollector) Collect() {\n\tcpuCollector.cpuStats.GetCPUStats()\n\n\tcpuCollector.cpuMetrics.cpuTotal.Set(float64(cpuCollector.cpuStats.Total))\n\tcpuCollector.cpuMetrics.cupIdle.Set(float64(cpuCollector.cpuStats.Idle))\n\tcpuCollector.cpuMetrics.cpuUtilization.Set(cpuCollector.cpuStats.Utilization)\n}", "func (l *Libvirt) NodeGetMemoryStats(Nparams int32, CellNum int32, Flags uint32) (rParams []NodeGetMemoryStats, rNparams int32, err error) {\n\tvar buf []byte\n\n\targs := NodeGetMemoryStatsArgs {\n\t\tNparams: Nparams,\n\t\tCellNum: CellNum,\n\t\tFlags: Flags,\n\t}\n\n\tbuf, err = encode(&args)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar r response\n\tr, err = l.requestStream(228, constants.Program, buf, nil, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Return value unmarshaling\n\ttpd := typedParamDecoder{}\n\tct := map[string]xdr.TypeDecoder{\"libvirt.TypedParam\": tpd}\n\trdr := bytes.NewReader(r.Payload)\n\tdec := xdr.NewDecoderCustomTypes(rdr, 0, ct)\n\t// Params: []NodeGetMemoryStats\n\t_, err = dec.Decode(&rParams)\n\tif err != nil {\n\t\treturn\n\t}\n\t// Nparams: int32\n\t_, err = dec.Decode(&rNparams)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func measureSpammerMetrics() {\n\tif spammerStartTime.IsZero() {\n\t\t// Spammer not started yet\n\t\treturn\n\t}\n\n\tsentSpamMsgsCnt := deps.ServerMetrics.SentSpamMessages.Load()\n\tnew := utils.GetUint32Diff(sentSpamMsgsCnt, lastSentSpamMsgsCnt)\n\tlastSentSpamMsgsCnt = sentSpamMsgsCnt\n\n\tspammerAvgHeap.Add(uint64(new))\n\n\ttimeDiff := time.Since(spammerStartTime)\n\tif timeDiff > 60*time.Second {\n\t\t// Only filter over one minute maximum\n\t\ttimeDiff = 60 * time.Second\n\t}\n\n\t// trigger events for outside listeners\n\tEvents.AvgSpamMetricsUpdated.Trigger(&spammer.AvgSpamMetrics{\n\t\tNewMessages: new,\n\t\tAverageMessagesPerSecond: spammerAvgHeap.GetAveragePerSecond(timeDiff),\n\t})\n}", "func (m *ServiceMgr) getAggEventProcessingStats(w http.ResponseWriter, r *http.Request) {\n\tlogPrefix := \"ServiceMgr::getAggEventProcessingStats\"\n\n\tif !m.validateAuth(w, r, EventingPermissionManage) {\n\t\treturn\n\t}\n\n\tparams := r.URL.Query()\n\tappName := params[\"name\"][0]\n\n\tutil.Retry(util.NewFixedBackoff(time.Second), nil, getEventingNodesAddressesOpCallback, m)\n\n\tpStats, err := util.GetEventProcessingStats(\"/getEventProcessingStats?name=\"+appName, m.eventingNodeAddrs)\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"Failed to get event processing stats, err: %v\", err)\n\t\treturn\n\t}\n\n\tbuf, err := json.MarshalIndent(pStats, \"\", \" \")\n\tif err != nil {\n\t\tlogging.Errorf(\"%s Failed to unmarshal event processing stats from all producers, err: %v\", logPrefix, err)\n\t\treturn\n\t}\n\n\tfmt.Fprintf(w, \"%s\", string(buf))\n}", "func ComputeStats(res *sdk.Result, v *venom.Tests) []string {\n\t// update global stats\n\tfor _, ts := range v.TestSuites {\n\t\tnSkipped := 0\n\t\tfor _, tc := range ts.TestCases {\n\t\t\tnSkipped += len(tc.Skipped)\n\t\t}\n\t\tif ts.Skipped < nSkipped {\n\t\t\tts.Skipped = nSkipped\n\t\t}\n\t\tif ts.Total < len(ts.TestCases)-nSkipped {\n\t\t\tts.Total = len(ts.TestCases) - nSkipped\n\t\t}\n\t\tv.Total += ts.Total\n\t\tv.TotalOK += ts.Total - ts.Failures - ts.Errors\n\t\tv.TotalKO += ts.Failures + ts.Errors\n\t\tv.TotalSkipped += ts.Skipped\n\t}\n\n\tvar nbOK, nbKO, nbSkipped int\n\n\treasons := []string{}\n\treasons = append(reasons, 
fmt.Sprintf(\"JUnit parser: %d testsuite(s)\", len(v.TestSuites)))\n\n\tfor i, ts := range v.TestSuites {\n\t\tvar nbKOTC, nbFailures, nbErrors, nbSkippedTC int\n\t\tif ts.Name == \"\" {\n\t\t\tts.Name = fmt.Sprintf(\"TestSuite.%d\", i)\n\t\t}\n\t\treasons = append(reasons, fmt.Sprintf(\"JUnit parser: testsuite %s has %d testcase(s)\", ts.Name, len(ts.TestCases)))\n\t\tfor k, tc := range ts.TestCases {\n\t\t\tif tc.Name == \"\" {\n\t\t\t\ttc.Name = fmt.Sprintf(\"TestCase.%d\", k)\n\t\t\t}\n\t\t\tif len(tc.Failures) > 0 {\n\t\t\t\treasons = append(reasons, fmt.Sprintf(\"JUnit parser: testcase %s has %d failure(s)\", tc.Name, len(tc.Failures)))\n\t\t\t\tnbFailures += len(tc.Failures)\n\t\t\t}\n\t\t\tif len(tc.Errors) > 0 {\n\t\t\t\treasons = append(reasons, fmt.Sprintf(\"JUnit parser: testcase %s has %d error(s)\", tc.Name, len(tc.Errors)))\n\t\t\t\tnbErrors += len(tc.Errors)\n\t\t\t}\n\t\t\tif len(tc.Failures) > 0 || len(tc.Errors) > 0 {\n\t\t\t\tnbKOTC++\n\t\t\t} else if len(tc.Skipped) > 0 {\n\t\t\t\tnbSkippedTC += len(tc.Skipped)\n\t\t\t}\n\t\t\tv.TestSuites[i].TestCases[k] = tc\n\t\t}\n\t\tnbOK += len(ts.TestCases) - nbKOTC\n\t\tnbKO += nbKOTC\n\t\tnbSkipped += nbSkippedTC\n\t\tif ts.Failures > nbFailures {\n\t\t\tnbFailures = ts.Failures\n\t\t}\n\t\tif ts.Errors > nbErrors {\n\t\t\tnbErrors = ts.Errors\n\t\t}\n\n\t\tif nbFailures > 0 {\n\t\t\treasons = append(reasons, fmt.Sprintf(\"JUnit parser: testsuite %s has %d failure(s)\", ts.Name, nbFailures))\n\t\t}\n\t\tif nbErrors > 0 {\n\t\t\treasons = append(reasons, fmt.Sprintf(\"JUnit parser: testsuite %s has %d error(s)\", ts.Name, nbErrors))\n\t\t}\n\t\tif nbKOTC > 0 {\n\t\t\treasons = append(reasons, fmt.Sprintf(\"JUnit parser: testsuite %s has %d test(s) failed\", ts.Name, nbKOTC))\n\t\t}\n\t\tif nbSkippedTC > 0 {\n\t\t\treasons = append(reasons, fmt.Sprintf(\"JUnit parser: testsuite %s has %d test(s) skipped\", ts.Name, nbSkippedTC))\n\t\t}\n\t\tv.TestSuites[i] = ts\n\t}\n\n\tif nbKO > v.TotalKO {\n\t\tv.TotalKO = nbKO\n\t}\n\n\tif nbOK != v.TotalOK {\n\t\tv.TotalOK = nbOK\n\t}\n\n\tif nbSkipped != v.TotalSkipped {\n\t\tv.TotalSkipped = nbSkipped\n\t}\n\n\tif v.TotalKO+v.TotalOK != v.Total {\n\t\tv.Total = v.TotalKO + v.TotalOK + v.TotalSkipped\n\t}\n\n\tres.Status = sdk.StatusFail\n\tif v.TotalKO == 0 {\n\t\tres.Status = sdk.StatusSuccess\n\t}\n\treturn reasons\n}", "func (p *Prom) CollectStdout(in *bufio.Reader) {\n\tvar stats Metrics\n\tfor {\n\t\tline, err := in.ReadBytes('\\n')\n\t\tif err == io.EOF {\n\t\t\treturn\n\t\t}\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t}\n\t\tif err := json.Unmarshal(line, &stats); err != nil {\n\t\t\tfmt.Fprint(os.Stdout, string(line))\n\t\t\tcontinue\n\t\t}\n\t\tif stats.MessageType != \"summary\" 
{\n\t\t\tcontinue\n\t\t}\n\t\tp.duration.WithLabelValues(p.labelValues...).Set(float64(stats.TotalDuration))\n\t\tp.filesNew.WithLabelValues(p.labelValues...).Set(float64(stats.FilesNew))\n\t\tp.filesUnmodified.WithLabelValues(p.labelValues...).Set(float64(stats.FilesUnmodified))\n\t\tp.filesChanged.WithLabelValues(p.labelValues...).Set(float64(stats.FilesChanged))\n\t\tp.dirsNew.WithLabelValues(p.labelValues...).Set(float64(stats.DirsNew))\n\t\tp.dirsChanged.WithLabelValues(p.labelValues...).Set(float64(stats.DirsChanged))\n\t\tp.dirsUnmodified.WithLabelValues(p.labelValues...).Set(float64(stats.DirsUnmodified))\n\t\tp.bytesAdded.WithLabelValues(p.labelValues...).Set(float64(stats.DataAdded))\n\t\tp.bytesProcessed.WithLabelValues(p.labelValues...).Set(float64(stats.TotalBytesProcessed))\n\t\tp.parsed = true\n\t}\n}", "func (s *Systemctl) Gather(acc telegraf.Accumulator) error {\n\ts.mux.Lock()\n\tdefer s.mux.Unlock()\n\n\t// for each systemctl service being monitored\n\tfor _, aggregator := range s.Aggregators {\n\t\t// aggregate the data from the set of samples\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"InputPlugin\": \"systemctl\",\n\t\t\t\"ResourceName\": aggregator.ResourceName,\n\t\t}).Debug(\"Aggregating\")\n\t\terr := aggregator.Aggregate()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// create fields\n\t\tfields := map[string]interface{}{\n\t\t\t\"current_state_time\": aggregator.CurrentStateDuration,\n\t\t\t\"current_state\": aggregator.CurrentState,\n\t\t}\n\t\tfor k := range aggregator.AggState {\n\t\t\tfields[k] = aggregator.AggState[k]\n\t\t}\n\t\t// create tags\n\t\ttags := map[string]string{\"resource\": aggregator.ResourceName}\n\t\tacc.AddFields(\"service_config_state\", fields, tags)\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"InputPlugin\": \"systemctl\",\n\t\t\t\"ResourceName\": aggregator.ResourceName,\n\t\t}).Debug(\"Added fields\")\n\t}\n\treturn nil\n}", "func MemTop() error {\n\tconst tformat = \"%.3f\\t|%v\\t|%v\\t|%.3f\\t|%v\\t|%.3f\\t|%v\\t|%v\\t|%v\\t|%v\\t|%v\\t\\n\"\n\tconst format = \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t\\n\"\n\ttw := new(tabwriter.Writer).Init(os.Stdout, 0, 4, 1, ' ', 0)\n\tps, err := GetProcessStats()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := Top(ps, \"ram\"); err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Fprintf(tw, format, \"%MEM\", \"RSS\", \"VSZ\", \"%CPU\", \"START\", \"TIME\", \"PID\", \"USER\", \"TTY\", \"STAT\", \"COMMAND\")\n\tfmt.Fprintf(tw, format, \"----\", \"---\", \"---\", \"----\", \"-----\", \"----\", \"---\", \"----\", \"---\", \"----\", \"-------\")\n\tfor _, p := range ps {\n\t\tif p.IsGhostProcess() {\n\t\t\tcontinue\n\t\t}\n\n\t\tstat := p.Stat\n\t\tprocessStartTime := startTime(stat.createTime())\n\n\t\tfmt.Fprintf(tw, tformat, p.Memp, util.TransformSize(uint64(stat.Rss)), util.TransformSize(uint64(stat.Vsize)), p.Cpup, processStartTime, p.Cput, p.Pid, p.User, p.getTerminalName(), stat.State, strings.Trim(p.Name, \"()\"))\n\t}\n\ttw.Flush()\n\n\treturn nil\n}" ]
[ "0.7562619", "0.6592916", "0.6283652", "0.62427837", "0.6217113", "0.57899576", "0.5756989", "0.5741851", "0.5728166", "0.5649872", "0.55758524", "0.5550586", "0.5515233", "0.5495391", "0.5487253", "0.5464003", "0.5422401", "0.5420728", "0.5385302", "0.536197", "0.53588146", "0.53567845", "0.53540343", "0.5316201", "0.5313902", "0.5301826", "0.5297538", "0.5281538", "0.52700967", "0.52634466", "0.52561736", "0.5249091", "0.5244547", "0.5242712", "0.5238455", "0.52356875", "0.52259266", "0.52133256", "0.52074903", "0.519781", "0.51942474", "0.5172617", "0.51631445", "0.51577187", "0.51554555", "0.51542854", "0.5125998", "0.5110018", "0.5106939", "0.5105471", "0.51016366", "0.50672334", "0.50657856", "0.5061239", "0.50400776", "0.50391805", "0.50381213", "0.50298196", "0.50147957", "0.5012422", "0.4992248", "0.49851757", "0.49833196", "0.49788475", "0.49759915", "0.49707237", "0.49634263", "0.49619776", "0.49517688", "0.4951145", "0.49442336", "0.49312258", "0.49240574", "0.4921595", "0.4921352", "0.49203274", "0.49194118", "0.49178046", "0.4902981", "0.4890327", "0.48883143", "0.4881795", "0.4881026", "0.48698428", "0.4864906", "0.48629856", "0.48624495", "0.48586625", "0.48392174", "0.48377907", "0.48252076", "0.48229417", "0.48138446", "0.48126185", "0.48087314", "0.48048082", "0.48036855", "0.4803082", "0.4800335", "0.4795988" ]
0.6729161
1
gatherPluginsStats goes through a list of plugins and adds their metrics to the accumulator
func (logstash *Logstash) gatherPluginsStats( plugins []Plugin, pluginType string, tags map[string]string, accumulator telegraf.Accumulator, ) error { for _, plugin := range plugins { pluginTags := map[string]string{ "plugin_name": plugin.Name, "plugin_id": plugin.ID, "plugin_type": pluginType, } for tag, value := range tags { pluginTags[tag] = value } flattener := jsonParser.JSONFlattener{} err := flattener.FlattenJSON("", plugin.Events) if err != nil { return err } accumulator.AddFields("logstash_plugins", flattener.Fields, pluginTags) if plugin.Failures != nil { failuresFields := map[string]interface{}{"failures": *plugin.Failures} accumulator.AddFields("logstash_plugins", failuresFields, pluginTags) } /* The elasticsearch & opensearch output produces additional stats around bulk requests and document writes (that are elasticsearch and opensearch specific). Collect those below: */ if pluginType == "output" && (plugin.Name == "elasticsearch" || plugin.Name == "opensearch") { /* The "bulk_requests" section has details about batch writes into Elasticsearch "bulk_requests" : { "successes" : 2870, "responses" : { "200" : 2870 }, "failures": 262, "with_errors": 9089 }, */ flattener := jsonParser.JSONFlattener{} err := flattener.FlattenJSON("", plugin.BulkRequests) if err != nil { return err } for k, v := range flattener.Fields { if strings.HasPrefix(k, "bulk_requests") { continue } newKey := fmt.Sprintf("bulk_requests_%s", k) flattener.Fields[newKey] = v delete(flattener.Fields, k) } accumulator.AddFields("logstash_plugins", flattener.Fields, pluginTags) /* The "documents" section has counts of individual documents written/retried/etc. "documents" : { "successes" : 2665549, "retryable_failures": 13733 } */ flattener = jsonParser.JSONFlattener{} err = flattener.FlattenJSON("", plugin.Documents) if err != nil { return err } for k, v := range flattener.Fields { if strings.HasPrefix(k, "documents") { continue } newKey := fmt.Sprintf("documents_%s", k) flattener.Fields[newKey] = v delete(flattener.Fields, k) } accumulator.AddFields("logstash_plugins", flattener.Fields, pluginTags) } } return nil }
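A note on the renaming passes in the document above: FlattenJSON("", ...) emits keys with no prefix, and Go's map iteration may or may not yield entries inserted mid-loop, so the strings.HasPrefix guard keeps keys already renamed during the same range from being prefixed twice. Below is a minimal, hedged sketch of just that flatten-then-prefix step, runnable on its own; it assumes telegraf's JSON flattener package (imported as jsonParser in the function above) is available, and the sample values are illustrative float64s, matching what JSON decoding produces and what the flattener keeps as fields.

package main

import (
	"fmt"
	"strings"

	jsonParser "github.com/influxdata/telegraf/plugins/parsers/json"
)

func main() {
	// Stand-in for plugin.BulkRequests, shaped like the commented sample
	// in gatherPluginsStats.
	bulkRequests := map[string]interface{}{
		"successes":   2870.0,
		"failures":    262.0,
		"with_errors": 9089.0,
		"responses":   map[string]interface{}{"200": 2870.0},
	}

	flattener := jsonParser.JSONFlattener{}
	if err := flattener.FlattenJSON("", bulkRequests); err != nil {
		fmt.Println("flatten failed:", err)
		return
	}

	// Same renaming pass as gatherPluginsStats: prefix every flattened key,
	// skipping keys that already carry the prefix (i.e. entries inserted
	// while this map is still being ranged over).
	for k, v := range flattener.Fields {
		if strings.HasPrefix(k, "bulk_requests") {
			continue
		}
		flattener.Fields["bulk_requests_"+k] = v
		delete(flattener.Fields, k)
	}

	fmt.Println(flattener.Fields)
	// Expected keys: bulk_requests_successes, bulk_requests_failures,
	// bulk_requests_with_errors, bulk_requests_responses_200
}

The "documents" section in the function is handled with the identical pattern, only with a documents_ prefix.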
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (p *Plugin) CollectMetrics(metrics []plugin.Metric) ([]plugin.Metric, error) {\n\tvar mtxMetrics sync.Mutex\n\tvar wgCollectedMetrics sync.WaitGroup\n\n\t//initialization of plugin structure (only once)\n\tif !p.initialized {\n\t\tconfigs, err := getMetricsConfig(metrics[0].Config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, cfg := range configs {\n\t\t\tnamespace := core.NewNamespace(Vendor, PluginName)\n\t\t\tfor _, ns := range cfg.Namespace {\n\t\t\t\tif ns.Source == configReader.NsSourceString {\n\t\t\t\t\tnamespace = namespace.AddStaticElement(ns.String)\n\t\t\t\t} else {\n\t\t\t\t\tnamespace = namespace.AddDynamicElement(ns.Name, ns.Description)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif _, metricExist := p.metricsConfigs[namespace.String()]; metricExist {\n\t\t\t\tlogFields := map[string]interface{}{\n\t\t\t\t\t\"namespace\": namespace.String(),\n\t\t\t\t\t\"previous_metric_configuration\": p.metricsConfigs[namespace.String()],\n\t\t\t\t\t\"current_metric_configuration\": cfg,\n\t\t\t\t}\n\t\t\t\tlog.WithFields(logFields).Warn(fmt.Errorf(\"Plugin configuration file (`setfile`) contains metrics definitions which expose the same namespace, only one of them is in use. Correction of plugin configuration file (`setfile`) is recommended.\"))\n\t\t\t} else {\n\t\t\t\t//add metric configuration to plugin metric map\n\t\t\t\tp.metricsConfigs[namespace.String()] = cfg\n\t\t\t}\n\t\t}\n\t\tp.initialized = true\n\t}\n\n\tagentConfig, err := configReader.GetSnmpAgentConfig(metrics[0].Config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//lock using of connections in watchConnections\n\tmtxSnmpConnections.Lock()\n\tdefer mtxSnmpConnections.Unlock()\n\n\tconn, err := getConnection(agentConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmts := []plugin.Metric{}\n\n\tfor _, metric := range metrics {\n\n\t\t//get metrics to collect\n\t\tmetricsConfigs, err := getMetricsToCollect(metric.Namespace.String(), p.metricsConfigs)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\twgCollectedMetrics.Add(len(metricsConfigs))\n\n\t\tfor _, cfg := range metricsConfigs {\n\n\t\t\tgo func(cfg configReader.Metric) {\n\n\t\t\t\tdefer wgCollectedMetrics.Done()\n\n\t\t\t\tconn.mtx.Lock()\n\n\t\t\t\t//get value of metric/metrics\n\t\t\t\tresults, err := snmp_.readElements(conn.handler, cfg.Oid, cfg.Mode)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Warn(err)\n\t\t\t\t\tconn.mtx.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t//get dynamic elements of namespace parts\n\t\t\t\terr = getDynamicNamespaceElements(conn.handler, results, &cfg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tconn.mtx.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tconn.lastUsed = time.Now()\n\t\t\t\tconn.mtx.Unlock()\n\n\t\t\t\tfor i, result := range results {\n\n\t\t\t\t\t//build namespace for metric\n\t\t\t\t\tnamespace := plugin.NewNamespace(Vendor, PluginName)\n\t\t\t\t\toffset := len(namespace)\n\t\t\t\t\tfor j, ns := range cfg.Namespace {\n\t\t\t\t\t\tif ns.Source == configReader.NsSourceString {\n\t\t\t\t\t\t\tnamespace = namespace.AddStaticElements(ns.String)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tnamespace = namespace.AddDynamicElement(ns.Name, ns.Description)\n\t\t\t\t\t\t\tnamespace[j+offset].Value = ns.Values[i]\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t//convert metric types\n\t\t\t\t\tval, err := convertSnmpDataToMetric(result.Variable.String(), result.Variable.Type())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\t//modify numeric metric - use scale and shift 
parameters\n\t\t\t\t\tdata := modifyNumericMetric(val, cfg.Scale, cfg.Shift)\n\n\t\t\t\t\t//creating metric\n\t\t\t\t\tmt := plugin.Metric{\n\t\t\t\t\t\tNamespace: namespace,\n\t\t\t\t\t\tData: data,\n\t\t\t\t\t\tTimestamp: time.Now(),\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\ttagSnmpAgentName: agentConfig.Name,\n\t\t\t\t\t\t\ttagSnmpAgentAddress: agentConfig.Address,\n\t\t\t\t\t\t\ttagOid: result.Oid.String()},\n\t\t\t\t\t\tUnit: metric.Unit,\n\t\t\t\t\t\tDescription: metric.Description,\n\t\t\t\t\t}\n\n\t\t\t\t\t//adding metric to list of metrics\n\t\t\t\t\tmtxMetrics.Lock()\n\n\t\t\t\t\t//filter specific instance\n\t\t\t\t\tnsPattern := strings.Replace(metric.Namespace.String(), \"*\", \".*\", -1)\n\t\t\t\t\tmatched, err := regexp.MatchString(nsPattern, mt.Namespace.String())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlogFields := map[string]interface{}{\"namespace\": mt.Namespace.String(), \"pattern\": nsPattern, \"match_error\": err}\n\t\t\t\t\t\terr := fmt.Errorf(\"Cannot parse namespace element for matching\")\n\t\t\t\t\t\tlog.WithFields(logFields).Warn(err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tif matched {\n\t\t\t\t\t\tmts = append(mts, mt)\n\t\t\t\t\t}\n\n\t\t\t\t\tmtxMetrics.Unlock()\n\t\t\t\t}\n\t\t\t}(cfg)\n\t\t}\n\t\twgCollectedMetrics.Wait()\n\t}\n\treturn mts, nil\n}", "func (p *Pool) PluginStatisticPerHost() map[string][]PluginStatistic {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\tresult := make(map[string][]PluginStatistic)\n\tfor host, pls := range p.hosts {\n\t\tif _, ok := result[host]; !ok {\n\t\t\tresult[host] = make([]PluginStatistic, 0)\n\t\t}\n\t\tfor _, pl := range pls.plugins {\n\t\t\tresult[host] = append(result[host], pl.getStatistics())\n\t\t}\n\t}\n\treturn result\n}", "func (g gatherer) GatherMetrics(ctx context.Context, out *apm.Metrics) error {\n\tmetricFamilies, err := g.p.Gather()\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\tfor _, mf := range metricFamilies {\n\t\tname := mf.GetName()\n\t\tswitch mf.GetType() {\n\t\tcase dto.MetricType_COUNTER:\n\t\t\tfor _, m := range mf.GetMetric() {\n\t\t\t\tv := m.GetCounter().GetValue()\n\t\t\t\tout.Add(name, makeLabels(m.GetLabel()), v)\n\t\t\t}\n\t\tcase dto.MetricType_GAUGE:\n\t\t\tmetrics := mf.GetMetric()\n\t\t\tif name == \"go_info\" && len(metrics) == 1 && metrics[0].GetGauge().GetValue() == 1 {\n\t\t\t\t// Ignore the \"go_info\" metric from the\n\t\t\t\t// built-in GoCollector, as we provide\n\t\t\t\t// the same information in the payload.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, m := range metrics {\n\t\t\t\tv := m.GetGauge().GetValue()\n\t\t\t\tout.Add(name, makeLabels(m.GetLabel()), v)\n\t\t\t}\n\t\tcase dto.MetricType_UNTYPED:\n\t\t\tfor _, m := range mf.GetMetric() {\n\t\t\t\tv := m.GetUntyped().GetValue()\n\t\t\t\tout.Add(name, makeLabels(m.GetLabel()), v)\n\t\t\t}\n\t\tcase dto.MetricType_SUMMARY:\n\t\t\tfor _, m := range mf.GetMetric() {\n\t\t\t\ts := m.GetSummary()\n\t\t\t\tlabels := makeLabels(m.GetLabel())\n\t\t\t\tout.Add(name+\".count\", labels, float64(s.GetSampleCount()))\n\t\t\t\tout.Add(name+\".total\", labels, float64(s.GetSampleSum()))\n\t\t\t\tfor _, q := range s.GetQuantile() {\n\t\t\t\t\tp := int(q.GetQuantile() * 100)\n\t\t\t\t\tout.Add(name+\".percentile.\"+strconv.Itoa(p), labels, q.GetValue())\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\t// TODO(axw) MetricType_HISTOGRAM\n\t\t}\n\t}\n\treturn nil\n}", "func (m *manager) AllStats() []*device.DeviceGroupStats {\n\t// Go through each plugin and collect stats\n\tvar stats []*device.DeviceGroupStats\n\tfor 
_, i := range m.instances {\n\t\tstats = append(stats, i.AllStats()...)\n\t}\n\n\treturn stats\n}", "func (a *Agent) ExternalPluginsHealthCheck() {\n\tfor _, p := range a.plugins {\n\t\tp.ScheduleHealthCheck()\n\t}\n}", "func ExtractOutputsFromPlugins(\n\tloadedPlugins []*PluginLoaded,\n\toutputs *map[string]kombustionTypes.ParserFunc,\n) {\n\tfor _, plugin := range loadedPlugins {\n\t\tif *plugin.Outputs != nil {\n\t\t\tfor key, parserFunc := range *plugin.Outputs {\n\t\t\t\tpluginKey := fmt.Sprintf(\"%s::%s\", plugin.InternalConfig.Prefix, key)\n\t\t\t\tif _, ok := (*outputs)[pluginKey]; ok { // Check for duplicates\n\n\t\t\t\t\tprinter.Fatal(\n\t\t\t\t\t\tfmt.Errorf(\"Plugin `%s` tried to load output `%s` but it already exists\", plugin.Config.Name, pluginKey),\n\t\t\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\t\t\"You can add a `Alias` to this plugin in kombustion.yaml to resolve this.\",\n\t\t\t\t\t\t),\n\t\t\t\t\t\t\"https://www.kombustion.io/api/manifest/#alias-optional\",\n\t\t\t\t\t)\n\t\t\t\t} else {\n\t\t\t\t\twrappedParserFunc := func(\n\t\t\t\t\t\tname string, data string,\n\t\t\t\t\t) (kombustionTypes.TemplateObject, error) {\n\t\t\t\t\t\toutputs, errs := loadOutput(parserFunc(name, data))\n\n\t\t\t\t\t\thasErrors := false\n\t\t\t\t\t\tfor _, err := range errs {\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\thasErrors = true\n\n\t\t\t\t\t\t\t\tprinter.Error(\n\t\t\t\t\t\t\t\t\terr,\n\t\t\t\t\t\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\t\t\t\t\t\"\\n ├─ Name: %s\\n ├─ Plugin: %s\\n └─ Type: %s\",\n\t\t\t\t\t\t\t\t\t\tname,\n\t\t\t\t\t\t\t\t\t\tplugin.Config.Name,\n\t\t\t\t\t\t\t\t\t\tpluginKey,\n\t\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\t\t\t\"\",\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif hasErrors {\n\t\t\t\t\t\t\treturn outputs, fmt.Errorf(\"There were errors parsing %s\", name)\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn outputs, nil\n\t\t\t\t\t}\n\t\t\t\t\t(*outputs)[pluginKey] = wrappedParserFunc\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}", "func DescribePlugins() string {\n\tpl := ListPlugins()\n\n\tstr := \"Server types:\\n\"\n\tfor _, name := range pl[\"server_types\"] {\n\t\tstr += \" \" + name + \"\\n\"\n\t}\n\n\tstr += \"\\nCaddyfile loaders:\\n\"\n\tfor _, name := range pl[\"caddyfile_loaders\"] {\n\t\tstr += \" \" + name + \"\\n\"\n\t}\n\n\tif len(pl[\"event_hooks\"]) > 0 {\n\t\tstr += \"\\nEvent hook plugins:\\n\"\n\t\tfor _, name := range pl[\"event_hooks\"] {\n\t\t\tstr += \" hook.\" + name + \"\\n\"\n\t\t}\n\t}\n\n\tif len(pl[\"clustering\"]) > 0 {\n\t\tstr += \"\\nClustering plugins:\\n\"\n\t\tfor _, name := range pl[\"clustering\"] {\n\t\t\tstr += \" \" + name + \"\\n\"\n\t\t}\n\t}\n\n\tstr += \"\\nOther plugins:\\n\"\n\tfor _, name := range pl[\"others\"] {\n\t\tstr += \" \" + name + \"\\n\"\n\t}\n\n\treturn str\n}", "func ExtractOutputsFromPlugins(\n\tloadedPlugins []*PluginLoaded,\n\toutputs *map[string]kombustionTypes.ParserFunc,\n) {\n\tfor _, plugin := range loadedPlugins {\n\t\tif *plugin.Outputs != nil {\n\t\t\tfor key, parserFunc := range *plugin.Outputs {\n\t\t\t\tpluginKey := fmt.Sprintf(\"%s::%s\", plugin.InternalConfig.Prefix, key)\n\t\t\t\tif _, ok := (*outputs)[pluginKey]; ok { // Check for duplicates\n\n\t\t\t\t\tprinter.Fatal(\n\t\t\t\t\t\tfmt.Errorf(\"Plugin `%s` tried to load output `%s` but it already exists\", plugin.Config.Name, pluginKey),\n\t\t\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\t\t\"You can add a `prefix` to this plugin in kombustion.yaml to resolve this.\",\n\t\t\t\t\t\t),\n\t\t\t\t\t\t\"\",\n\t\t\t\t\t)\n\t\t\t\t} else 
{\n\t\t\t\t\twrappedParserFunc := func(\n\t\t\t\t\t\tname string, data string,\n\t\t\t\t\t) (\n\t\t\t\t\t\tkombustionTypes.TemplateObject,\n\t\t\t\t\t\terror,\n\t\t\t\t\t) {\n\t\t\t\t\t\treturn loadResource(parserFunc(name, data))\n\t\t\t\t\t}\n\t\t\t\t\t(*outputs)[pluginKey] = wrappedParserFunc\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}", "func (g gatherer) GatherMetrics(ctx context.Context, m *elasticapm.Metrics) error {\n\tg.r.Each(func(name string, v interface{}) {\n\t\tswitch v := v.(type) {\n\t\tcase metrics.Counter:\n\t\t\tm.Add(name, nil, float64(v.Count()))\n\t\tcase metrics.Gauge:\n\t\t\tm.Add(name, nil, float64(v.Value()))\n\t\tcase metrics.GaugeFloat64:\n\t\t\tm.Add(name, nil, v.Value())\n\t\tcase metrics.Histogram:\n\t\t\tm.Add(name+\".count\", nil, float64(v.Count()))\n\t\t\tm.Add(name+\".total\", nil, float64(v.Sum()))\n\t\t\tm.Add(name+\".min\", nil, float64(v.Min()))\n\t\t\tm.Add(name+\".max\", nil, float64(v.Max()))\n\t\t\tm.Add(name+\".stddev\", nil, v.StdDev())\n\t\t\tm.Add(name+\".percentile.50\", nil, v.Percentile(0.5))\n\t\t\tm.Add(name+\".percentile.95\", nil, v.Percentile(0.95))\n\t\t\tm.Add(name+\".percentile.99\", nil, v.Percentile(0.99))\n\t\tdefault:\n\t\t\t// TODO(axw) Meter, Timer, EWMA\n\t\t}\n\t})\n\treturn nil\n}", "func (p *Pilot) InstallPlugins(pilot *v1alpha1.Pilot) error {\n\tinstalled, err := p.getInstalledPlugins(pilot)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error listing installed plugins: %s\", err.Error())\n\t}\n\tglog.V(4).Infof(\"There are %d plugins already installed: %v\", len(installed), installed)\n\tfor _, plugin := range p.Options.ElasticsearchOptions.Plugins {\n\t\tif installed.Has(plugin) {\n\t\t\tglog.V(4).Infof(\"Skipping already installed plugin '%s'\", plugin)\n\t\t\tcontinue\n\t\t}\n\n\t\terr := p.installPlugin(pilot, plugin)\n\t\tif err != nil {\n\t\t\tglog.V(4).Infof(\"Error installing plugin '%s': %s\", plugin, err.Error())\n\t\t\treturn err\n\t\t}\n\n\t\tglog.V(4).Infof(\"Successfully installed plugin '%s'\", plugin)\n\t}\n\treturn nil\n}", "func (g *Generator) runPlugins(file *FileDescriptor) {\n\tfor _, p := range plugins {\n\t\tp.Generate(file)\n\t}\n}", "func (pm Manager) RefreshPlugins() error {\n\tdir, err := filepath.Abs(filepath.Dir(os.Args[0]))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpluginFolder := filepath.Join(dir, \"plugins\") + \"/\"\n\n\tlog.Println(\"Refreshing plugins...\")\n\n\tfiles, err := ioutil.ReadDir(pluginFolder)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, f := range files {\n\t\tfile := f.Name()\n\n\t\t// We should only load plugin files\n\t\tif filepath.Ext(file) != \".so\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Trim the extension\n\t\tid := strings.TrimSuffix(file, \".so\")\n\n\t\t// If the plugin is already detected, ignore it\n\t\tif pm.HasPlugin(id) {\n\t\t\tcontinue\n\t\t}\n\n\t\tplugin, err := LoadPlugin(id, pluginFolder+file)\n\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to load plugin:\", id)\n\t\t\tlog.Println(\" \", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Println(\"Loaded plugin:\", plugin.Name, \"version\", plugin.Version, \"by\", plugin.Authors, \"(\", plugin.ShortDescription, \")\")\n\n\t\tpm.plugins[id] = *plugin\n\t}\n\n\treturn nil\n}", "func (a *Agent) initPlugins() error {\n\tfor _, input := range a.Config.Inputs {\n\t\terr := input.Init()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not initialize input %s: %v\",\n\t\t\t\tinput.LogName(), err)\n\t\t}\n\t}\n\tfor _, processor := range a.Config.Processors {\n\t\terr := processor.Init()\n\t\tif err != nil 
{\n\t\t\treturn fmt.Errorf(\"could not initialize processor %s: %v\",\n\t\t\t\tprocessor.Config.Name, err)\n\t\t}\n\t}\n\tfor _, aggregator := range a.Config.Aggregators {\n\t\terr := aggregator.Init()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not initialize aggregator %s: %v\",\n\t\t\t\taggregator.Config.Name, err)\n\t\t}\n\t}\n\tfor _, processor := range a.Config.AggProcessors {\n\t\terr := processor.Init()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not initialize processor %s: %v\",\n\t\t\t\tprocessor.Config.Name, err)\n\t\t}\n\t}\n\tfor _, output := range a.Config.Outputs {\n\t\terr := output.Init()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not initialize output %s: %v\",\n\t\t\t\toutput.Config.Name, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (r *ReconcileGrafana) ReconcileDashboardPlugins(cr *i8ly.Grafana) error {\n\t// Waited long enough for dashboards to be ready?\n\tif r.plugins.CanUpdatePlugins() == false {\n\t\treturn nil\n\t}\n\n\t// Fetch all plugins of all dashboards\n\tvar requestedPlugins i8ly.PluginList\n\tfor _, v := range common.GetControllerConfig().Plugins {\n\t\trequestedPlugins = append(requestedPlugins, v...)\n\t}\n\n\t// Consolidate plugins and create the new list of plugin requirements\n\t// If 'updated' is false then no changes have to be applied\n\tfilteredPlugins, updated := r.plugins.FilterPlugins(cr, requestedPlugins)\n\n\tif updated {\n\t\tr.ReconcilePlugins(cr, filteredPlugins)\n\n\t\t// Update the dashboards that had their plugins modified\n\t\t// to let the owners know about the status\n\t\terr := r.updateDashboardMessages(filteredPlugins)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (sr *ServicedStatsReporter) gatherStats(t time.Time) []Sample {\n\tstats := []Sample{}\n\t// Handle the host metrics.\n\treg, _ := sr.hostRegistry.(*metrics.StandardRegistry)\n\treg.Each(func(name string, i interface{}) {\n\t\ttagmap := map[string]string{\n\t\t\t\"controlplane_host_id\": sr.hostID,\n\t\t}\n\t\tswitch metric := i.(type) {\n\t\tcase metrics.Gauge:\n\t\t\tstats = append(stats, Sample{name, strconv.FormatInt(metric.Value(), 10), t.Unix(), tagmap})\n\t\tcase metrics.GaugeFloat64:\n\t\t\tstats = append(stats, Sample{name, strconv.FormatFloat(metric.Value(), 'f', -1, 32), t.Unix(), tagmap})\n\t\t}\n\t})\n\t// Handle each container's metrics.\n\tfor key, registry := range sr.containerRegistries {\n\t\treg, _ := registry.(*metrics.StandardRegistry)\n\t\treg.Each(func(name string, i interface{}) {\n\t\t\ttagmap := map[string]string{\n\t\t\t\t\"controlplane_host_id\": sr.hostID,\n\t\t\t\t\"controlplane_service_id\": key.serviceID,\n\t\t\t\t\"controlplane_instance_id\": strconv.FormatInt(int64(key.instanceID), 10),\n\t\t\t}\n\t\t\tswitch metric := i.(type) {\n\t\t\tcase metrics.Gauge:\n\t\t\t\tstats = append(stats, Sample{name, strconv.FormatInt(metric.Value(), 10), t.Unix(), tagmap})\n\t\t\tcase metrics.GaugeFloat64:\n\t\t\t\tstats = append(stats, Sample{name, strconv.FormatFloat(metric.Value(), 'f', -1, 32), t.Unix(), tagmap})\n\t\t\t}\n\t\t})\n\t}\n\treturn stats\n}", "func (u *Use) CollectMetrics(mts []plugin.Metric) ([]plugin.Metric, error) {\n\tcfg := mts[0].Config\n\tif !u.initialized {\n\t\tu.init(cfg)\n\t}\n\n\tmetrics := make([]plugin.Metric, len(mts))\n\tfor i, p := range mts {\n\t\tns := p.Namespace.String()\n\t\tswitch {\n\t\tcase cpure.MatchString(ns):\n\t\t\tmetric, err := u.computeStat(p.Namespace)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.New(\"Unable to get compute stat: \" + 
err.Error())\n\t\t\t}\n\t\t\tmetrics[i] = *metric\n\n\t\tcase storre.MatchString(ns):\n\t\t\tmetric, err := u.diskStat(p.Namespace)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.New(\"Unable to get disk stat: \" + err.Error())\n\t\t\t}\n\t\t\tmetrics[i] = *metric\n\t\tcase memre.MatchString(ns):\n\t\t\tmetric, err := memStat(p.Namespace, u.VmStatPath, u.MemInfoPath)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.New(\"Unable to get mem stat: \" + err.Error())\n\t\t\t}\n\t\t\tmetrics[i] = *metric\n\t\t}\n\t\ttags, err := hostTags()\n\n\t\tif err == nil {\n\t\t\tmetrics[i].Tags = tags\n\t\t}\n\t\tmetrics[i].Timestamp = time.Now()\n\n\t}\n\treturn metrics, nil\n}", "func ExtractResourcesFromPlugins(\n\tloadedPlugins []*PluginLoaded,\n\tresources *map[string]kombustionTypes.ParserFunc,\n) {\n\tfor _, plugin := range loadedPlugins {\n\t\tif *plugin.Resources != nil {\n\t\t\tfor key, parserFunc := range *plugin.Resources {\n\t\t\t\tpluginKey := fmt.Sprintf(\"%s::%s\", plugin.InternalConfig.Prefix, key)\n\t\t\t\tif _, ok := (*resources)[pluginKey]; ok { // Check for duplicates\n\t\t\t\t\tprinter.Fatal(\n\t\t\t\t\t\tfmt.Errorf(\"Plugin `%s` tried to load resource `%s` but it already exists\", plugin.Config.Name, pluginKey),\n\t\t\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\t\t\"You can add a `Alias` to this plugin in kombustion.yaml to resolve this.\",\n\t\t\t\t\t\t),\n\t\t\t\t\t\t\"https://www.kombustion.io/api/manifest/#alias-optional\",\n\t\t\t\t\t)\n\t\t\t\t} else {\n\t\t\t\t\twrappedParserFunc := func(\n\t\t\t\t\t\tname string, data string,\n\t\t\t\t\t) (kombustionTypes.TemplateObject, error) {\n\t\t\t\t\t\tresources, errs := loadResource(parserFunc(name, data))\n\t\t\t\t\t\thasErrors := false\n\t\t\t\t\t\tfor _, err := range errs {\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\thasErrors = true\n\t\t\t\t\t\t\t\tprinter.Error(\n\t\t\t\t\t\t\t\t\terr,\n\t\t\t\t\t\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\t\t\t\t\t\"\\n ├─ Name: %s\\n ├─ Plugin: %s\\n └─ Type: %s\",\n\t\t\t\t\t\t\t\t\t\tname,\n\t\t\t\t\t\t\t\t\t\tplugin.Config.Name,\n\t\t\t\t\t\t\t\t\t\tpluginKey,\n\t\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\t\t\t\"\",\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif hasErrors {\n\t\t\t\t\t\t\treturn resources, fmt.Errorf(\"There were errors parsing %s\", name)\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn resources, nil\n\t\t\t\t\t}\n\t\t\t\t\t(*resources)[pluginKey] = wrappedParserFunc\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (a *Agent) startPlugins() {\n\t// iterate over and start each plugin\n\tfor _, plugin := range a.plugins {\n\t\tplugin.LogInfo()\n\t\tfunc(p Plugin) {\n\t\t\tgo p.Run()\n\t\t}(plugin)\n\t}\n}", "func (logstash *Logstash) Gather(accumulator telegraf.Accumulator) error {\n\tif logstash.client == nil {\n\t\tclient, err := logstash.createHTTPClient()\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlogstash.client = client\n\t}\n\n\tif choice.Contains(\"jvm\", logstash.Collect) {\n\t\tjvmURL, err := url.Parse(logstash.URL + jvmStats)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := logstash.gatherJVMStats(jvmURL.String(), accumulator); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif choice.Contains(\"process\", logstash.Collect) {\n\t\tprocessURL, err := url.Parse(logstash.URL + processStats)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := logstash.gatherProcessStats(processURL.String(), accumulator); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif choice.Contains(\"pipelines\", logstash.Collect) {\n\t\tif logstash.SinglePipeline 
{\n\t\t\tpipelineURL, err := url.Parse(logstash.URL + pipelineStats)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := logstash.gatherPipelineStats(pipelineURL.String(), accumulator); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tpipelinesURL, err := url.Parse(logstash.URL + pipelinesStats)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := logstash.gatherPipelinesStats(pipelinesURL.String(), accumulator); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (h *httpJSONRPCClient) CollectMetrics(mts []core.Metric) ([]core.Metric, error) {\n\tvar results []core.Metric\n\tif len(mts) == 0 {\n\t\treturn nil, errors.New(\"no metrics to collect\")\n\t}\n\n\tmetricsToCollect := make([]plugin.PluginMetricType, len(mts))\n\tfor idx, mt := range mts {\n\t\tmetricsToCollect[idx] = plugin.PluginMetricType{\n\t\t\tNamespace_: mt.Namespace(),\n\t\t\tLastAdvertisedTime_: mt.LastAdvertisedTime(),\n\t\t\tVersion_: mt.Version(),\n\t\t\tTags_: mt.Tags(),\n\t\t\tLabels_: mt.Labels(),\n\t\t\tConfig_: mt.Config(),\n\t\t}\n\t}\n\n\targs := &plugin.CollectMetricsArgs{PluginMetricTypes: metricsToCollect}\n\n\tout, err := h.encoder.Encode(args)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres, err := h.call(\"Collector.CollectMetrics\", []interface{}{out})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(res.Result) == 0 {\n\t\terr := errors.New(\"Invalid response: result is 0\")\n\t\tlogger.WithFields(log.Fields{\n\t\t\t\"_block\": \"CollectMetrics\",\n\t\t\t\"jsonrpc response\": fmt.Sprintf(\"%+v\", res),\n\t\t}).Error(err)\n\t\treturn nil, err\n\t}\n\tr := &plugin.CollectMetricsReply{}\n\terr = h.encoder.Decode(res.Result, r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresults = make([]core.Metric, len(r.PluginMetrics))\n\tidx := 0\n\tfor _, m := range r.PluginMetrics {\n\t\tresults[idx] = m\n\t\tidx++\n\t}\n\n\treturn results, nil\n}", "func (cli *Client) PluginList(ctx context.Context) (types.PluginsListResponse, error) {\n\tvar plugins types.PluginsListResponse\n\tresp, err := cli.get(ctx, \"/plugins\", nil, nil)\n\tif err != nil {\n\t\treturn plugins, err\n\t}\n\n\terr = json.NewDecoder(resp.body).Decode(&plugins)\n\tensureReaderClosed(resp)\n\treturn plugins, err\n}", "func (a *Agent) LogExternalPluginsInfo() {\n\tfor _, plugin := range a.plugins {\n\t\tif plugin.IsExternal() {\n\t\t\tplugin.LogInfo()\n\t\t}\n\t}\n}", "func (vm *VM) ScansPlugins(cmd *cobra.Command, args []string) {\n\treturn\n}", "func listPlugins(dir string) (result []Plugin, err error) {\n\titems, err := os.ReadDir(dir)\n\tif err != nil {\n\t\treturn\n\t}\n\tfor _, item := range items {\n\t\tif item.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\tname := item.Name()\n\t\tif !strings.HasPrefix(name, pluginPrefix) {\n\t\t\tcontinue\n\t\t}\n\t\tpath := filepath.Join(dir, name)\n\t\tvar exec bool\n\t\texec, err = isExecutable(path)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tif !exec {\n\t\t\tfmt.Printf(\"Warning: %s identified as an ocm plugin, but it is not executable.\\n\", path)\n\t\t}\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tname = strings.TrimSuffix(name, \".exe\")\n\t\t}\n\t\tplugin := Plugin{\n\t\t\tName: name,\n\t\t\tPath: dir,\n\t\t}\n\t\tresult = append(result, plugin)\n\t}\n\treturn\n}", "func (p *Plugins) RefreshPlugins() {\n\tfor _, plugin := range p.plugins {\n\t\tif !p.pluginRefreshNeeded(plugin) {\n\t\t\tcontinue\n\t\t}\n\t\taction(fmt.Sprintf(\"Parsing %s\", plugin.Name), \"done\", func() {\n\t\t\t_, err := 
p.ParsePlugin(plugin.Name, \"symlink\")\n\t\t\tWarnIfError(err)\n\t\t})\n\t}\n}", "func getAvailablePluginInfo(restPluginsMap restPlugins) []restPluginsAvailable {\n\tvar availablePluginsMap []restPluginsAvailable\n\tfor _, plugin := range restPluginsMap.Plugins {\n\t\tlog.Debug(\"getting: \", plugin.Name, \", available info\")\n\t\tavailablePluginURL := baseURL + \"available/\" + plugin.Key + \"-key\"\n\t\tlog.Debug(\"requesting URL: \" + availablePluginURL)\n\t\treq, err := http.NewRequest(\"GET\", availablePluginURL, nil)\n\t\tif err != nil {\n\t\t\tlog.Error(\"http.NewRequest returned an error:\", err)\n\t\t}\n\n\t\tlog.Debug(\"add authorization header to the request\")\n\t\treq.Header.Add(\"Authorization\", bearer)\n\n\t\tlog.Debug(\"make request... get back a response\")\n\t\tres, err := http.DefaultClient.Do(req)\n\t\tif err != nil {\n\t\t\tlog.Error(\"http.DefaultClient.Do returned an error:\", err)\n\t\t}\n\t\tdefer res.Body.Close()\n\n\t\tif res.StatusCode != 200 {\n\t\t\tlog.Debug(\"response status code: \", res.StatusCode, \" continuing to next plugin\")\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Debug(\"get the body out of the response\")\n\t\tbody, err := ioutil.ReadAll(res.Body)\n\t\tif err != nil {\n\t\t\tlog.Error(\"ioutil.ReadAll returned an error:\", err)\n\t\t}\n\n\t\tif len(body) < 1 {\n\t\t\tlog.Debug(\"body was empty, continue to next plugin\")\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Debug(\"create temp map object\")\n\t\tvar tempMap restPluginsAvailable\n\n\t\tlog.Debug(\"unmarshal (turn unicode back into a string) request body into map structure\")\n\t\terr = json.Unmarshal(body, &tempMap)\n\t\tif err != nil {\n\t\t\tlog.Error(\"error Unmarshalling: \", err)\n\t\t\tlog.Info(\"Problem unmarshalling the following string: \", string(body))\n\t\t}\n\n\t\t// add the enabled value from the plugin map to the available map\n\t\ttempMap.Enabled = plugin.Enabled\n\n\t\tlog.Debug(\"adding plugin: \", tempMap.Name, \", and Key: \", tempMap.Key)\n\t\tavailablePluginsMap = append(availablePluginsMap, tempMap)\n\n\t}\n\n\treturn availablePluginsMap\n}", "func loadPlugins(indexes []indexoperations.Index) []pluginEntry {\n\tvar out []pluginEntry\n\tfor _, idx := range indexes {\n\t\tlist, err := indexscanner.LoadPluginListFromFS(paths.IndexPluginsPath(idx.Name))\n\t\tif err != nil {\n\t\t\tklog.V(1).Infof(\"WARNING: failed to load plugin list from %q: %v\", idx.Name, err)\n\t\t\tcontinue\n\t\t}\n\t\tfor _, v := range list {\n\t\t\tout = append(out, pluginEntry{indexName: idx.Name, p: v})\n\t\t}\n\t}\n\treturn out\n}", "func ExtractResourcesFromPlugins(\n\tloadedPlugins []*PluginLoaded,\n\tresources *map[string]kombustionTypes.ParserFunc,\n) {\n\tfor _, plugin := range loadedPlugins {\n\t\tif *plugin.Resources != nil {\n\t\t\tfor key, parserFunc := range *plugin.Resources {\n\t\t\t\tpluginKey := fmt.Sprintf(\"%s::%s\", plugin.InternalConfig.Prefix, key)\n\t\t\t\tif _, ok := (*resources)[pluginKey]; ok { // Check for duplicates\n\t\t\t\t\tprinter.Fatal(\n\t\t\t\t\t\tfmt.Errorf(\"Plugin `%s` tried to load resource `%s` but it already exists\", plugin.Config.Name, pluginKey),\n\t\t\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\t\t\"You can add a `prefix` to this plugin in kombustion.yaml to resolve this.\",\n\t\t\t\t\t\t),\n\t\t\t\t\t\t\"\",\n\t\t\t\t\t)\n\t\t\t\t} else {\n\t\t\t\t\twrappedParserFunc := func(\n\t\t\t\t\t\tname string, data string,\n\t\t\t\t\t) (\n\t\t\t\t\t\tkombustionTypes.TemplateObject,\n\t\t\t\t\t\terror,\n\t\t\t\t\t) {\n\t\t\t\t\t\treturn loadResource(parserFunc(name, 
data))\n\t\t\t\t\t}\n\t\t\t\t\t(*resources)[pluginKey] = wrappedParserFunc\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func buildPlugins(options *Options, metricWriter MetricWriter) PluginManagerContainer {\n\tplugins := make(PluginManagerContainer)\n\n\tif !options.UseLocalRouter {\n\t\tplugin := NewPluginManager(options.RouterPlugin, options.RouterPluginArgs, RouterPlugin, metricWriter)\n\t\tplugins[RouterPlugin] = []PluginManager{plugin}\n\t}\n\n\tif !options.UseLocalLoadBalancer {\n\t\tplugin := NewPluginManager(options.LoadBalancerPlugin, options.LoadBalancerPluginArgs, LoadBalancerPlugin, metricWriter)\n\t\tplugins[LoadBalancerPlugin] = []PluginManager{plugin}\n\t}\n\n\tfor _, plugin := range options.UpstreamPlugins {\n\t\tplugins[UpstreamPlugin] = append(plugins[UpstreamPlugin], NewPluginManager(plugin, options.UpstreamPluginArgs, UpstreamPlugin, metricWriter))\n\t}\n\n\tfor _, plugin := range options.ModifierPlugins {\n\t\tplugins[ModifierPlugin] = append(plugins[ModifierPlugin], NewPluginManager(plugin, options.ModifierPluginArgs, ModifierPlugin, metricWriter))\n\t}\n\n\tfor _, plugin := range options.MetricPlugins {\n\t\tplugins[MetricPlugin] = append(plugins[MetricPlugin], NewPluginManager(plugin, options.MetricPluginArgs, MetricPlugin, metricWriter))\n\t}\n\treturn plugins\n}", "func buildPlugins(plugins []apiplexPluginConfig, lifecyclePluginType reflect.Type) ([]interface{}, error) {\n\tbuilt := make([]interface{}, len(plugins))\n\tfor i, config := range plugins {\n\t\tptype, ok := registeredPlugins[config.Plugin]\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"No plugin named '%s' available.\", config.Plugin)\n\t\t}\n\t\tpt := reflect.New(ptype.pluginType)\n\n\t\tif ptype.pluginType.Implements(lifecyclePluginType) {\n\t\t\treturn nil, fmt.Errorf(\"Plugin '%s' (%s) cannot be loaded as %s.\", config.Plugin, ptype.pluginType.Name(), lifecyclePluginType.Name())\n\t\t}\n\n\t\tdefConfig := pt.MethodByName(\"DefaultConfig\").Call([]reflect.Value{})[0].Interface().(map[string]interface{})\n\t\tif err := ensureDefaults(config.Config, defConfig); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"While configuring '%s': %s\", config.Plugin, err.Error())\n\t\t}\n\t\tmaybeErr := pt.MethodByName(\"Configure\").Call([]reflect.Value{reflect.ValueOf(config.Config)})[0].Interface()\n\t\tif maybeErr != nil {\n\t\t\terr := maybeErr.(error)\n\t\t\treturn nil, fmt.Errorf(\"While configuring '%s': %s\", config.Plugin, err.Error())\n\t\t}\n\t\tbuilt[i] = pt.Interface()\n\t}\n\treturn built, nil\n}", "func (p *Pilot) getInstalledPlugins(pilot *v1alpha1.Pilot) (sets.String, error) {\n\tstdout := new(bytes.Buffer)\n\tcmd := exec.Command(p.Options.ElasticsearchOptions.PluginBinary, \"list\")\n\tcmd.Env = p.env().Strings()\n\tcmd.Stdout = stdout\n\tcmd.Stderr = p.Options.StdErr\n\tif err := cmd.Run(); err != nil {\n\t\treturn nil, err\n\t}\n\tstrOutput := stdout.String()\n\tpluginsSlice := strings.Split(strOutput, \"\\n\")\n\treturn sets.NewString(pluginsSlice...), nil\n}", "func GetAllPlugins(ctx context.Context) ([]restClient.Plugin, error) {\n\tkeys, err := GetAllKeysFromTable(\"Plugin\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar plugins []restClient.Plugin\n\tfor _, key := range keys {\n\t\tplugin, err := GetPluginData(key)\n\t\tif err != nil {\n\t\t\tl.LogWithFields(ctx).Error(\"failed to get details of \" + key + \" plugin: \" + err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tplugins = append(plugins, plugin)\n\t}\n\treturn plugins, nil\n}", "func (c *Canary) GatherMetrics(config schemas.Config) 
error {\n\tif !c.StepStatus[constants.StepCleanChecking] {\n\t\treturn nil\n\t}\n\tif config.DisableMetrics {\n\t\treturn nil\n\t}\n\n\tif len(config.Region) > 0 {\n\t\tif !CheckRegionExist(config.Region, c.Stack.Regions) {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tif !config.CompleteCanary {\n\t\tc.Logger.Debug(\"Skip gathering metrics because canary is now applied\")\n\t\treturn nil\n\t}\n\n\tif err := c.Deployer.StartGatheringMetrics(config); err != nil {\n\t\treturn err\n\t}\n\n\tc.StepStatus[constants.StepGatherMetrics] = true\n\treturn nil\n}", "func (s *Systemctl) Gather(acc telegraf.Accumulator) error {\n\ts.mux.Lock()\n\tdefer s.mux.Unlock()\n\n\t// for each systemctl service being monitored\n\tfor _, aggregator := range s.Aggregators {\n\t\t// aggregate the data from the set of samples\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"InputPlugin\": \"systemctl\",\n\t\t\t\"ResourceName\": aggregator.ResourceName,\n\t\t}).Debug(\"Aggregating\")\n\t\terr := aggregator.Aggregate()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// create fields\n\t\tfields := map[string]interface{}{\n\t\t\t\"current_state_time\": aggregator.CurrentStateDuration,\n\t\t\t\"current_state\": aggregator.CurrentState,\n\t\t}\n\t\tfor k := range aggregator.AggState {\n\t\t\tfields[k] = aggregator.AggState[k]\n\t\t}\n\t\t// create tags\n\t\ttags := map[string]string{\"resource\": aggregator.ResourceName}\n\t\tacc.AddFields(\"service_config_state\", fields, tags)\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"InputPlugin\": \"systemctl\",\n\t\t\t\"ResourceName\": aggregator.ResourceName,\n\t\t}).Debug(\"Added fields\")\n\t}\n\treturn nil\n}", "func (p *PluginService) PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) {\n\treturn types.PluginsListResponse{}, nil\n}", "func (gp *GaugePlugins) NotifyPlugins(message *gauge_messages.Message) {\n\tvar handle = func(id string, p *plugin, err error) {\n\t\tif err != nil {\n\t\t\tlogger.Errorf(true, \"Unable to connect to plugin %s %s. 
%s\\n\", p.descriptor.Name, p.descriptor.Version, err.Error())\n\t\t\tgp.killPlugin(id)\n\t\t}\n\t}\n\n\tfor id, plugin := range gp.pluginsMap {\n\t\thandle(id, plugin, plugin.sendMessage(message))\n\t}\n}", "func AvailablePlugins() map[string]apiplexPluginInfo {\n\treturn registeredPlugins\n}", "func (pluginExecutionImp) RunPlugins(\n\tcontext context.T,\n\tassociationID string,\n\tdocumentCreatedDate string,\n\tplugins []docModel.PluginState,\n\tpluginRegistry runpluginutil.PluginRegistry,\n\tassocUpdate runpluginutil.UpdateAssociation,\n\tcancelFlag task.CancelFlag,\n) (pluginOutputs map[string]*contracts.PluginResult) {\n\treturn engine.RunPlugins(context, associationID, documentCreatedDate, plugins, pluginRegistry, nil, assocUpdate, cancelFlag)\n}", "func (w *windowsResourceUsageGatherer) Gather(executor QueryExecutor, startTime time.Time, config *measurement.MeasurementConfig) ([]measurement.Summary, error) {\n\tcpuSummary, err := getSummary(cpuUsageQueryTop10, convertToCPUPerfData, cpuUsageMetricsName, executor, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmemorySummary, err := getSummary(memoryUsageQueryTop10, convertToMemoryPerfData, memoryUsageMetricsName, executor, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn []measurement.Summary{cpuSummary, memorySummary}, nil\n}", "func Plugins(r *Runner) error {\n\tfmt.Println(\"~~~ Cleaning plugins cache ~~~\")\n\tos.RemoveAll(plugins.CachePath)\n\tplugs, err := plugdeps.List(r.App)\n\tif err != nil && (errx.Unwrap(err) != plugdeps.ErrMissingConfig) {\n\t\treturn err\n\t}\n\n\trun := genny.WetRunner(context.Background())\n\tgg, err := install.New(&install.Options{\n\t\tApp: r.App,\n\t\tPlugins: plugs.List(),\n\t})\n\n\trun.WithGroup(gg)\n\n\tfmt.Println(\"~~~ Reinstalling plugins ~~~\")\n\treturn run.Run()\n}", "func (logstash *Logstash) gatherPipelinesStats(address string, accumulator telegraf.Accumulator) error {\n\tpipelinesStats := &PipelinesStats{}\n\n\terr := logstash.gatherJSONData(address, pipelinesStats)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor pipelineName, pipeline := range pipelinesStats.Pipelines {\n\t\ttags := map[string]string{\n\t\t\t\"node_id\": pipelinesStats.ID,\n\t\t\t\"node_name\": pipelinesStats.Name,\n\t\t\t\"node_version\": pipelinesStats.Version,\n\t\t\t\"pipeline\": pipelineName,\n\t\t\t\"source\": pipelinesStats.Host,\n\t\t}\n\n\t\tflattener := jsonParser.JSONFlattener{}\n\t\terr := flattener.FlattenJSON(\"\", pipeline.Events)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\taccumulator.AddFields(\"logstash_events\", flattener.Fields, tags)\n\n\t\terr = logstash.gatherPluginsStats(pipeline.Plugins.Inputs, \"input\", tags, accumulator)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = logstash.gatherPluginsStats(pipeline.Plugins.Filters, \"filter\", tags, accumulator)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = logstash.gatherPluginsStats(pipeline.Plugins.Outputs, \"output\", tags, accumulator)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = logstash.gatherQueueStats(&pipeline.Queue, tags, accumulator)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func RegisterCredentialProviderPlugins(pluginConfigFile, pluginBinDir string) error {\n\tif _, err := os.Stat(pluginBinDir); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn fmt.Errorf(\"plugin binary directory %s did not exist\", pluginBinDir)\n\t\t}\n\n\t\treturn fmt.Errorf(\"error inspecting binary directory %s: %w\", pluginBinDir, 
err)\n\t}\n\n\tcredentialProviderConfig, err := readCredentialProviderConfigFile(pluginConfigFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terrs := validateCredentialProviderConfig(credentialProviderConfig)\n\tif len(errs) > 0 {\n\t\treturn fmt.Errorf(\"failed to validate credential provider config: %v\", errs.ToAggregate())\n\t}\n\n\t// Register metrics for credential providers\n\tregisterMetrics()\n\n\tfor _, provider := range credentialProviderConfig.Providers {\n\t\tpluginBin := filepath.Join(pluginBinDir, provider.Name)\n\t\tif _, err := os.Stat(pluginBin); err != nil {\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\treturn fmt.Errorf(\"plugin binary executable %s did not exist\", pluginBin)\n\t\t\t}\n\n\t\t\treturn fmt.Errorf(\"error inspecting binary executable %s: %w\", pluginBin, err)\n\t\t}\n\n\t\tplugin, err := newPluginProvider(pluginBinDir, provider)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error initializing plugin provider %s: %w\", provider.Name, err)\n\t\t}\n\n\t\tcredentialprovider.RegisterCredentialProvider(provider.Name, plugin)\n\t}\n\n\treturn nil\n}", "func GetAllPlugins() []Executor {\n\treturn []Executor{\n\t\tExecutor{\n\t\t\tPlugin: upwork.Upwork{},\n\t\t},\n\t\tExecutor{\n\t\t\tPlugin: wlg.Wlg{},\n\t\t},\n\t}\n}", "func StepInstallPlugins(ctx context.Context,\n\tpluginManager pluginmanager.PluginManager,\n\tconfigItems []configs.ConfigItem,\n) error {\n\tdeduplicate := map[string]struct{}{}\n\tfor _, config := range configItems {\n\t\tfor _, pkg := range config.Config().Plugins {\n\t\t\tdeduplicate[pkg] = struct{}{}\n\t\t}\n\t}\n\tprogress := progressbar.GetProgressBar(ctx, len(deduplicate))\n\tprogress.SetPrefix(\"Install plugins\")\n\tpluginsMap := map[string]struct{}{}\n\tfor pkg := range deduplicate {\n\t\tpath, version, ok := util.SplitGoPackageVersion(pkg)\n\t\tif !ok {\n\t\t\treturn errors.Errorf(\"invalid format: %s, should in path@version format\", pkg)\n\t\t}\n\t\tif version == \"latest\" {\n\t\t\tprogress.SetSuffix(\"query latest version of %s\", path)\n\t\t\tdata, err := pluginManager.GetPluginLatestVersion(ctx, path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tversion = data\n\t\t\tpkg = util.JoinGoPackageVersion(path, version)\n\t\t}\n\t\tprogress.SetSuffix(\"check cache of %s\", pkg)\n\t\texists, _, err := pluginManager.IsPluginInstalled(ctx, path, version)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif exists {\n\t\t\tprogress.SetSuffix(\"%s is cached\", pkg)\n\t\t} else {\n\t\t\tprogress.SetSuffix(\"installing %s\", pkg)\n\t\t\t_, err := pluginManager.InstallPlugin(ctx, path, version)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tprogress.SetSuffix(\"%s installed\", pkg)\n\t\t}\n\t\tpluginsMap[pkg] = struct{}{}\n\t\tprogress.Incr()\n\t}\n\tprogress.SetSuffix(\"all plugins have been installed\")\n\tprogress.Wait()\n\n\tfmt.Println(\"the following plugins will be used:\")\n\tfor pkg := range pluginsMap {\n\t\tfmt.Printf(\"\t%s\\r\\n\", pkg)\n\t}\n\treturn nil\n}", "func PluginsDo(f func(plugin core.Pluginer) error) error {\n\treturn pluginRegistry.Do(f)\n}", "func (throttler *Throttler) aggregateMySQLMetrics(ctx context.Context) error {\n\tfor clusterName, probes := range throttler.mysqlInventory.ClustersProbes {\n\t\tmetricName := fmt.Sprintf(\"mysql/%s\", clusterName)\n\t\tignoreHostsCount := throttler.mysqlInventory.IgnoreHostsCount[clusterName]\n\t\tignoreHostsThreshold := throttler.mysqlInventory.IgnoreHostsThreshold[clusterName]\n\t\taggregatedMetric := aggregateMySQLProbes(ctx, probes, clusterName, 
throttler.mysqlInventory.InstanceKeyMetrics, ignoreHostsCount, config.Settings().Stores.MySQL.IgnoreDialTCPErrors, ignoreHostsThreshold)\n\t\tthrottler.aggregatedMetrics.Set(metricName, aggregatedMetric, cache.DefaultExpiration)\n\t}\n\treturn nil\n}", "func initializePlugins(t testing.TB, configDir string) {\n\tt.Helper()\n\n\tt.Cleanup(func() {\n\t\tif t.Failed() {\n\t\t\tif conf, err := os.ReadFile(filepath.Join(configDir, \"config.json\")); err == nil {\n\t\t\t\tt.Logf(\"Config: %s\\n\", string(conf))\n\t\t\t}\n\t\t\tt.Log(\"Contents of config dir:\")\n\t\t\tfor _, p := range dirContents(configDir) {\n\t\t\t\tt.Logf(\" - %s\", p)\n\t\t\t}\n\t\t}\n\t})\n\n\trequire.NoError(t, os.MkdirAll(filepath.Join(configDir, \"cli-plugins\"), 0o755),\n\t\t\"Failed to create cli-plugins directory\")\n\tcomposePlugin, err := findExecutable(DockerComposeExecutableName)\n\tif errors.Is(err, fs.ErrNotExist) {\n\t\tt.Logf(\"WARNING: docker-compose cli-plugin not found\")\n\t}\n\n\tif err == nil {\n\t\tCopyFile(t, composePlugin, filepath.Join(configDir, \"cli-plugins\", DockerComposeExecutableName))\n\t\tbuildxPlugin, err := findPluginExecutable(DockerBuildxExecutableName)\n\t\tif err != nil {\n\t\t\tt.Logf(\"WARNING: docker-buildx cli-plugin not found, using default buildx installation.\")\n\t\t} else {\n\t\t\tCopyFile(t, buildxPlugin, filepath.Join(configDir, \"cli-plugins\", DockerBuildxExecutableName))\n\t\t}\n\t\t// We don't need a functional scan plugin, but a valid plugin binary\n\t\tCopyFile(t, composePlugin, filepath.Join(configDir, \"cli-plugins\", DockerScanExecutableName))\n\t}\n}", "func (ks *kuiperService) ListPluginSinks(context.Context, string, uint64, uint64, string, Metadata) (PluginSinksPage, error) {\n\treturn PluginSinksPage{}, nil\n}", "func GetPluginOuts(plugins []*PluginConfiguration) map[string]string {\n\touts := make(map[string]string)\n\tfor _, plugin := range plugins {\n\t\tif plugin.Out == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\touts[plugin.Label.String()] = plugin.Out\n\t}\n\treturn outs\n}", "func (logstash *Logstash) gatherPipelineStats(address string, accumulator telegraf.Accumulator) error {\n\tpipelineStats := &PipelineStats{}\n\n\terr := logstash.gatherJSONData(address, pipelineStats)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttags := map[string]string{\n\t\t\"node_id\": pipelineStats.ID,\n\t\t\"node_name\": pipelineStats.Name,\n\t\t\"node_version\": pipelineStats.Version,\n\t\t\"source\": pipelineStats.Host,\n\t}\n\n\tflattener := jsonParser.JSONFlattener{}\n\terr = flattener.FlattenJSON(\"\", pipelineStats.Pipeline.Events)\n\tif err != nil {\n\t\treturn err\n\t}\n\taccumulator.AddFields(\"logstash_events\", flattener.Fields, tags)\n\n\terr = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Inputs, \"input\", tags, accumulator)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Filters, \"filter\", tags, accumulator)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Outputs, \"output\", tags, accumulator)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = logstash.gatherQueueStats(&pipelineStats.Pipeline.Queue, tags, accumulator)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (m Plugin) FetchMetrics() (map[string]float64, error) {\n\tresp, err := http.Get(fmt.Sprintf(\"http://%s/stats\", m.Target))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tstats := struct {\n\t\tConnections float64 
`json:\"connections\"`\n\t\tTotalConnections float64 `json:\"total_connections\"`\n\t\tTotalMessages float64 `json:\"total_messages\"`\n\t\tConnectErrors float64 `json:\"connect_errors\"`\n\t\tMessageErrors float64 `json:\"message_errors\"`\n\t\tClosingConnections float64 `json:\"closing_connections\"`\n\t}{}\n\tif err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {\n\t\treturn nil, err\n\t}\n\tret := make(map[string]float64, 6)\n\tret[\"conn_current\"] = stats.Connections\n\tret[\"conn_total\"] = stats.TotalConnections\n\tret[\"conn_errors\"] = stats.ConnectErrors\n\tret[\"conn_closing\"] = stats.ClosingConnections\n\tret[\"messages_total\"] = stats.TotalMessages\n\tret[\"messages_errors\"] = stats.MessageErrors\n\n\treturn ret, nil\n}", "func ListPlugins() []PluginName {\n\treturn []PluginName{\n\t\tPluginCNI,\n\t\tPluginDockerBridge,\n\t}\n}", "func (c *Config) GetAllPlugins() []types.PluginItem {\n\tmutex.RLock()\n\tdefer mutex.RUnlock()\n\n\tvar copiedPlugins []types.PluginItem\n\tcopiedPlugins = append(copiedPlugins, configMemoryCache.plugins...)\n\treturn copiedPlugins\n}", "func (pr *pluginRegistry) PluginList() []*Plugin {\n\tpr.mut.Lock()\n\tdefer pr.mut.Unlock()\n\n\tout := make([]*Plugin, len(pr.plugins))\n\tcopy(out, pr.plugins) // intentional shallow copy\n\treturn out\n}", "func AllAvailablePluginsWithOptions(\n\tpluginDir string,\n\tchecksumsFile string,\n\tinternalLookupFunc InternalPluginLookupFunc,\n\texternalLookupfunc ExternalPluginLookupFunc,\n\tlogger log.Logger,\n) (plugin.AvailablePlugins, error) {\n\n\tallHTTPPlugins := map[string]http.Plugin{}\n\tallTCPPlugins := map[string]tcp.Plugin{}\n\n\t// Assemble internal plugins. Plugin IDs for internal plugins are\n\t// assumed to be unique because their definitions are hardcoded.\n\tinternalPlugins, err := InternalPlugins(internalLookupFunc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor pluginID, httpPlugin := range internalPlugins.HTTPPlugins() {\n\t\tallHTTPPlugins[pluginID] = httpPlugin\n\t}\n\tfor pluginID, tcpPlugin := range internalPlugins.TCPPlugins() {\n\t\tallTCPPlugins[pluginID] = tcpPlugin\n\t}\n\n\t// Assemble external plugins. Check whether the plugin ID for each\n\t// external plugin conflicts with any plugin IDs of internal plugins.\n\t// (Checks for uniqueness among external HTTP and TCP plugins is\n\t// done elsewhere, i.e. 
as external plugins are discovered.)\n\texternalPlugins, err := externalLookupfunc(pluginDir, checksumsFile, logger)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor pluginID, httpPlugin := range externalPlugins.HTTPPlugins() {\n\t\tcheckPluginIDConflicts(\"HTTP\", pluginID, internalPlugins, logger)\n\t\tallHTTPPlugins[pluginID] = httpPlugin\n\t}\n\tfor pluginID, tcpPlugin := range externalPlugins.TCPPlugins() {\n\t\tcheckPluginIDConflicts(\"TCP\", pluginID, internalPlugins, logger)\n\t\tallTCPPlugins[pluginID] = tcpPlugin\n\t}\n\n\treturn &Plugins{\n\t\tHTTPPluginsByID: allHTTPPlugins,\n\t\tTCPPluginsByID: allTCPPlugins,\n\t}, nil\n}", "func (g *Gatherer) Gather(ctx context.Context, gatherList []string, rec recorder.Interface) error {\n\tg.ctx = ctx\n\tvar errors []string\n\tvar gatherReport gatherMetadata\n\n\tif len(gatherList) == 0 {\n\t\terrors = append(errors, \"no gather functions are specified to run\")\n\t}\n\n\tif utils.StringInSlice(gatherAll, gatherList) {\n\t\tgatherList = fullGatherList()\n\t}\n\n\t// Starts the gathers in Go routines\n\tcases, starts, err := g.startGathering(gatherList, &errors)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Gets the info from the Go routines\n\tfor range gatherList {\n\t\tchosen, value, _ := reflect.Select(cases)\n\t\t// The chosen channel has been closed, so zero out the channel to disable the case\n\t\tcases[chosen].Chan = reflect.ValueOf(nil)\n\t\tgather := gatherList[chosen]\n\n\t\tgi := NewGatherInfo(gather, value)\n\t\tstatusReport, errorsReport := createStatusReport(gi, rec, starts[chosen])\n\n\t\tif len(errorsReport) > 0 {\n\t\t\terrors = append(errors, errorsReport...)\n\t\t}\n\t\tgatherReport.StatusReports = append(gatherReport.StatusReports, statusReport)\n\t}\n\n\t// if obfuscation is enabled, we want to know it from the archive\n\tgatherReport.IsGlobalObfuscationEnabled = g.anonymizer != nil\n\n\t// fill in performance related data to the report\n\tvar m runtime.MemStats\n\truntime.ReadMemStats(&m)\n\tgatherReport.MemoryAlloc = m.HeapAlloc\n\tgatherReport.Uptime = time.Since(g.startTime).Truncate(time.Millisecond).Seconds()\n\n\t// records the report\n\tif err := recordGatherReport(rec, gatherReport); err != nil {\n\t\terrors = append(errors, fmt.Sprintf(\"unable to record io status reports: %v\", err))\n\t}\n\n\tif len(errors) > 0 {\n\t\treturn sumErrors(errors)\n\t}\n\n\treturn nil\n}", "func (p *pluginContainer) Add(plugins ...Plugin) error {\n\tif p.plugins == nil {\n\t\tp.plugins = make([]Plugin, 0)\n\t}\n\tfor _, plugin := range plugins {\n\t\tif plugin == nil {\n\t\t\treturn errors.New(\"plugin cannot be nil!\")\n\t\t}\n\t\tpName := plugin.Name()\n\t\tif len(pName) == 0 && p.GetByName(pName) != nil {\n\t\t\treturn errors.Errorf(\"repeat add plugin: %s\", pName)\n\t\t}\n\t\tp.plugins = append(p.plugins, plugin)\n\t}\n\treturn nil\n}", "func (p *pluginContainer) Add(plugins ...Plugin) error {\n\tif p.plugins == nil {\n\t\tp.plugins = make([]Plugin, 0)\n\t}\n\tfor _, plugin := range plugins {\n\t\tif plugin == nil {\n\t\t\treturn errors.New(\"plugin cannot be nil!\")\n\t\t}\n\t\tpName := plugin.Name()\n\t\tif len(pName) == 0 && p.GetByName(pName) != nil {\n\t\t\treturn errors.Errorf(\"repeat add plugin: %s\", pName)\n\t\t}\n\t\tp.plugins = append(p.plugins, plugin)\n\t}\n\treturn nil\n}", "func (p *Psutil) CollectMetrics(mts []plugin.MetricType) ([]plugin.MetricType, error) {\n\tloadReqs := []core.Namespace{}\n\tcpuReqs := []core.Namespace{}\n\tmemReqs := []core.Namespace{}\n\tnetReqs := []core.Namespace{}\n\tdiskReqs 
:= []core.Namespace{}\n\n\tfor _, m := range mts {\n\t\tns := m.Namespace()\n\t\tswitch ns[2].Value {\n\t\tcase \"load\":\n\t\t\tloadReqs = append(loadReqs, ns)\n\t\tcase \"cpu\":\n\t\t\tcpuReqs = append(cpuReqs, ns)\n\t\tcase \"vm\":\n\t\t\tmemReqs = append(memReqs, ns)\n\t\tcase \"net\":\n\t\t\tnetReqs = append(netReqs, ns)\n\t\tcase \"disk\":\n\t\t\tdiskReqs = append(diskReqs, ns)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"Requested metric %s does not match any known psutil metric\", m.Namespace().String())\n\t\t}\n\t}\n\n\tmetrics := []plugin.MetricType{}\n\n\tloadMts, err := loadAvg(loadReqs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetrics = append(metrics, loadMts...)\n\n\tcpuMts, err := cpuTimes(cpuReqs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetrics = append(metrics, cpuMts...)\n\n\tmemMts, err := virtualMemory(memReqs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetrics = append(metrics, memMts...)\n\n\tnetMts, err := netIOCounters(netReqs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetrics = append(metrics, netMts...)\n\tmounts := getMountpoints(mts[0].Config().Table())\n\tdiskMts, err := getDiskUsageMetrics(diskReqs, mounts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetrics = append(metrics, diskMts...)\n\n\treturn metrics, nil\n}", "func LoadPlugins() {\n\tlog.Println(\"Loading plugins...\")\n\tfiles, err := ioutil.ReadDir(\"./plugins\")\n\tif err == nil {\n\t\tfor _, f := range files {\n\t\t\tLoadPlugin(f.Name())\n\t\t}\n\t}\n\tlog.Printf(\"%d plugins loaded!\\n\", len(loadedPlugins))\n}", "func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {\n\tfilteredMetrics, err := getFilteredMetrics(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.updateWindow(time.Now())\n\n\t// Get all of the possible queries so we can send groups of 100.\n\tqueries := c.getDataQueries(filteredMetrics)\n\tif len(queries) == 0 {\n\t\treturn nil\n\t}\n\n\t// Limit concurrency or we can easily exhaust user connection limit.\n\t// See cloudwatch API request limits:\n\t// http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html\n\tlmtr := limiter.NewRateLimiter(c.RateLimit, time.Second)\n\tdefer lmtr.Stop()\n\twg := sync.WaitGroup{}\n\trLock := sync.Mutex{}\n\n\tresults := map[string][]types.MetricDataResult{}\n\n\tfor namespace, namespacedQueries := range queries {\n\t\tvar batches [][]types.MetricDataQuery\n\n\t\tfor c.BatchSize < len(namespacedQueries) {\n\t\t\tnamespacedQueries, batches = namespacedQueries[c.BatchSize:], append(batches, namespacedQueries[0:c.BatchSize:c.BatchSize])\n\t\t}\n\t\tbatches = append(batches, namespacedQueries)\n\n\t\tfor i := range batches {\n\t\t\twg.Add(1)\n\t\t\t<-lmtr.C\n\t\t\tgo func(n string, inm []types.MetricDataQuery) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tresult, err := c.gatherMetrics(c.getDataInputs(inm))\n\t\t\t\tif err != nil {\n\t\t\t\t\tacc.AddError(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\trLock.Lock()\n\t\t\t\tresults[n] = append(results[n], result...)\n\t\t\t\trLock.Unlock()\n\t\t\t}(namespace, batches[i])\n\t\t}\n\t}\n\n\twg.Wait()\n\treturn c.aggregateMetrics(acc, results)\n}", "func (p *UserPluginProvider) GetUserPlugins(userID string) ([]string, errors.Error) {\n\tquery := \"CALL get_user_plugins(?);\"\n\n\tarr, err := p.db.Read(query, func(s database.ScannerFunc) (interface{}, errors.Error) {\n\t\tvar id string\n\n\t\terr := s(&id)\n\t\tif err != nil {\n\t\t\treturn nil, errors.InternalError(err)\n\t\t}\n\n\t\treturn id, nil\n\t}, userID)\n\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\n\tids := make([]string, len(arr))\n\n\tfor i, id := range arr {\n\t\tids[i] = id.(string)\n\t}\n\n\treturn ids, nil\n}", "func (c *Collector) Transform(allStats *NodeStatsResponse) (metrics []*exportertools.Metric) {\n for _, stats := range allStats.Nodes {\n // GC Stats\n for _, gcstats := range stats.JVM.GC.Collectors {\n metrics = append(metrics, c.ConvertToMetric(\"jvm_gc_collection_seconds_count\",\n float64(gcstats.CollectionCount),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_gc_collection_seconds_sum\",\n float64(gcstats.CollectionTime / 1000),\n \"COUNTER\",\n nil))\n }\n\n // Breaker stats\n for _, bstats := range stats.Breakers {\n metrics = append(metrics, c.ConvertToMetric(\"breakers_estimated_size_bytes\",\n float64(bstats.EstimatedSize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"breakers_limit_size_bytes\",\n float64(bstats.LimitSize),\n \"GAUGE\",\n nil))\n }\n\n // Thread Pool stats\n for pool, pstats := range stats.ThreadPool {\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_completed_count\",\n float64(pstats.Completed),\n \"COUNTER\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_rejected_count\",\n float64(pstats.Rejected),\n \"COUNTER\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_active_count\",\n float64(pstats.Active),\n \"GAUGE\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_threads_count\",\n float64(pstats.Threads),\n \"GAUGE\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_largest_count\",\n float64(pstats.Largest),\n \"GAUGE\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_queue_count\",\n float64(pstats.Queue),\n \"GAUGE\",\n map[string]string{\"type\": pool}))\n }\n\n // JVM Memory Stats\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_committed_bytes\",\n float64(stats.JVM.Mem.HeapCommitted),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_used_bytes\",\n float64(stats.JVM.Mem.HeapUsed),\n \"GAUGE\",\n nil))\n\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_max_bytes\",\n float64(stats.JVM.Mem.HeapMax),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_committed_bytes\",\n float64(stats.JVM.Mem.NonHeapCommitted),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_used_bytes\",\n float64(stats.JVM.Mem.NonHeapUsed),\n \"GAUGE\",\n nil))\n\n // Indices Stats)\n metrics = append(metrics, c.ConvertToMetric(\"indices_fielddata_memory_size_bytes\",\n float64(stats.Indices.FieldData.MemorySize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_fielddata_evictions\",\n float64(stats.Indices.FieldData.Evictions),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_filter_cache_memory_size_bytes\",\n float64(stats.Indices.FilterCache.MemorySize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_filter_cache_evictions\",\n float64(stats.Indices.FilterCache.Evictions),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_query_cache_memory_size_bytes\",\n float64(stats.Indices.QueryCache.MemorySize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, 
c.ConvertToMetric(\"indices_query_cache_evictions\",\n float64(stats.Indices.QueryCache.Evictions),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_request_cache_memory_size_bytes\",\n float64(stats.Indices.QueryCache.MemorySize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_request_cache_evictions\",\n float64(stats.Indices.QueryCache.Evictions),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_docs\",\n float64(stats.Indices.Docs.Count),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_docs_deleted\",\n float64(stats.Indices.Docs.Deleted),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_segments_memory_bytes\",\n float64(stats.Indices.Segments.Memory),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_segments_count\",\n float64(stats.Indices.Segments.Count),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_store_size_bytes\",\n float64(stats.Indices.Store.Size),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_store_throttle_time_ms_total\",\n float64(stats.Indices.Store.ThrottleTime),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_flush_total\",\n float64(stats.Indices.Flush.Total),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_flush_time_ms_total\",\n float64(stats.Indices.Flush.Time),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_indexing_index_time_ms_total\",\n float64(stats.Indices.Indexing.IndexTime),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_indexing_index_total\",\n float64(stats.Indices.Indexing.IndexTotal),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_merges_total_time_ms_total\",\n float64(stats.Indices.Merges.TotalTime),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_merges_total_size_bytes_total\",\n float64(stats.Indices.Merges.TotalSize),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_merges_total\",\n float64(stats.Indices.Merges.Total),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_refresh_total_time_ms_total\",\n float64(stats.Indices.Refresh.TotalTime),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_refresh_total\",\n float64(stats.Indices.Refresh.Total),\n \"COUNTER\",\n nil))\n\n // Transport Stats)\n metrics = append(metrics, c.ConvertToMetric(\"transport_rx_packets_total\",\n float64(stats.Transport.RxCount),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"transport_rx_size_bytes_total\",\n float64(stats.Transport.RxSize),\n \"COUNTER\",\n nil))\n\n\n metrics = append(metrics, c.ConvertToMetric(\"transport_tx_packets_total\",\n float64(stats.Transport.TxCount),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"transport_tx_size_bytes_total\",\n float64(stats.Transport.TxSize),\n \"COUNTER\",\n nil))\n\n // Process Stats)\n metrics = append(metrics, c.ConvertToMetric(\"process_cpu_percent\",\n float64(stats.Process.CPU.Percent),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_mem_resident_size_bytes\",\n float64(stats.Process.Memory.Resident),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, 
c.ConvertToMetric(\"process_mem_share_size_bytes\",\n float64(stats.Process.Memory.Share),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_mem_virtual_size_bytes\",\n float64(stats.Process.Memory.TotalVirtual),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_open_files_count\",\n float64(stats.Process.OpenFD),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_cpu_time_seconds_sum\",\n float64(stats.Process.CPU.Total / 1000),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_cpu_time_seconds_sum\",\n float64(stats.Process.CPU.Sys / 1000),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_cpu_time_seconds_sum\",\n float64(stats.Process.CPU.User / 1000),\n \"COUNTER\",\n nil))\n\n }\n\n return metrics\n}", "func Metrics() []prometheus.Collector {\n\treturn []prometheus.Collector{\n\t\tptsCounterVec,\n\t\tbytesCounterVec,\n\t\tapiSumVec,\n\t\tsinkCounterVec,\n\t\thttpRetry,\n\t\tnotSinkPtsVec,\n\t\tsinkPtsVec,\n\t\tflushFailCacheVec,\n\t}\n}", "func ListPlugins() map[string][]string {\n\tp := make(map[string][]string)\n\n\t// server type plugins\n\tfor name := range serverTypes {\n\t\tp[\"server_types\"] = append(p[\"server_types\"], name)\n\t}\n\n\t// caddyfile loaders in registration order\n\tfor _, loader := range caddyfileLoaders {\n\t\tp[\"caddyfile_loaders\"] = append(p[\"caddyfile_loaders\"], loader.name)\n\t}\n\tif defaultCaddyfileLoader.name != \"\" {\n\t\tp[\"caddyfile_loaders\"] = append(p[\"caddyfile_loaders\"], defaultCaddyfileLoader.name)\n\t}\n\n\t// List the event hook plugins\n\teventHooks.Range(func(k, _ interface{}) bool {\n\t\tp[\"event_hooks\"] = append(p[\"event_hooks\"], k.(string))\n\t\treturn true\n\t})\n\n\t// alphabetize the rest of the plugins\n\tvar others []string\n\tfor stype, stypePlugins := range plugins {\n\t\tfor name := range stypePlugins {\n\t\t\tvar s string\n\t\t\tif stype != \"\" {\n\t\t\t\ts = stype + \".\"\n\t\t\t}\n\t\t\ts += name\n\t\t\tothers = append(others, s)\n\t\t}\n\t}\n\n\tsort.Strings(others)\n\tfor _, name := range others {\n\t\tp[\"others\"] = append(p[\"others\"], name)\n\t}\n\n\treturn p\n}", "func GetLoadedPlugins() []IgluPlugin {\n\tmarkCrashedPlugins()\n\treturn loadedPlugins\n}", "func (c *Client) Metrics(pluginID string) (*Metrics, error) {\n\treturn c.MetricsWithContext(context.Background(), pluginID)\n}", "func (store *Store) getPlugins(serverVersion string) ([]*model.Plugin, error) {\n\tvar result []*model.Plugin\n\tplugins := map[string]*model.Plugin{}\n\n\tfor _, storePlugin := range store.plugins {\n\t\tif serverVersion != \"\" && storePlugin.Manifest.MinServerVersion != \"\" {\n\t\t\tmeetsMinServerVersion, err := storePlugin.Manifest.MeetMinServerVersion(serverVersion)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"failed to check minServerVersion for manifest.Id %s\", storePlugin.Manifest.Id)\n\t\t\t}\n\n\t\t\tif !meetsMinServerVersion {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tif plugins[storePlugin.Manifest.Id] == nil {\n\t\t\tplugins[storePlugin.Manifest.Id] = storePlugin\n\t\t\tcontinue\n\t\t}\n\n\t\tlastSeenPluginVersion, err := semver.Parse(plugins[storePlugin.Manifest.Id].Manifest.Version)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Errorf(\"failed to parse manifest.Version for manifest.Id %s\", storePlugin.Manifest.Id)\n\t\t}\n\n\t\tstorePluginVersion := semver.MustParse(storePlugin.Manifest.Version)\n\t\tif 
storePluginVersion.GT(lastSeenPluginVersion) {\n\t\t\tplugins[storePlugin.Manifest.Id] = storePlugin\n\t\t}\n\t}\n\n\tfor _, plugin := range plugins {\n\t\tresult = append(result, plugin)\n\t}\n\n\t// Sort the final slice by plugin name, ascending\n\tsort.SliceStable(\n\t\tresult,\n\t\tfunc(i, j int) bool {\n\t\t\treturn strings.ToLower(result[i].Manifest.Name) < strings.ToLower(result[j].Manifest.Name)\n\t\t},\n\t)\n\n\treturn result, nil\n}", "func collectGauges(e *Exporter, ch chan<- prometheus.Metric) {\n\te.chipStatGauge.Collect(ch)\n\te.devsHashRateGauge.Collect(ch)\n\te.devsHashCountGauge.Collect(ch)\n\te.devsErrorsGauge.Collect(ch)\n\te.devsTemperatureGauge.Collect(ch)\n}", "func LoadPlugins(fms ...template.FuncMap) {\n\t// Final FuncMap\n\tfm := make(template.FuncMap)\n\n\t// Loop through the maps\n\tfor _, m := range fms {\n\t\t// Loop through each key and value\n\t\tfor k, v := range m {\n\t\t\tfm[k] = v\n\t\t}\n\t}\n\n\t// Load the plugins\n\tmutexPlugins.Lock()\n\tpluginCollection = fm\n\tmutexPlugins.Unlock()\n}", "func (p *Kafka) CollectMetrics(mts []plugin.MetricType) ([]plugin.MetricType, error) {\n\tmetrics := []plugin.MetricType{}\n\n\terr := p.loadMetricAPI(mts[0].Config())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, m := range mts {\n\t\tresults := []nodeData{}\n\t\tsearch := strings.Split(replaceUnderscoreToDot(strings.TrimLeft(m.Namespace().String(), \"/\")), \"/\")\n\t\tif len(search) > 3 {\n\t\t\tp.client.Root.Get(p.client.client.GetUrl(), search[4:], 0, &results)\n\t\t}\n\n\t\tfor _, result := range results {\n\t\t\tns := append([]string{\"hyperpilot\", \"kafka\", \"node\", p.client.host}, strings.Split(result.Path, Slash)...)\n\t\t\tmetrics = append(metrics, plugin.MetricType{\n\t\t\t\tNamespace_: core.NewNamespace(ns...),\n\t\t\t\tTimestamp_: time.Now(),\n\t\t\t\tData_: result.Data,\n\t\t\t\tUnit_: reflect.TypeOf(result.Data).String(),\n\t\t\t})\n\t\t}\n\n\t}\n\n\treturn metrics, nil\n}", "func AllAvailablePlugins(\n\tpluginDir string,\n\tchecksumsFile string,\n\tlogger log.Logger,\n) (plugin.AvailablePlugins, error) {\n\n\treturn AllAvailablePluginsWithOptions(\n\t\tpluginDir,\n\t\tchecksumsFile,\n\t\tGetInternalPluginsFunc,\n\t\tExternalPlugins,\n\t\tlogger,\n\t)\n}", "func (p *pluginContainer) GetAll() []Plugin {\n\treturn p.plugins\n}", "func (p *pluginContainer) GetAll() []Plugin {\n\treturn p.plugins\n}", "func (m VarnishPlugin) FetchMetrics() (map[string]interface{}, error) {\n\tvar out []byte\n\tvar err error\n\n\tif m.VarnishName == \"\" {\n\t\tout, err = exec.Command(m.VarnishStatPath, \"-1\").CombinedOutput()\n\t} else {\n\t\tout, err = exec.Command(m.VarnishStatPath, \"-1\", \"-n\", m.VarnishName).CombinedOutput()\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%s: %s\", err, out)\n\t}\n\n\tlineexp := regexp.MustCompile(`^([^ ]+) +(\\d+)`)\n\tsmaexp := regexp.MustCompile(`^SMA\\.([^\\.]+)\\.(.+)$`)\n\n\tstat := map[string]interface{}{\n\t\t\"requests\": float64(0),\n\t}\n\n\tvar tmpv float64\n\tfor _, line := range strings.Split(string(out), \"\\n\") {\n\t\tmatch := lineexp.FindStringSubmatch(line)\n\t\tif match == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\ttmpv, err = strconv.ParseFloat(match[2], 64)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch match[1] {\n\t\tcase \"cache_hit\", \"MAIN.cache_hit\":\n\t\t\tstat[\"cache_hits\"] = tmpv\n\t\t\tstat[\"requests\"] = stat[\"requests\"].(float64) + tmpv\n\t\tcase \"cache_miss\", \"MAIN.cache_miss\":\n\t\t\tstat[\"requests\"] = stat[\"requests\"].(float64) + tmpv\n\t\tcase 
\"cache_hitpass\", \"MAIN.cache_hitpass\":\n\t\t\tstat[\"requests\"] = stat[\"requests\"].(float64) + tmpv\n\t\tcase \"MAIN.backend_req\":\n\t\t\tstat[\"backend_req\"] = tmpv\n\t\tcase \"MAIN.backend_conn\":\n\t\t\tstat[\"backend_conn\"] = tmpv\n\t\tcase \"MAIN.backend_fail\":\n\t\t\tstat[\"backend_fail\"] = tmpv\n\t\tcase \"MAIN.backend_reuse\":\n\t\t\tstat[\"backend_reuse\"] = tmpv\n\t\tcase \"MAIN.backend_recycle\":\n\t\t\tstat[\"backend_recycle\"] = tmpv\n\t\tcase \"MAIN.n_object\":\n\t\t\tstat[\"n_object\"] = tmpv\n\t\tcase \"MAIN.n_objectcore\":\n\t\t\tstat[\"n_objectcore\"] = tmpv\n\t\tcase \"MAIN.n_expired\":\n\t\t\tstat[\"n_expired\"] = tmpv\n\t\tcase \"MAIN.n_objecthead\":\n\t\t\tstat[\"n_objecthead\"] = tmpv\n\t\tcase \"MAIN.busy_sleep\":\n\t\t\tstat[\"busy_sleep\"] = tmpv\n\t\tcase \"MAIN.busy_wakeup\":\n\t\t\tstat[\"busy_wakeup\"] = tmpv\n\t\tdefault:\n\t\t\tsmamatch := smaexp.FindStringSubmatch(match[1])\n\t\t\tif smamatch == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif smamatch[2] == \"g_alloc\" {\n\t\t\t\tstat[\"varnish.sma.g_alloc.\"+smamatch[1]+\".g_alloc\"] = tmpv\n\t\t\t} else if smamatch[2] == \"g_bytes\" {\n\t\t\t\tstat[\"varnish.sma.memory.\"+smamatch[1]+\".allocated\"] = tmpv\n\t\t\t} else if smamatch[2] == \"g_space\" {\n\t\t\t\tstat[\"varnish.sma.memory.\"+smamatch[1]+\".available\"] = tmpv\n\t\t\t}\n\t\t}\n\t}\n\n\treturn stat, err\n}", "func ListPlugins(dockerCli command.Cli, rootcmd *cobra.Command) ([]Plugin, error) {\n\tpluginDirs, err := getPluginDirs(dockerCli)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcandidates, err := listPluginCandidates(pluginDirs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar plugins []Plugin\n\tvar mu sync.Mutex\n\teg, _ := errgroup.WithContext(context.TODO())\n\tcmds := rootcmd.Commands()\n\tfor _, paths := range candidates {\n\t\tfunc(paths []string) {\n\t\t\teg.Go(func() error {\n\t\t\t\tif len(paths) == 0 {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tc := &candidate{paths[0]}\n\t\t\t\tp, err := newPlugin(c, cmds)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif !IsNotFound(p.Err) {\n\t\t\t\t\tp.ShadowedPaths = paths[1:]\n\t\t\t\t\tmu.Lock()\n\t\t\t\t\tdefer mu.Unlock()\n\t\t\t\t\tplugins = append(plugins, p)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t})\n\t\t}(paths)\n\t}\n\tif err := eg.Wait(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tsort.Slice(plugins, func(i, j int) bool {\n\t\treturn sortorder.NaturalLess(plugins[i].Name, plugins[j].Name)\n\t})\n\n\treturn plugins, nil\n}", "func (r *Router) AddPlugin(p ...RouterPlugin) {\n\tr.logger.Debug(\"Adding plugins\", watermill.LogFields{\"count\": fmt.Sprintf(\"%d\", len(p))})\n\n\tr.plugins = append(r.plugins, p...)\n}", "func NewPlugins() {\n\tActivePlugins = PluginsManager{\n\t\tPlugins: map[string]Plugin{},\n\t}\n\tp := plugins.NewPluginMusicStream()\n\tActivePlugins.AddPlugin(Plugin{\n\t\tName: \"music-stream\",\n\t\tType: \"stream\",\n\t\tPlugin: &p,\n\t})\n\n\tp2 := plugins.NewPluginFirmata()\n\tActivePlugins.AddPlugin(Plugin{\n\t\tName: \"firmata\",\n\t\tType: \"device\",\n\t\tPlugin: &p2,\n\t})\n\n\tp3 := plugins.NewPluginSonoff()\n\tActivePlugins.AddPlugin(Plugin{\n\t\tName: \"Sonoff\",\n\t\tType: \"device\",\n\t\tPlugin: &p3,\n\t})\n\n\tp4 := plugins.NewPluginHTTP()\n\tActivePlugins.AddPlugin(Plugin{\n\t\tName: \"http\",\n\t\tType: \"device\",\n\t\tPlugin: &p4,\n\t})\n\n}", "func (p *pluginContainer) All() []Plugin {\n\treturn p.plugins\n}", "func comparePlugins(newPlugins,\n\toldPlugins []core.SubscribedPlugin) (adds,\n\tremoves 
[]core.SubscribedPlugin) {\n\tnewMap := make(map[string]int)\n\toldMap := make(map[string]int)\n\n\tfor _, n := range newPlugins {\n\t\tnewMap[key(n)]++\n\t}\n\tfor _, o := range oldPlugins {\n\t\toldMap[key(o)]++\n\t}\n\n\tfor _, n := range newPlugins {\n\t\tif oldMap[key(n)] > 0 {\n\t\t\toldMap[key(n)]--\n\t\t\tcontinue\n\t\t}\n\t\tadds = append(adds, n)\n\t}\n\n\tfor _, o := range oldPlugins {\n\t\tif newMap[key(o)] > 0 {\n\t\t\tnewMap[key(o)]--\n\t\t\tcontinue\n\t\t}\n\t\tremoves = append(removes, o)\n\t}\n\n\treturn\n}", "func (ic *IpmiCollector) CollectMetrics(mts []plugin.MetricType) ([]plugin.MetricType, error) {\n\tif !ic.Initialized {\n\t\tic.construct(mts[0].Config().Table()) //reinitialize plugin\n\t}\n\trequestList := make(map[string][]ipmi.IpmiRequest, 0)\n\trequestDescList := make(map[string][]ipmi.RequestDescription, 0)\n\tresponseCache := map[string]map[string]uint16{}\n\tfor _, host := range ic.Hosts {\n\t\trequestList[host] = make([]ipmi.IpmiRequest, 0)\n\t\trequestDescList[host] = make([]ipmi.RequestDescription, 0)\n\t\tfor _, request := range ic.Vendor[host] {\n\t\t\trequestList[host] = append(requestList[host], request.Request)\n\t\t\trequestDescList[host] = append(requestDescList[host], request)\n\t\t}\n\t}\n\tresponse := make(map[string][]ipmi.IpmiResponse, 0)\n\n\tfor _, host := range ic.Hosts {\n\t\tresponse[host], _ = ic.IpmiLayer.BatchExecRaw(requestList[host], host)\n\t}\n\n\tfor nmResponseIdx, hostResponses := range response {\n\t\tcached := map[string]uint16{}\n\t\tfor i, resp := range hostResponses {\n\t\t\tformat := requestDescList[nmResponseIdx][i].Format\n\t\t\tif err := format.Validate(resp); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tsubmetrics := format.Parse(resp)\n\t\t\tfor k, v := range submetrics {\n\t\t\t\tpath := extendPath(requestDescList[nmResponseIdx][i].MetricsRoot, k)\n\t\t\t\tcached[path] = v\n\t\t\t}\n\t\t\tresponseCache[nmResponseIdx] = cached\n\t\t}\n\t}\n\n\tresults := make([]plugin.MetricType, len(mts))\n\tvar responseMetrics []plugin.MetricType\n\tresponseMetrics = make([]plugin.MetricType, 0)\n\tt := time.Now()\n\n\tfor _, host := range ic.Hosts {\n\t\tfor i, mt := range mts {\n\t\t\tns := mt.Namespace()\n\t\t\tkey := parseName(ns)\n\t\t\tdata := responseCache[host][key]\n\t\t\tmetric := plugin.MetricType{Namespace_: ns, Tags_: map[string]string{\"source\": host},\n\t\t\t\tTimestamp_: t, Data_: data}\n\t\t\tresults[i] = metric\n\t\t\tresponseMetrics = append(responseMetrics, metric)\n\t\t}\n\t}\n\n\treturn responseMetrics, nil\n}", "func listPluginCandidates(dirs []string) (map[string][]string, error) {\n\tresult := make(map[string][]string)\n\tfor _, d := range dirs {\n\t\t// Silently ignore any directories which we cannot\n\t\t// Stat (e.g. 
due to permissions or anything else) or\n\t\t// which is not a directory.\n\t\tif fi, err := os.Stat(d); err != nil || !fi.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\tif err := addPluginCandidatesFromDir(result, d); err != nil {\n\t\t\t// Silently ignore paths which don't exist.\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn nil, err // Or return partial result?\n\t\t}\n\t}\n\treturn result, nil\n}", "func (shard *ShardedIncomingDataCache) FlushAllMetrics() {\n\tshard.lock.Lock()\n\tdefer shard.lock.Unlock()\n\tfor _, dataInterface := range shard.plugin {\n\t\tif collectd, ok := dataInterface.(*incoming.CollectdMetric); ok {\n\t\t\tif collectd.ISNew() {\n\t\t\t\tcollectd.SetNew(false)\n\t\t\t\tlog.Printf(\"New Metrics %#v\\n\", collectd)\n\t\t\t} else {\n\t\t\t\t//clean up if data is not access for max TTL specified\n\t\t\t\tif shard.Expired() {\n\t\t\t\t\tdelete(shard.plugin, collectd.GetItemKey())\n\t\t\t\t\tlog.Printf(\"Cleaned up plugin for %s\", collectd.GetItemKey())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (l *loader) plugins() []string {\n dir, err := os.Open(l.pluginsDir)\n if err != nil {\n log.Fatal(err)\n }\n defer dir.Close()\n names, err := dir.Readdirnames(-1)\n if err != nil {\n log.Fatal(err)\n }\n\n var res []string\n for _, name := range names {\n if filepath.Ext(name) == \".go\" {\n res = append(res, name)\n }\n }\n return res\n}", "func preparePluginResults(context context.T, update *UpdateDetail) map[string]*contracts.PluginResult {\n\tcode := 0\n\tif update.Result == contracts.ResultStatusFailed {\n\t\tcode = 1\n\t}\n\n\toutput := iohandler.TruncateOutput(update.StandardOut,\n\t\tupdate.StandardError,\n\t\tiohandler.MaximumPluginOutputSize)\n\n\tpluginResult := &contracts.PluginResult{\n\t\tStatus: update.Result,\n\t\tCode: code,\n\t\tOutput: output,\n\t\tStartDateTime: update.StartDateTime,\n\t\tEndDateTime: time.Now(),\n\t\tOutputS3BucketName: update.OutputS3BucketName,\n\t\tOutputS3KeyPrefix: update.OutputS3KeyPrefix,\n\t\tStandardOutput: update.StandardOut,\n\t\tStandardError: update.StandardError,\n\t}\n\n\tpluginResults := make(map[string]*contracts.PluginResult)\n\n\tif isV22DocUpdate(context.Identity(), context.Log(), update) {\n\t\tpluginResult.PluginName = appconfig.PluginNameAwsAgentUpdate\n\t\tpluginResults[updateconstants.DefaultOutputFolder] = pluginResult\n\t} else {\n\t\tpluginResults[appconfig.PluginNameAwsAgentUpdate] = pluginResult\n\t}\n\n\treturn pluginResults\n}", "func RegisterPlugins() {\n\t// plugins\n\tfilter.RegisterFilterPlugins()\n\tforwarder.RegisterForwarderPlugins()\n\tparser.RegisterParserPlugins()\n\tqueue.RegisterQueuePlugins()\n\treceiver.RegisterReceiverPlugins()\n\tfetcher.RegisterFetcherPlugins()\n\tfallbacker.RegisterFallbackerPlugins()\n\t// sharing plugins\n\tserver.RegisterServerPlugins()\n\tclient.RegisterClientPlugins()\n}", "func (n *Node) ForEachPlugin(f PluginForEachFunc) {\n\tforEachPlugin(n.plugins, f)\n}", "func (p *Plugins) Plugins() []*Plugin {\n\tif p.plugins == nil {\n\t\tp.plugins = []*Plugin{}\n\t\tif exists, _ := FileExists(p.cachePath()); !exists {\n\t\t\treturn p.plugins\n\t\t}\n\t\tf, err := os.Open(p.cachePath())\n\t\tif err != nil {\n\t\t\tLogIfError(err)\n\t\t\treturn p.plugins\n\t\t}\n\t\terr = json.NewDecoder(f).Decode(&p.plugins)\n\t\tWarnIfError(err)\n\t\tp.removeMissingPlugins()\n\t\tp.RefreshPlugins()\n\t}\n\treturn p.plugins\n}", "func (n *Vspheretpgy) Gather(acc telegraf.Accumulator) error {\n\t// setPrecision function is the same as `acc.SetPrecision(time.Nanosecond, 
0)`\n\tsetPrecisionForVsphere(&acc)\n\n\tfor i, urls := range n.Urls {\n\t\tif len(urls) == 0 {\n\t\t\tlog.Printf(\"Need to put vCenter information!\\n\")\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif len(urls) != 4 {\n\t\t\tacc.AddError(fmt.Errorf(\"the %d_th vsphere configuration is incorrect! \", i+1))\n\n\t\t\tcontinue\n\t\t}\n\t\t// for a give set of vcsas\n\t\tvc, err := vcsa.NewVcsaConnector(urls[0], urls[1], urls[2], urls[3], true)\n\t\tif err != nil {\n\t\t\tacc.AddError(fmt.Errorf(\"failed to connect '%v\", err))\n\n\t\t\tcontinue\n\t\t}\n\n\t\tdcs, err := dcai.FetchVsphereTopology(vc)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttimeStamp := fmt.Sprintf(\"%d\", time.Now().UnixNano())\n\n\t\terr = startAccNeo4j(vc, dcs, timeStamp, acc)\n\t\treturn err\n\n\t}\n\n\treturn nil\n}", "func (p *ColorPlugin) Stats(bot *mmmorty.Bot, service mmmorty.Service, message mmmorty.Message) []string {\n\treturn []string{}\n}", "func initPlugins() error {\n\treturn pluginRegistry.InitPlugins()\n}", "func RunCommonRunPlugins() {\n\tfor _, v := range Plugins {\n\t\tif cast, ok := v.(PluginWithCommonRun); ok {\n\t\t\tgo cast.CommonRun()\n\t\t}\n\t}\n}", "func CollectAllMetrics(client *statsd.Client, log *li.StandardLogger) {\n\n\tvar metrics []metric\n\tmetrics = append(metrics, metric{name: \"gpu.temperature\", cmd: \"vcgencmd measure_temp | egrep -o '[0-9]*\\\\.[0-9]*'\"})\n\tmetrics = append(metrics, metric{name: \"cpu.temperature\", cmd: \"cat /sys/class/thermal/thermal_zone0/temp | awk 'END {print $1/1000}'\"})\n\tmetrics = append(metrics, metric{name: \"threads\", cmd: \"ps -eo nlwp | tail -n +2 | awk '{ num_threads += $1 } END { print num_threads }'\"})\n\tmetrics = append(metrics, metric{name: \"processes\", cmd: \"ps axu | wc -l\"})\n\n\tfor range time.Tick(15 * time.Second) {\n\t\tlog.Info(\"Starting metric collection\")\n\t\tfor _, m := range metrics {\n\t\t\terr := collectMetric(m, client, log)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t}\n\t\t}\n\t}\n}", "func _LoadProvidersFromPlugin(manager *Manager, pluginObj *plugin.Plugin, pluginName string) error {\n\trawProviderPluginsFunc, err := pluginObj.Lookup(\"GetProviders\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// TODO: Handle different interface versions\n\tproviderPluginsFunc, ok := rawProviderPluginsFunc.(func() map[string]func(plugin_v1.ProviderOptions) (plugin_v1.Provider, error))\n\tif !ok {\n\t\treturn errors.New(\"ERROR: Could not cast GetProviders to proper type\")\n\t}\n\tproviderPlugins := providerPluginsFunc()\n\tfor providerID, providerFactory := range providerPlugins {\n\t\tmanager.ProviderFactories[providerID] = providerFactory\n\t\tlog.Printf(\"Provider factory '%s' added from plugin %s\", providerID, pluginName)\n\t}\n\n\treturn nil\n}", "func (store *Store) GetPlugins(pluginFilter *model.PluginFilter) ([]*model.Plugin, error) {\n\tif pluginFilter.PerPage == 0 {\n\t\treturn nil, nil\n\t}\n\n\tplugins, err := store.getPlugins(pluginFilter.ServerVersion)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to get plugins\")\n\t}\n\n\tfilter := strings.TrimSpace(pluginFilter.Filter)\n\tif filter != \"\" {\n\t\tvar filteredPlugins []*model.Plugin\n\t\tfor _, plugin := range plugins {\n\t\t\tif pluginMatchesFilter(plugin, filter) {\n\t\t\t\tfilteredPlugins = append(filteredPlugins, plugin)\n\t\t\t}\n\t\t}\n\t\tplugins = filteredPlugins\n\t}\n\n\tif len(plugins) == 0 {\n\t\treturn nil, nil\n\t}\n\tif pluginFilter.PerPage == model.AllPerPage {\n\t\treturn plugins, nil\n\t}\n\n\tstart := (pluginFilter.Page) * 
pluginFilter.PerPage\n\tend := (pluginFilter.Page + 1) * pluginFilter.PerPage\n\tif start >= len(plugins) {\n\t\treturn nil, nil\n\t}\n\tif end > len(plugins) {\n\t\tend = len(plugins)\n\t}\n\n\treturn plugins[start:end], nil\n}", "func (c *CloudWatch) gatherMetrics(\n\tparams *cwClient.GetMetricDataInput,\n) ([]types.MetricDataResult, error) {\n\tresults := []types.MetricDataResult{}\n\n\tfor {\n\t\tresp, err := c.client.GetMetricData(context.Background(), params)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to get metric data: %w\", err)\n\t\t}\n\n\t\tresults = append(results, resp.MetricDataResults...)\n\t\tif resp.NextToken == nil {\n\t\t\tbreak\n\t\t}\n\t\tparams.NextToken = resp.NextToken\n\t}\n\n\treturn results, nil\n}", "func (d *GatherJob) Gather(ctx context.Context, kubeConfig, protoKubeConfig *rest.Config) error {\n\tklog.Infof(\"Starting insights-operator %s\", version.Get().String())\n\t// these are operator clients\n\tkubeClient, err := kubernetes.NewForConfig(protoKubeConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconfigClient, err := configv1client.NewForConfig(kubeConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgatherProtoKubeConfig, gatherKubeConfig, metricsGatherKubeConfig, alertsGatherKubeConfig := prepareGatherConfigs(\n\t\tprotoKubeConfig, kubeConfig, d.Impersonate,\n\t)\n\n\ttpEnabled, err := isTechPreviewEnabled(ctx, configClient)\n\tif err != nil {\n\t\tklog.Error(\"can't read cluster feature gates: %v\", err)\n\t}\n\tvar gatherConfig v1alpha1.GatherConfig\n\tif tpEnabled {\n\t\tinsightsDataGather, err := configClient.ConfigV1alpha1().InsightsDataGathers().Get(ctx, \"cluster\", metav1.GetOptions{}) //nolint: govet\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tgatherConfig = insightsDataGather.Spec.GatherConfig\n\t}\n\n\t// ensure the insight snapshot directory exists\n\tif _, err = os.Stat(d.StoragePath); err != nil && os.IsNotExist(err) {\n\t\tif err = os.MkdirAll(d.StoragePath, 0777); err != nil {\n\t\t\treturn fmt.Errorf(\"can't create --path: %v\", err)\n\t\t}\n\t}\n\n\t// configobserver synthesizes all config into the status reporter controller\n\tconfigObserver := configobserver.New(d.Controller, kubeClient)\n\n\t// anonymizer is responsible for anonymizing sensitive data, it can be configured to disable specific anonymization\n\tanonymizer, err := anonymization.NewAnonymizerFromConfig(\n\t\tctx, gatherKubeConfig, gatherProtoKubeConfig, protoKubeConfig, configObserver, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// the recorder stores the collected data and we flush at the end.\n\trecdriver := diskrecorder.New(d.StoragePath)\n\trec := recorder.New(recdriver, d.Interval, anonymizer)\n\tdefer func() {\n\t\tif err = rec.Flush(); err != nil {\n\t\t\tklog.Error(err)\n\t\t}\n\t}()\n\n\tauthorizer := clusterauthorizer.New(configObserver)\n\n\t// gatherConfigClient is configClient created from gatherKubeConfig, this name was used because configClient was already taken\n\t// this client is only used in insightsClient, it is created here\n\t// because pkg/insights/insightsclient/request_test.go unit test won't work otherwise\n\tgatherConfigClient, err := configv1client.NewForConfig(gatherKubeConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinsightsClient := insightsclient.New(nil, 0, \"default\", authorizer, gatherConfigClient)\n\tgatherers := gather.CreateAllGatherers(\n\t\tgatherKubeConfig, gatherProtoKubeConfig, metricsGatherKubeConfig, alertsGatherKubeConfig, anonymizer,\n\t\tconfigObserver, 
insightsClient,\n\t)\n\n\tallFunctionReports := make(map[string]gather.GathererFunctionReport)\n\tfor _, gatherer := range gatherers {\n\t\tfunctionReports, err := gather.CollectAndRecordGatherer(ctx, gatherer, rec, &gatherConfig)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"unable to process gatherer %v, error: %v\", gatherer.GetName(), err)\n\t\t}\n\n\t\tfor i := range functionReports {\n\t\t\tallFunctionReports[functionReports[i].FuncName] = functionReports[i]\n\t\t}\n\t}\n\n\treturn gather.RecordArchiveMetadata(mapToArray(allFunctionReports), rec, anonymizer)\n}", "func (dpm *DevicePluginManager) startPlugins() {\n\tfor _, plugin := range dpm.plugins {\n\t\tgo plugin.Start()\n\t}\n}", "func LoadPlugins(c config.Config) (p Plugins) {\n\treturn Plugins{\n\t\tTwilio{\n\t\t\tc.Plugins.Twilio.AccountID,\n\t\t\tc.Plugins.Twilio.Auth,\n\t\t\ttrue,\n\t\t},\n\t\tPagerDuty{\n\t\t\tc.Plugins.PagerDuty.Email,\n\t\t\tc.Plugins.PagerDuty.Key,\n\t\t\ttrue,\n\t\t},\n\t}\n}", "func (f *FlavorVppRPC) Plugins() []*core.NamedPlugin {\n\tf.Inject()\n\treturn core.ListPluginsInFlavor(f)\n}" ]
[ "0.6302264", "0.5969855", "0.5939491", "0.5816808", "0.5660939", "0.55557626", "0.5505595", "0.5478887", "0.5473905", "0.5466036", "0.5395421", "0.5387687", "0.53862995", "0.5359657", "0.5324432", "0.5320401", "0.5308153", "0.5253923", "0.5249775", "0.5240576", "0.5203706", "0.5200241", "0.51863223", "0.51846766", "0.51689243", "0.5156022", "0.5152542", "0.51332563", "0.5103296", "0.5077937", "0.5063282", "0.5062341", "0.5062101", "0.50495905", "0.5036095", "0.50105876", "0.50078785", "0.50049007", "0.49984172", "0.49893796", "0.4988505", "0.49563923", "0.4947536", "0.49200657", "0.491477", "0.48793563", "0.48706552", "0.48688442", "0.48462257", "0.4838042", "0.4837262", "0.48355296", "0.4827575", "0.4825164", "0.48179486", "0.47964084", "0.47924668", "0.47924668", "0.4788167", "0.4779801", "0.4778829", "0.47705197", "0.47695047", "0.47678733", "0.4751652", "0.4738944", "0.47309563", "0.47270185", "0.47207725", "0.47180468", "0.47154462", "0.46820983", "0.46697354", "0.46697354", "0.46576726", "0.46570724", "0.4656549", "0.46452162", "0.46448255", "0.46430215", "0.4641635", "0.46323767", "0.46094263", "0.46046633", "0.4602423", "0.4595644", "0.45945957", "0.45945224", "0.45906648", "0.4589783", "0.45850772", "0.45829177", "0.45753145", "0.45725456", "0.45722848", "0.45705077", "0.45679379", "0.45668533", "0.45632863", "0.455792" ]
0.76140094
0
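Taken together, the three values above close out one record: the per-negative similarity scores, the positive document's score, and its rank. A minimal sketch of reading such a record in Go follows; it assumes each row is serialized as one JSON object per line, and the field names, struct, and tags are illustrative assumptions rather than a published schema for this dump.

package main

import (
	"encoding/json"
	"fmt"
)

// Record is an assumed shape for one row of this dump; the JSON keys are
// guesses inferred from the order of the values shown, not a documented schema.
type Record struct {
	Query          string          `json:"query"`
	Document       string          `json:"document"`
	Metadata       json.RawMessage `json:"metadata"`
	Negatives      []string        `json:"negatives"`
	NegativeScores []string        `json:"negative_scores"` // scores appear as quoted strings in this dump
	DocumentScore  string          `json:"document_score"`
	DocumentRank   string          `json:"document_rank"`
}

func main() {
	line := []byte(`{"query":"q","document":"d","negatives":["n"],"negative_scores":["0.5"],"document_score":"0.9","document_rank":"0"}`)
	var rec Record
	if err := json.Unmarshal(line, &rec); err != nil {
		panic(err)
	}
	fmt.Printf("%s ranked %s with %d negatives\n", rec.Query, rec.DocumentRank, len(rec.Negatives))
}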
gatherPipelineStats gathers the Pipeline metrics and adds results to the accumulator (for Logstash < 6)
func (logstash *Logstash) gatherPipelineStats(address string, accumulator telegraf.Accumulator) error {
	pipelineStats := &PipelineStats{}

	err := logstash.gatherJSONData(address, pipelineStats)
	if err != nil {
		return err
	}

	tags := map[string]string{
		"node_id":      pipelineStats.ID,
		"node_name":    pipelineStats.Name,
		"node_version": pipelineStats.Version,
		"source":       pipelineStats.Host,
	}

	flattener := jsonParser.JSONFlattener{}
	err = flattener.FlattenJSON("", pipelineStats.Pipeline.Events)
	if err != nil {
		return err
	}
	accumulator.AddFields("logstash_events", flattener.Fields, tags)

	err = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Inputs, "input", tags, accumulator)
	if err != nil {
		return err
	}
	err = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Filters, "filter", tags, accumulator)
	if err != nil {
		return err
	}
	err = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Outputs, "output", tags, accumulator)
	if err != nil {
		return err
	}

	err = logstash.gatherQueueStats(&pipelineStats.Pipeline.Queue, tags, accumulator)
	if err != nil {
		return err
	}

	return nil
}
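The function above decodes the node-stats response into PipelineStats before flattening it. For readability, here is a plausible shape of those types, reconstructed only from the field accesses in the function (ID, Name, Version, Host, Pipeline.Events, Pipeline.Plugins.Inputs/Filters/Outputs, Pipeline.Queue); the actual definitions in the plugin source may differ in detail.

// Assumed shapes, inferred from gatherPipelineStats; not copied from the
// plugin's source.
type PipelineStats struct {
	ID       string   `json:"id"`
	Name     string   `json:"name"`
	Host     string   `json:"host"`
	Version  string   `json:"version"`
	Pipeline Pipeline `json:"pipeline"`
}

type Pipeline struct {
	Events  interface{}     `json:"events"` // flattened into logstash_events fields
	Plugins PipelinePlugins `json:"plugins"`
	Queue   PipelineQueue   `json:"queue"`
}

type PipelinePlugins struct {
	Inputs  []Plugin `json:"inputs"`
	Filters []Plugin `json:"filters"`
	Outputs []Plugin `json:"outputs"`
}

// Plugin and PipelineQueue carry per-plugin and queue counters; their exact
// fields depend on the Logstash version being monitored.
type Plugin struct {
	ID     string      `json:"id"`
	Name   string      `json:"name"`
	Events interface{} `json:"events"`
}

type PipelineQueue struct {
	Type     string      `json:"type"`
	Events   interface{} `json:"events"`
	Capacity interface{} `json:"capacity"`
	Data     interface{} `json:"data"`
}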
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (logstash *Logstash) gatherJVMStats(address string, accumulator telegraf.Accumulator) error {\n\tjvmStats := &JVMStats{}\n\n\terr := logstash.gatherJSONData(address, jvmStats)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttags := map[string]string{\n\t\t\"node_id\": jvmStats.ID,\n\t\t\"node_name\": jvmStats.Name,\n\t\t\"node_version\": jvmStats.Version,\n\t\t\"source\": jvmStats.Host,\n\t}\n\n\tflattener := jsonParser.JSONFlattener{}\n\terr = flattener.FlattenJSON(\"\", jvmStats.JVM)\n\tif err != nil {\n\t\treturn err\n\t}\n\taccumulator.AddFields(\"logstash_jvm\", flattener.Fields, tags)\n\n\treturn nil\n}", "func (logstash *Logstash) Gather(accumulator telegraf.Accumulator) error {\n\tif logstash.client == nil {\n\t\tclient, err := logstash.createHTTPClient()\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlogstash.client = client\n\t}\n\n\tif choice.Contains(\"jvm\", logstash.Collect) {\n\t\tjvmURL, err := url.Parse(logstash.URL + jvmStats)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := logstash.gatherJVMStats(jvmURL.String(), accumulator); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif choice.Contains(\"process\", logstash.Collect) {\n\t\tprocessURL, err := url.Parse(logstash.URL + processStats)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := logstash.gatherProcessStats(processURL.String(), accumulator); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif choice.Contains(\"pipelines\", logstash.Collect) {\n\t\tif logstash.SinglePipeline {\n\t\t\tpipelineURL, err := url.Parse(logstash.URL + pipelineStats)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := logstash.gatherPipelineStats(pipelineURL.String(), accumulator); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tpipelinesURL, err := url.Parse(logstash.URL + pipelinesStats)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := logstash.gatherPipelinesStats(pipelinesURL.String(), accumulator); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c *Collector) Transform(allStats *NodeStatsResponse) (metrics []*exportertools.Metric) {\n for _, stats := range allStats.Nodes {\n // GC Stats\n for _, gcstats := range stats.JVM.GC.Collectors {\n metrics = append(metrics, c.ConvertToMetric(\"jvm_gc_collection_seconds_count\",\n float64(gcstats.CollectionCount),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_gc_collection_seconds_sum\",\n float64(gcstats.CollectionTime / 1000),\n \"COUNTER\",\n nil))\n }\n\n // Breaker stats\n for _, bstats := range stats.Breakers {\n metrics = append(metrics, c.ConvertToMetric(\"breakers_estimated_size_bytes\",\n float64(bstats.EstimatedSize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"breakers_limit_size_bytes\",\n float64(bstats.LimitSize),\n \"GAUGE\",\n nil))\n }\n\n // Thread Pool stats\n for pool, pstats := range stats.ThreadPool {\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_completed_count\",\n float64(pstats.Completed),\n \"COUNTER\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_rejected_count\",\n float64(pstats.Rejected),\n \"COUNTER\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_active_count\",\n float64(pstats.Active),\n \"GAUGE\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_threads_count\",\n float64(pstats.Threads),\n \"GAUGE\",\n 
map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_largest_count\",\n float64(pstats.Largest),\n \"GAUGE\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_queue_count\",\n float64(pstats.Queue),\n \"GAUGE\",\n map[string]string{\"type\": pool}))\n }\n\n // JVM Memory Stats\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_committed_bytes\",\n float64(stats.JVM.Mem.HeapCommitted),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_used_bytes\",\n float64(stats.JVM.Mem.HeapUsed),\n \"GAUGE\",\n nil))\n\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_max_bytes\",\n float64(stats.JVM.Mem.HeapMax),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_committed_bytes\",\n float64(stats.JVM.Mem.NonHeapCommitted),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_used_bytes\",\n float64(stats.JVM.Mem.NonHeapUsed),\n \"GAUGE\",\n nil))\n\n // Indices Stats)\n metrics = append(metrics, c.ConvertToMetric(\"indices_fielddata_memory_size_bytes\",\n float64(stats.Indices.FieldData.MemorySize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_fielddata_evictions\",\n float64(stats.Indices.FieldData.Evictions),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_filter_cache_memory_size_bytes\",\n float64(stats.Indices.FilterCache.MemorySize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_filter_cache_evictions\",\n float64(stats.Indices.FilterCache.Evictions),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_query_cache_memory_size_bytes\",\n float64(stats.Indices.QueryCache.MemorySize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_query_cache_evictions\",\n float64(stats.Indices.QueryCache.Evictions),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_request_cache_memory_size_bytes\",\n float64(stats.Indices.QueryCache.MemorySize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_request_cache_evictions\",\n float64(stats.Indices.QueryCache.Evictions),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_docs\",\n float64(stats.Indices.Docs.Count),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_docs_deleted\",\n float64(stats.Indices.Docs.Deleted),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_segments_memory_bytes\",\n float64(stats.Indices.Segments.Memory),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_segments_count\",\n float64(stats.Indices.Segments.Count),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_store_size_bytes\",\n float64(stats.Indices.Store.Size),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_store_throttle_time_ms_total\",\n float64(stats.Indices.Store.ThrottleTime),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_flush_total\",\n float64(stats.Indices.Flush.Total),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_flush_time_ms_total\",\n float64(stats.Indices.Flush.Time),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_indexing_index_time_ms_total\",\n 
float64(stats.Indices.Indexing.IndexTime),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_indexing_index_total\",\n float64(stats.Indices.Indexing.IndexTotal),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_merges_total_time_ms_total\",\n float64(stats.Indices.Merges.TotalTime),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_merges_total_size_bytes_total\",\n float64(stats.Indices.Merges.TotalSize),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_merges_total\",\n float64(stats.Indices.Merges.Total),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_refresh_total_time_ms_total\",\n float64(stats.Indices.Refresh.TotalTime),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_refresh_total\",\n float64(stats.Indices.Refresh.Total),\n \"COUNTER\",\n nil))\n\n // Transport Stats)\n metrics = append(metrics, c.ConvertToMetric(\"transport_rx_packets_total\",\n float64(stats.Transport.RxCount),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"transport_rx_size_bytes_total\",\n float64(stats.Transport.RxSize),\n \"COUNTER\",\n nil))\n\n\n metrics = append(metrics, c.ConvertToMetric(\"transport_tx_packets_total\",\n float64(stats.Transport.TxCount),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"transport_tx_size_bytes_total\",\n float64(stats.Transport.TxSize),\n \"COUNTER\",\n nil))\n\n // Process Stats)\n metrics = append(metrics, c.ConvertToMetric(\"process_cpu_percent\",\n float64(stats.Process.CPU.Percent),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_mem_resident_size_bytes\",\n float64(stats.Process.Memory.Resident),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_mem_share_size_bytes\",\n float64(stats.Process.Memory.Share),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_mem_virtual_size_bytes\",\n float64(stats.Process.Memory.TotalVirtual),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_open_files_count\",\n float64(stats.Process.OpenFD),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_cpu_time_seconds_sum\",\n float64(stats.Process.CPU.Total / 1000),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_cpu_time_seconds_sum\",\n float64(stats.Process.CPU.Sys / 1000),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_cpu_time_seconds_sum\",\n float64(stats.Process.CPU.User / 1000),\n \"COUNTER\",\n nil))\n\n }\n\n return metrics\n}", "func (logstash *Logstash) gatherPipelinesStats(address string, accumulator telegraf.Accumulator) error {\n\tpipelinesStats := &PipelinesStats{}\n\n\terr := logstash.gatherJSONData(address, pipelinesStats)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor pipelineName, pipeline := range pipelinesStats.Pipelines {\n\t\ttags := map[string]string{\n\t\t\t\"node_id\": pipelinesStats.ID,\n\t\t\t\"node_name\": pipelinesStats.Name,\n\t\t\t\"node_version\": pipelinesStats.Version,\n\t\t\t\"pipeline\": pipelineName,\n\t\t\t\"source\": pipelinesStats.Host,\n\t\t}\n\n\t\tflattener := jsonParser.JSONFlattener{}\n\t\terr := flattener.FlattenJSON(\"\", pipeline.Events)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\taccumulator.AddFields(\"logstash_events\", flattener.Fields, tags)\n\n\t\terr = 
logstash.gatherPluginsStats(pipeline.Plugins.Inputs, \"input\", tags, accumulator)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = logstash.gatherPluginsStats(pipeline.Plugins.Filters, \"filter\", tags, accumulator)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = logstash.gatherPluginsStats(pipeline.Plugins.Outputs, \"output\", tags, accumulator)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = logstash.gatherQueueStats(&pipeline.Queue, tags, accumulator)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (logstash *Logstash) gatherProcessStats(address string, accumulator telegraf.Accumulator) error {\n\tprocessStats := &ProcessStats{}\n\n\terr := logstash.gatherJSONData(address, processStats)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttags := map[string]string{\n\t\t\"node_id\": processStats.ID,\n\t\t\"node_name\": processStats.Name,\n\t\t\"node_version\": processStats.Version,\n\t\t\"source\": processStats.Host,\n\t}\n\n\tflattener := jsonParser.JSONFlattener{}\n\terr = flattener.FlattenJSON(\"\", processStats.Process)\n\tif err != nil {\n\t\treturn err\n\t}\n\taccumulator.AddFields(\"logstash_process\", flattener.Fields, tags)\n\n\treturn nil\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\t// Reset metrics.\n\tfor _, vec := range e.gauges {\n\t\tvec.Reset()\n\t}\n\n\tfor _, vec := range e.counters {\n\t\tvec.Reset()\n\t}\n\n\tresp, err := e.client.Get(e.URI)\n\tif err != nil {\n\t\te.up.Set(0)\n\t\tlog.Printf(\"Error while querying Elasticsearch: %v\", err)\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\tlog.Printf(\"Failed to read ES response body: %v\", err)\n\t\te.up.Set(0)\n\t\treturn\n\t}\n\n\te.up.Set(1)\n\n\tvar all_stats NodeStatsResponse\n\terr = json.Unmarshal(body, &all_stats)\n\n\tif err != nil {\n\t\tlog.Printf(\"Failed to unmarshal JSON into struct: %v\", err)\n\t\treturn\n\t}\n\n\t// Regardless of whether we're querying the local host or the whole\n\t// cluster, here we can just iterate through all nodes found.\n\n\tfor node, stats := range all_stats.Nodes {\n\t\tlog.Printf(\"Processing node %v\", node)\n\t\t// GC Stats\n\t\tfor collector, gcstats := range stats.JVM.GC.Collectors {\n\t\t\te.counters[\"jvm_gc_collection_count\"].WithLabelValues(all_stats.ClusterName, stats.Name, collector).Set(float64(gcstats.CollectionCount))\n\t\t\te.counters[\"jvm_gc_collection_time_in_millis\"].WithLabelValues(all_stats.ClusterName, stats.Name, collector).Set(float64(gcstats.CollectionTime))\n\t\t}\n\n\t\t// Breaker stats\n\t\tfor breaker, bstats := range stats.Breakers {\n\t\t\te.gauges[\"breakers_estimated_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name, breaker).Set(float64(bstats.EstimatedSize))\n\t\t\te.gauges[\"breakers_limit_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name, breaker).Set(float64(bstats.LimitSize))\n\t\t}\n\n\t\t// JVM Memory Stats\n\t\te.gauges[\"jvm_mem_heap_committed_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.HeapCommitted))\n\t\te.gauges[\"jvm_mem_heap_used_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.HeapUsed))\n\t\te.gauges[\"jvm_mem_heap_max_in_bytes\"].WithLabelValues(all_stats.ClusterName, 
stats.Name).Set(float64(stats.JVM.Mem.HeapMax))\n\t\te.gauges[\"jvm_mem_non_heap_committed_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.NonHeapCommitted))\n\t\te.gauges[\"jvm_mem_non_heap_used_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.NonHeapUsed))\n\n\t\t// Indices Stats\n\t\te.gauges[\"indices_fielddata_evictions\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FieldData.Evictions))\n\t\te.gauges[\"indices_fielddata_memory_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FieldData.MemorySize))\n\t\te.gauges[\"indices_filter_cache_evictions\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FilterCache.Evictions))\n\t\te.gauges[\"indices_filter_cache_memory_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FilterCache.MemorySize))\n\n\t\te.gauges[\"indices_docs_count\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Docs.Count))\n\t\te.gauges[\"indices_docs_deleted\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Docs.Deleted))\n\n\t\te.gauges[\"indices_segments_memory_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Segments.Memory))\n\n\t\te.gauges[\"indices_store_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Store.Size))\n\t\te.counters[\"indices_store_throttle_time_in_millis\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Store.ThrottleTime))\n\n\t\te.counters[\"indices_flush_total\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Flush.Total))\n\t\te.counters[\"indices_flush_time_in_millis\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Flush.Time))\n\n\t\t// Transport Stats\n\t\te.counters[\"transport_rx_count\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.RxCount))\n\t\te.counters[\"transport_rx_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.RxSize))\n\t\te.counters[\"transport_tx_count\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.TxCount))\n\t\te.counters[\"transport_tx_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.TxSize))\n\t}\n\n\t// Report metrics.\n\tch <- e.up\n\n\tfor _, vec := range e.counters {\n\t\tvec.Collect(ch)\n\t}\n\n\tfor _, vec := range e.gauges {\n\t\tvec.Collect(ch)\n\t}\n}", "func (logstash *Logstash) gatherPluginsStats(\n\tplugins []Plugin,\n\tpluginType string,\n\ttags map[string]string,\n\taccumulator telegraf.Accumulator,\n) error {\n\tfor _, plugin := range plugins {\n\t\tpluginTags := map[string]string{\n\t\t\t\"plugin_name\": plugin.Name,\n\t\t\t\"plugin_id\": plugin.ID,\n\t\t\t\"plugin_type\": pluginType,\n\t\t}\n\t\tfor tag, value := range tags {\n\t\t\tpluginTags[tag] = value\n\t\t}\n\t\tflattener := jsonParser.JSONFlattener{}\n\t\terr := flattener.FlattenJSON(\"\", plugin.Events)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\taccumulator.AddFields(\"logstash_plugins\", flattener.Fields, pluginTags)\n\t\tif plugin.Failures != nil {\n\t\t\tfailuresFields := map[string]interface{}{\"failures\": *plugin.Failures}\n\t\t\taccumulator.AddFields(\"logstash_plugins\", failuresFields, 
pluginTags)\n\t\t}\n\t\t/*\n\t\t\tThe elasticsearch & opensearch output produces additional stats\n\t\t\taround bulk requests and document writes (that are elasticsearch\n\t\t\tand opensearch specific). Collect those below:\n\t\t*/\n\t\tif pluginType == \"output\" && (plugin.Name == \"elasticsearch\" || plugin.Name == \"opensearch\") {\n\t\t\t/*\n\t\t\t\tThe \"bulk_requests\" section has details about batch writes\n\t\t\t\tinto Elasticsearch\n\n\t\t\t\t \"bulk_requests\" : {\n\t\t\t\t\t\"successes\" : 2870,\n\t\t\t\t\t\"responses\" : {\n\t\t\t\t\t \"200\" : 2870\n\t\t\t\t\t},\n\t\t\t\t\t\"failures\": 262,\n\t\t\t\t\t\"with_errors\": 9089\n\t\t\t\t },\n\t\t\t*/\n\t\t\tflattener := jsonParser.JSONFlattener{}\n\t\t\terr := flattener.FlattenJSON(\"\", plugin.BulkRequests)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfor k, v := range flattener.Fields {\n\t\t\t\tif strings.HasPrefix(k, \"bulk_requests\") {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tnewKey := fmt.Sprintf(\"bulk_requests_%s\", k)\n\t\t\t\tflattener.Fields[newKey] = v\n\t\t\t\tdelete(flattener.Fields, k)\n\t\t\t}\n\t\t\taccumulator.AddFields(\"logstash_plugins\", flattener.Fields, pluginTags)\n\n\t\t\t/*\n\t\t\t\tThe \"documents\" section has counts of individual documents\n\t\t\t\twritten/retried/etc.\n\t\t\t\t \"documents\" : {\n\t\t\t\t\t\"successes\" : 2665549,\n\t\t\t\t\t\"retryable_failures\": 13733\n\t\t\t\t }\n\t\t\t*/\n\t\t\tflattener = jsonParser.JSONFlattener{}\n\t\t\terr = flattener.FlattenJSON(\"\", plugin.Documents)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfor k, v := range flattener.Fields {\n\t\t\t\tif strings.HasPrefix(k, \"documents\") {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tnewKey := fmt.Sprintf(\"documents_%s\", k)\n\t\t\t\tflattener.Fields[newKey] = v\n\t\t\t\tdelete(flattener.Fields, k)\n\t\t\t}\n\t\t\taccumulator.AddFields(\"logstash_plugins\", flattener.Fields, pluginTags)\n\t\t}\n\t}\n\n\treturn nil\n}", "func CollectRuntimeMemStats(statsd scopedstatsd.Client, memstatsCurrent *runtime.MemStats, memstatsPrev *runtime.MemStats, tags []string) {\n\t// Collect number of bytes obtained from system.\n\tstatsd.Gauge(\"mem.sys_bytes\", float64(memstatsCurrent.Sys), tags, 1)\n\n\t// Collect number of pointer lookups.\n\tstatsd.Gauge(\"mem.pointer_lookups\", float64(memstatsCurrent.Lookups), tags, 1)\n\n\t// Collect increased heap objects allocated compared to last flush.\n\tstatsd.Count(\"mem.mallocs_total\", int64(memstatsCurrent.Mallocs-memstatsPrev.Mallocs), tags, 1)\n\n\t// Collect increased heap objects freed compared to last flush.\n\tstatsd.Count(\"mem.frees_total\", int64(memstatsCurrent.Frees-memstatsPrev.Frees), tags, 1)\n\n\t// Collect number of mallocs.\n\tstatsd.Gauge(\"mem.mallocs_count\", float64(memstatsCurrent.Mallocs-memstatsCurrent.Frees), tags, 1)\n\n\t// Collect number of bytes newly allocated for heap objects compared to last flush.\n\tstatsd.Count(\"mem.heap_alloc_bytes_total\", int64(memstatsCurrent.TotalAlloc-memstatsPrev.TotalAlloc), tags, 1)\n\n\t// Collect number of heap bytes allocated and still in use.\n\tstatsd.Gauge(\"mem.heap_alloc_bytes\", float64(memstatsCurrent.HeapAlloc), tags, 1)\n\n\t// Collect number of heap bytes obtained from system.\n\tstatsd.Gauge(\"mem.heap_sys_bytes\", float64(memstatsCurrent.HeapSys), tags, 1)\n\n\t// Collect number of heap bytes waiting to be used.\n\tstatsd.Gauge(\"mem.heap_idle_bytes\", float64(memstatsCurrent.HeapIdle), tags, 1)\n\n\t// Collect number of heap bytes that are in 
use.\n\tstatsd.Gauge(\"mem.heap_inuse_bytes\", float64(memstatsCurrent.HeapInuse), tags, 1)\n\n\t// Collect number of heap bytes released to OS.\n\tstatsd.Gauge(\"mem.heap_released_bytes\", float64(memstatsCurrent.HeapReleased), tags, 1)\n\n\t// Collect number of allocated objects.\n\tstatsd.Gauge(\"mem.heap_objects_count\", float64(memstatsCurrent.HeapObjects), tags, 1)\n\n\t// Collect number of bytes in use by the stack allocator.\n\tstatsd.Gauge(\"mem.stack_inuse_bytes\", float64(memstatsCurrent.StackInuse), tags, 1)\n\n\t// Collect number of bytes obtained from system for stack allocator.\n\tstatsd.Gauge(\"mem.stack_sys_bytes\", float64(memstatsCurrent.StackSys), tags, 1)\n\n\t// Collect number of bytes in use by mspan structures.\n\tstatsd.Gauge(\"mem.mspan_inuse_bytes\", float64(memstatsCurrent.MSpanInuse), tags, 1)\n\n\t// Collect number of bytes used for mspan structures obtained from system.\n\tstatsd.Gauge(\"mem.mspan_sys_bytes\", float64(memstatsCurrent.MSpanSys), tags, 1)\n\n\t// Collect number of bytes in use by mcache structures.\n\tstatsd.Gauge(\"mem.mcache_inuse_bytes\", float64(memstatsCurrent.MCacheInuse), tags, 1)\n\n\t// Collect number of bytes used for mcache structures obtained from system.\n\tstatsd.Gauge(\"mem.mcache_sys_bytes\", float64(memstatsCurrent.MCacheSys), tags, 1)\n\n\t// Collect number of bytes used by the profiling bucket hash table.\n\tstatsd.Gauge(\"mem.buck_hash_sys_bytes\", float64(memstatsCurrent.BuckHashSys), tags, 1)\n\n\t// Collect number of bytes used for garbage collection system metadata.\n\tstatsd.Gauge(\"mem.gc_sys_bytes\", float64(memstatsCurrent.GCSys), tags, 1)\n\n\t// Collect number of bytes used for other system allocations.\n\tstatsd.Gauge(\"mem.other_sys_bytes\", float64(memstatsCurrent.OtherSys), tags, 1)\n\n\t// Collect number of heap bytes when next garbage collection will take pace.\n\tstatsd.Gauge(\"mem.next_gc_bytes\", float64(memstatsCurrent.NextGC), tags, 1)\n}", "func (e *Exporter) collect(ch chan<- prometheus.Metric) error {\n\tvar mempct, memtot, memfree float64\n\tif v, e := mem.VirtualMemory(); e == nil {\n\t\tmempct = v.UsedPercent\n\t\tmemtot = float64(v.Total)\n\t\tmemfree = float64(v.Free)\n\t}\n\tvar swappct, swaptot, swapfree float64\n\tif v, e := mem.SwapMemory(); e == nil {\n\t\tswappct = v.UsedPercent\n\t\tswaptot = float64(v.Total)\n\t\tswapfree = float64(v.Free)\n\t}\n\tvar cpupct float64\n\tif c, e := cpu.Percent(time.Millisecond, false); e == nil {\n\t\tcpupct = c[0] // one value since we didn't ask per cpu\n\t}\n\tvar load1, load5, load15 float64\n\tif l, e := load.Avg(); e == nil {\n\t\tload1 = l.Load1\n\t\tload5 = l.Load5\n\t\tload15 = l.Load15\n\t}\n\n\tvar cpuTotal, vsize, rss, openFDs, maxFDs, maxVsize float64\n\tif proc, err := procfs.NewProc(int(*pid)); err == nil {\n\t\tif stat, err := proc.NewStat(); err == nil {\n\t\t\tcpuTotal = float64(stat.CPUTime())\n\t\t\tvsize = float64(stat.VirtualMemory())\n\t\t\trss = float64(stat.ResidentMemory())\n\t\t}\n\t\tif fds, err := proc.FileDescriptorsLen(); err == nil {\n\t\t\topenFDs = float64(fds)\n\t\t}\n\t\tif limits, err := proc.NewLimits(); err == nil {\n\t\t\tmaxFDs = float64(limits.OpenFiles)\n\t\t\tmaxVsize = float64(limits.AddressSpace)\n\t\t}\n\t}\n\n\tvar procCpu, procMem float64\n\tvar estCon, lisCon, othCon, totCon, closeCon, timeCon, openFiles float64\n\tvar nThreads float64\n\tif proc, err := process.NewProcess(int32(*pid)); err == nil {\n\t\tif v, e := proc.CPUPercent(); e == nil {\n\t\t\tprocCpu = float64(v)\n\t\t}\n\t\tif v, e := 
proc.MemoryPercent(); e == nil {\n\t\t\tprocMem = float64(v)\n\t\t}\n\n\t\tif v, e := proc.NumThreads(); e == nil {\n\t\t\tnThreads = float64(v)\n\t\t}\n\t\tif connections, e := proc.Connections(); e == nil {\n\t\t\tfor _, v := range connections {\n\t\t\t\tif v.Status == \"LISTEN\" {\n\t\t\t\t\tlisCon += 1\n\t\t\t\t} else if v.Status == \"ESTABLISHED\" {\n\t\t\t\t\testCon += 1\n\t\t\t\t} else if v.Status == \"TIME_WAIT\" {\n\t\t\t\t\ttimeCon += 1\n\t\t\t\t} else if v.Status == \"CLOSE_WAIT\" {\n\t\t\t\t\tcloseCon += 1\n\t\t\t\t} else {\n\t\t\t\t\tothCon += 1\n\t\t\t\t}\n\t\t\t}\n\t\t\ttotCon = lisCon + estCon + timeCon + closeCon + othCon\n\t\t}\n\t\tif oFiles, e := proc.OpenFiles(); e == nil {\n\t\t\topenFiles = float64(len(oFiles))\n\t\t}\n\t}\n\n\t// metrics from process collector\n\tch <- prometheus.MustNewConstMetric(e.cpuTotal, prometheus.CounterValue, cpuTotal)\n\tch <- prometheus.MustNewConstMetric(e.openFDs, prometheus.CounterValue, openFDs)\n\tch <- prometheus.MustNewConstMetric(e.maxFDs, prometheus.CounterValue, maxFDs)\n\tch <- prometheus.MustNewConstMetric(e.vsize, prometheus.CounterValue, vsize)\n\tch <- prometheus.MustNewConstMetric(e.maxVsize, prometheus.CounterValue, maxVsize)\n\tch <- prometheus.MustNewConstMetric(e.rss, prometheus.CounterValue, rss)\n\t// node specific metrics\n\tch <- prometheus.MustNewConstMetric(e.memPercent, prometheus.CounterValue, mempct)\n\tch <- prometheus.MustNewConstMetric(e.memTotal, prometheus.CounterValue, memtot)\n\tch <- prometheus.MustNewConstMetric(e.memFree, prometheus.CounterValue, memfree)\n\tch <- prometheus.MustNewConstMetric(e.swapPercent, prometheus.CounterValue, swappct)\n\tch <- prometheus.MustNewConstMetric(e.swapTotal, prometheus.CounterValue, swaptot)\n\tch <- prometheus.MustNewConstMetric(e.swapFree, prometheus.CounterValue, swapfree)\n\tch <- prometheus.MustNewConstMetric(e.numCpus, prometheus.CounterValue, float64(runtime.NumCPU()))\n\tch <- prometheus.MustNewConstMetric(e.load1, prometheus.CounterValue, load1)\n\tch <- prometheus.MustNewConstMetric(e.load5, prometheus.CounterValue, load5)\n\tch <- prometheus.MustNewConstMetric(e.load15, prometheus.CounterValue, load15)\n\t// process specific metrics\n\tch <- prometheus.MustNewConstMetric(e.procCpu, prometheus.CounterValue, procCpu)\n\tch <- prometheus.MustNewConstMetric(e.procMem, prometheus.CounterValue, procMem)\n\tch <- prometheus.MustNewConstMetric(e.numThreads, prometheus.CounterValue, nThreads)\n\tch <- prometheus.MustNewConstMetric(e.cpuPercent, prometheus.CounterValue, cpupct)\n\tch <- prometheus.MustNewConstMetric(e.openFiles, prometheus.CounterValue, openFiles)\n\tch <- prometheus.MustNewConstMetric(e.totCon, prometheus.CounterValue, totCon)\n\tch <- prometheus.MustNewConstMetric(e.lisCon, prometheus.CounterValue, lisCon)\n\tch <- prometheus.MustNewConstMetric(e.estCon, prometheus.CounterValue, estCon)\n\tch <- prometheus.MustNewConstMetric(e.closeCon, prometheus.CounterValue, closeCon)\n\tch <- prometheus.MustNewConstMetric(e.timeCon, prometheus.CounterValue, timeCon)\n\treturn nil\n}", "func CollectProcessMetrics(refresh time.Duration) {\n\t// Short circuit if the metrics system is disabled\n\tif !Enabled {\n\t\treturn\n\t}\n\t// Create the various data collectors\n\tmemstates := make([]*runtime.MemStats, 2)\n\tdiskstates := make([]*DiskStats, 2)\n\tfor i := 0; i < len(memstates); i++ {\n\t\tmemstates[i] = new(runtime.MemStats)\n\t\tdiskstates[i] = new(DiskStats)\n\t}\n\t// Define the various metrics to collect\n\tmemAllocs := metics.GetOrRegisterMeter(\"system/memory/allocs\", metics.DefaultRegistry)\n\tmemFrees := metics.GetOrRegisterMeter(\"system/memory/frees\", metics.DefaultRegistry)\n\tmemInuse := metics.GetOrRegisterMeter(\"system/memory/inuse\", metics.DefaultRegistry)\n\tmemPauses := metics.GetOrRegisterMeter(\"system/memory/pauses\", metics.DefaultRegistry)\n\n\tvar diskReads, diskReadBytes, diskWrites, diskWriteBytes metics.Meter\n\tif err := ReadDiskStats(diskstates[0]); err == nil {\n\t\tdiskReads = metics.GetOrRegisterMeter(\"system/disk/readcount\", metics.DefaultRegistry)\n\t\tdiskReadBytes = metics.GetOrRegisterMeter(\"system/disk/readdata\", metics.DefaultRegistry)\n\t\tdiskWrites = metics.GetOrRegisterMeter(\"system/disk/writecount\", metics.DefaultRegistry)\n\t\tdiskWriteBytes = metics.GetOrRegisterMeter(\"system/disk/writedata\", metics.DefaultRegistry)\n\t} else {\n\t\tbgmlogs.Debug(\"Failed to read disk metrics\", \"err\", err)\n\t}\n\t// Iterate loading the different states and updating the meters\n\tfor i := 1; ; i++ {\n\t\truntime.ReadMemStats(memstates[i%2])\n\t\tmemAllocs.Mark(int64(memstates[i%2].Mallocs - memstates[(i-1)%2].Mallocs))\n\t\tmemFrees.Mark(int64(memstates[i%2].Frees - memstates[(i-1)%2].Frees))\n\t\tmemInuse.Mark(int64(memstates[i%2].Alloc - memstates[(i-1)%2].Alloc))\n\t\tmemPauses.Mark(int64(memstates[i%2].PauseTotalNs - memstates[(i-1)%2].PauseTotalNs))\n\n\t\tif ReadDiskStats(diskstates[i%2]) == nil {\n\t\t\tdiskReads.Mark(diskstates[i%2].ReadCount - diskstates[(i-1)%2].ReadCount)\n\t\t\tdiskReadBytes.Mark(diskstates[i%2].ReadBytes - diskstates[(i-1)%2].ReadBytes)\n\t\t\tdiskWrites.Mark(diskstates[i%2].WriteCount - diskstates[(i-1)%2].WriteCount)\n\t\t\tdiskWriteBytes.Mark(diskstates[i%2].WriteBytes - diskstates[(i-1)%2].WriteBytes)\n\t\t}\n\t\ttime.Sleep(refresh)\n\t}\n}", "func (s *Server) agentMemoryStats(metrics cgm.Metrics, mtags []string) {\n\t// var mem syscall.Rusage\n\t// if err := syscall.Getrusage(syscall.RUSAGE_SELF, &mem); err == nil {\n\t// \tmetrics[tags.MetricNameWithStreamTags(\"agent_max_rss\", tags.FromList(ctags))] = cgm.Metric{Value: uint64(mem.Maxrss * 1024), Type: \"L\"} // maximum resident set size used (in kilobytes)\n\t// } else {\n\t// \ts.logger.Warn().Err(err).Msg(\"collecting rss from system\")\n\t// }\n}", "func (p *ProcMetrics) Collect() {\n\tif m, err := CollectProcInfo(p.pid); err == nil {\n\t\tnow := time.Now()\n\n\t\tif !p.lastTime.IsZero() {\n\t\t\tratio := 1.0\n\t\t\tswitch {\n\t\t\tcase m.CPU.Period > 0 && m.CPU.Quota > 0:\n\t\t\t\tratio = float64(m.CPU.Quota) / float64(m.CPU.Period)\n\t\t\tcase m.CPU.Shares > 0:\n\t\t\t\tratio = float64(m.CPU.Shares) / 1024\n\t\t\tdefault:\n\t\t\t\tratio = 1 / float64(runtime.NumCPU())\n\t\t\t}\n\n\t\t\tinterval := ratio * float64(now.Sub(p.lastTime))\n\n\t\t\tp.cpu.user.time = m.CPU.User - p.last.CPU.User\n\t\t\tp.cpu.user.percent = 100 * float64(p.cpu.user.time) / interval\n\n\t\t\tp.cpu.system.time = m.CPU.Sys - p.last.CPU.Sys\n\t\t\tp.cpu.system.percent = 100 * float64(p.cpu.system.time) / interval\n\n\t\t\tp.cpu.total.time = (m.CPU.User + m.CPU.Sys) - (p.last.CPU.User + p.last.CPU.Sys)\n\t\t\tp.cpu.total.percent = 100 * float64(p.cpu.total.time) / interval\n\t\t}\n\n\t\tp.memory.available = m.Memory.Available\n\t\tp.memory.size = m.Memory.Size\n\t\tp.memory.resident.usage = m.Memory.Resident\n\t\tp.memory.resident.percent = 100 * float64(p.memory.resident.usage) / float64(p.memory.available)\n\t\tp.memory.shared.usage = m.Memory.Shared\n\t\tp.memory.text.usage = 
m.Memory.Text\n\t\tp.memory.data.usage = m.Memory.Data\n\t\tp.memory.pagefault.major.count = m.Memory.MajorPageFaults - p.last.Memory.MajorPageFaults\n\t\tp.memory.pagefault.minor.count = m.Memory.MinorPageFaults - p.last.Memory.MinorPageFaults\n\n\t\tp.files.open = m.Files.Open\n\t\tp.files.max = m.Files.Max\n\n\t\tp.threads.num = m.Threads.Num\n\t\tp.threads.switches.voluntary.count = m.Threads.VoluntaryContextSwitches - p.last.Threads.VoluntaryContextSwitches\n\t\tp.threads.switches.involuntary.count = m.Threads.InvoluntaryContextSwitches - p.last.Threads.InvoluntaryContextSwitches\n\n\t\tp.last = m\n\t\tp.lastTime = now\n\t\tp.engine.Report(p)\n\t}\n}", "func (sr *ServicedStatsReporter) gatherStats(t time.Time) []Sample {\n\tstats := []Sample{}\n\t// Handle the host metrics.\n\treg, _ := sr.hostRegistry.(*metrics.StandardRegistry)\n\treg.Each(func(name string, i interface{}) {\n\t\ttagmap := map[string]string{\n\t\t\t\"controlplane_host_id\": sr.hostID,\n\t\t}\n\t\tswitch metric := i.(type) {\n\t\tcase metrics.Gauge:\n\t\t\tstats = append(stats, Sample{name, strconv.FormatInt(metric.Value(), 10), t.Unix(), tagmap})\n\t\tcase metrics.GaugeFloat64:\n\t\t\tstats = append(stats, Sample{name, strconv.FormatFloat(metric.Value(), 'f', -1, 32), t.Unix(), tagmap})\n\t\t}\n\t})\n\t// Handle each container's metrics.\n\tfor key, registry := range sr.containerRegistries {\n\t\treg, _ := registry.(*metrics.StandardRegistry)\n\t\treg.Each(func(name string, i interface{}) {\n\t\t\ttagmap := map[string]string{\n\t\t\t\t\"controlplane_host_id\": sr.hostID,\n\t\t\t\t\"controlplane_service_id\": key.serviceID,\n\t\t\t\t\"controlplane_instance_id\": strconv.FormatInt(int64(key.instanceID), 10),\n\t\t\t}\n\t\t\tswitch metric := i.(type) {\n\t\t\tcase metrics.Gauge:\n\t\t\t\tstats = append(stats, Sample{name, strconv.FormatInt(metric.Value(), 10), t.Unix(), tagmap})\n\t\t\tcase metrics.GaugeFloat64:\n\t\t\t\tstats = append(stats, Sample{name, strconv.FormatFloat(metric.Value(), 'f', -1, 32), t.Unix(), tagmap})\n\t\t\t}\n\t\t})\n\t}\n\treturn stats\n}", "func CollectRuntimeMetrics(registry *Registry) {\n\tCollectMemStats(registry)\n\tCollectSysStats(registry)\n}", "func (m *KubeletMonitor) parsePodStats(podStats []stats.PodStats) {\n\tfor _, podStat := range podStats {\n\t\tvar cpuUsageNanoCoreSum uint64\n\t\tvar memoryUsageBytesSum uint64\n\t\tfor _, containerStat := range podStat.Containers {\n\t\t\tif containerStat.CPU != nil && containerStat.CPU.UsageNanoCores != nil {\n\t\t\t\tcpuUsageNanoCoreSum += *containerStat.CPU.UsageNanoCores\n\t\t\t}\n\t\t\tif containerStat.Memory != nil && containerStat.Memory.UsageBytes != nil {\n\t\t\t\tmemoryUsageBytesSum += *containerStat.Memory.UsageBytes\n\t\t\t}\n\t\t}\n\t\tglog.V(4).Infof(\"Cpu usage of pod %s is %f core\", util.PodStatsKeyFunc(podStat),\n\t\t\tfloat64(cpuUsageNanoCoreSum)/util.NanoToUnit)\n\t\tpodCpuUsageCoreMetrics := metrics.NewEntityResourceMetric(task.PodType, util.PodStatsKeyFunc(podStat),\n\t\t\tmetrics.CPU, metrics.Used, float64(cpuUsageNanoCoreSum)/util.NanoToUnit)\n\n\t\tglog.V(4).Infof(\"Memory usage of pod %s is %f Kb\", util.PodStatsKeyFunc(podStat),\n\t\t\tfloat64(memoryUsageBytesSum)/util.KilobytesToBytes)\n\t\tpodMemoryUsageCoreMetrics := metrics.NewEntityResourceMetric(task.PodType, util.PodStatsKeyFunc(podStat),\n\t\t\tmetrics.Memory, metrics.Used, float64(memoryUsageBytesSum)/util.KilobytesToBytes)\n\n\t\t// application cpu and mem used are the same as pod's.\n\t\tapplicationCpuUsageCoreMetrics := 
metrics.NewEntityResourceMetric(task.ApplicationType,\n\t\t\tutil.PodStatsKeyFunc(podStat), metrics.CPU, metrics.Used,\n\t\t\tfloat64(cpuUsageNanoCoreSum)/util.NanoToUnit)\n\t\tapplicationMemoryUsageCoreMetrics := metrics.NewEntityResourceMetric(task.ApplicationType,\n\t\t\tutil.PodStatsKeyFunc(podStat), metrics.Memory, metrics.Used,\n\t\t\tfloat64(memoryUsageBytesSum)/util.KilobytesToBytes)\n\n\t\tm.metricSink.AddNewMetricEntries(podCpuUsageCoreMetrics,\n\t\t\tpodMemoryUsageCoreMetrics,\n\t\t\tapplicationCpuUsageCoreMetrics,\n\t\t\tapplicationMemoryUsageCoreMetrics)\n\t}\n}", "func Collect(ctx context.Context) error {\n\tif !singleton.enabled {\n\t\treturn nil\n\t}\n\n\tif singleton.darkstatAddr == \"\" {\n\t\treturn fmt.Errorf(\"Darkstat address is empty\")\n\t}\n\n\tstartTime := time.Now()\n\n\tinventoryHosts := inventory.Get()\n\n\tlocalAddr, err := network.DefaultLocalAddr()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Labels for source traffic, needed to build the dependency graph\n\tlocalHostgroup := localAddr.String()\n\tlocalDomain := localAddr.String()\n\tlocalInventory, ok := inventoryHosts[localAddr.String()]\n\tif ok {\n\t\tlocalHostgroup = localInventory.Hostgroup\n\t\tlocalDomain = localInventory.Domain\n\t} else {\n\t\tlog.Debugf(\"Local address doesn't exist in inventory: %v\", localAddr.String())\n\t}\n\n\t// Scrape darkstat prometheus endpoint for host_bytes_total\n\tvar darkstatHostBytesTotal *prom2json.Family\n\tdarkstatScrape, err := prometheus.Scrape(singleton.darkstatAddr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, v := range darkstatScrape {\n\t\tif v.Name == \"host_bytes_total\" {\n\t\t\tdarkstatHostBytesTotal = v\n\t\t\tbreak\n\t\t}\n\t}\n\tif darkstatHostBytesTotal == nil {\n\t\treturn fmt.Errorf(\"Metric host_bytes_total doesn't exist\")\n\t}\n\n\t// Extract relevant data out of host_bytes_total\n\tvar hosts []Metric\n\tfor _, m := range darkstatHostBytesTotal.Metrics {\n\t\tmetric := m.(prom2json.Metric)\n\n\t\tip := net.ParseIP(metric.Labels[\"ip\"])\n\n\t\t// Skip its own IP as we don't need it\n\t\tif ip.Equal(localAddr) {\n\t\t\tcontinue\n\t\t}\n\n\t\tinventoryHostInfo := inventoryHosts[metric.Labels[\"ip\"]]\n\n\t\tbandwidth, err := strconv.ParseFloat(metric.Value, 64)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to parse 'host_bytes_total' value: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tdirection := \"\"\n\t\t// Reversed from netfilter perspective\n\t\tswitch metric.Labels[\"dir\"] {\n\t\tcase \"out\":\n\t\t\tdirection = \"ingress\"\n\t\tcase \"in\":\n\t\t\tdirection = \"egress\"\n\t\t}\n\n\t\thosts = append(hosts, Metric{\n\t\t\tLocalHostgroup:  localHostgroup,\n\t\t\tRemoteHostgroup: inventoryHostInfo.Hostgroup,\n\t\t\tRemoteIPAddr:    metric.Labels[\"ip\"],\n\t\t\tLocalDomain:     localDomain,\n\t\t\tRemoteDomain:    inventoryHostInfo.Domain,\n\t\t\tDirection:       direction,\n\t\t\tBandwidth:       bandwidth,\n\t\t})\n\t}\n\n\tsingleton.mu.Lock()\n\tsingleton.hosts = hosts\n\tsingleton.mu.Unlock()\n\n\tlog.Debugf(\"taskdarkstat.Collect retrieved %v downstream metrics\", len(hosts))\n\tlog.Debugf(\"taskdarkstat.Collect process took %v\", time.Since(startTime))\n\treturn nil\n}", "func (h *Hugepages) gatherStatsPerNode(acc telegraf.Accumulator) error {\n\tnodeDirs, err := os.ReadDir(h.numaNodePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// read metrics from: node*/hugepages/hugepages-*/*\n\tfor _, nodeDir := range nodeDirs {\n\t\tif !nodeDir.IsDir() || !strings.HasPrefix(nodeDir.Name(), \"node\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tnodeNumber := 
strings.TrimPrefix(nodeDir.Name(), \"node\")\n\t\t_, err := strconv.Atoi(nodeNumber)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tperNodeTags := map[string]string{\n\t\t\t\"node\": nodeNumber,\n\t\t}\n\t\thugepagesPath := filepath.Join(h.numaNodePath, nodeDir.Name(), \"hugepages\")\n\t\terr = h.gatherFromHugepagePath(acc, \"hugepages_\"+perNodeHugepages, hugepagesPath, hugepagesMetricsPerNUMANode, perNodeTags)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (g gatherer) GatherMetrics(ctx context.Context, out *apm.Metrics) error {\n\tmetricFamilies, err := g.p.Gather()\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\tfor _, mf := range metricFamilies {\n\t\tname := mf.GetName()\n\t\tswitch mf.GetType() {\n\t\tcase dto.MetricType_COUNTER:\n\t\t\tfor _, m := range mf.GetMetric() {\n\t\t\t\tv := m.GetCounter().GetValue()\n\t\t\t\tout.Add(name, makeLabels(m.GetLabel()), v)\n\t\t\t}\n\t\tcase dto.MetricType_GAUGE:\n\t\t\tmetrics := mf.GetMetric()\n\t\t\tif name == \"go_info\" && len(metrics) == 1 && metrics[0].GetGauge().GetValue() == 1 {\n\t\t\t\t// Ignore the \"go_info\" metric from the\n\t\t\t\t// built-in GoCollector, as we provide\n\t\t\t\t// the same information in the payload.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, m := range metrics {\n\t\t\t\tv := m.GetGauge().GetValue()\n\t\t\t\tout.Add(name, makeLabels(m.GetLabel()), v)\n\t\t\t}\n\t\tcase dto.MetricType_UNTYPED:\n\t\t\tfor _, m := range mf.GetMetric() {\n\t\t\t\tv := m.GetUntyped().GetValue()\n\t\t\t\tout.Add(name, makeLabels(m.GetLabel()), v)\n\t\t\t}\n\t\tcase dto.MetricType_SUMMARY:\n\t\t\tfor _, m := range mf.GetMetric() {\n\t\t\t\ts := m.GetSummary()\n\t\t\t\tlabels := makeLabels(m.GetLabel())\n\t\t\t\tout.Add(name+\".count\", labels, float64(s.GetSampleCount()))\n\t\t\t\tout.Add(name+\".total\", labels, float64(s.GetSampleSum()))\n\t\t\t\tfor _, q := range s.GetQuantile() {\n\t\t\t\t\tp := int(q.GetQuantile() * 100)\n\t\t\t\t\tout.Add(name+\".percentile.\"+strconv.Itoa(p), labels, q.GetValue())\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\t// TODO(axw) MetricType_HISTOGRAM\n\t\t}\n\t}\n\treturn nil\n}", "func (h *Hugepages) gatherRootStats(acc telegraf.Accumulator) error {\n\treturn h.gatherFromHugepagePath(acc, \"hugepages_\"+rootHugepages, h.rootHugepagePath, hugepagesMetricsRoot, nil)\n}", "func reduceJavaAgentYaml(m *mainDefinitionParser) (map[string]*domainReducer, error) {\n\tthisDomainMap := make(map[string]*domainReducer)\n\tfor _, jmxObject := range m.JMX {\n\t\tvar thisDomain *domainReducer\n\t\tvar thisBean *beanReducer\n\t\tvar domainAndQuery = strings.Split(jmxObject.ObjectName, \":\")\n\t\tif _, ok := thisDomainMap[domainAndQuery[0]]; ok {\n\t\t\tthisDomain = thisDomainMap[domainAndQuery[0]]\n\t\t\tif _, ok := thisDomain.BeansMap[domainAndQuery[1]]; ok {\n\t\t\t\tthisBean = thisDomain.BeansMap[domainAndQuery[1]]\n\t\t\t}\n\t\t}\n\t\tfor _, thisMetric := range jmxObject.Metrics {\n\t\t\tvar inAttrs = strings.Split(thisMetric.Attributes, \",\")\n\t\t\tfor _, thisAttr := range inAttrs {\n\t\t\t\tthisAttr = strings.TrimSpace(thisAttr)\n\t\t\t\tif thisBean != nil {\n\t\t\t\t\tif _, ok := thisBean.AttributesMap[thisAttr]; !ok {\n\t\t\t\t\t\tthisBean.AttributesMap[thisAttr] = &attributeReducer{MetricType: convertMetricType(thisMetric.Type), MetricName: getMetricName(thisAttr, jmxObject.RootMetricName, domainAndQuery[1])}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tthisAttrMap := make(map[string]*attributeReducer)\n\t\t\t\t\tthisAttrMap[thisAttr] = &attributeReducer{MetricType: 
convertMetricType(thisMetric.Type), MetricName: getMetricName(thisAttr, jmxObject.RootMetricName, domainAndQuery[1])}\n\t\t\t\t\tthisBean = &beanReducer{AttributesMap: thisAttrMap}\n\t\t\t\t\tif thisDomain == nil {\n\t\t\t\t\t\tvar outEventType = getEventType(m.Name, domainAndQuery[0])\n\t\t\t\t\t\tthisBeanMap := make(map[string]*beanReducer)\n\t\t\t\t\t\tthisBeanMap[domainAndQuery[1]] = thisBean\n\t\t\t\t\t\tthisDomainMap[domainAndQuery[0]] = &domainReducer{EventType: outEventType, BeansMap: thisBeanMap}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tthisDomain.BeansMap[domainAndQuery[1]] = thisBean\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn thisDomainMap, nil\n}", "func stats(stats elastic.BulkProcessorStats) {\n\t// Build the JSON text for Workers\n\tvar workersStr string\n\tvar workers Workers\n\tif err := workers.InitWorkers(stats.Workers); err == nil {\n\t\tworkersStr = workers.String()\n\t}\n\n\t// Print detailed stats info\n\tlog.Logger.WithFields(logrus.Fields{\n\t\t\"Flushed\":   stats.Flushed,\n\t\t\"Committed\": stats.Committed,\n\t\t\"Indexed\":   stats.Indexed,\n\t\t\"Created\":   stats.Created,\n\t\t\"Updated\":   stats.Updated,\n\t\t\"Deleted\":   stats.Deleted,\n\t\t\"Succeeded\": stats.Succeeded,\n\t\t\"Failed\":    stats.Failed,\n\t\t\"Workers\":   workersStr,\n\t}).Info(\"stats info detail\")\n}", "func (ps *linuxHarvester) populateGauges(sample *types.ProcessSample, process Snapshot) error {\n\tvar err error\n\n\tcpuTimes, err := process.CPUTimes()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsample.CPUPercent = cpuTimes.Percent\n\n\ttotalCPU := cpuTimes.User + cpuTimes.System\n\n\tif totalCPU > 0 {\n\t\tsample.CPUUserPercent = (cpuTimes.User / totalCPU) * sample.CPUPercent\n\t\tsample.CPUSystemPercent = (cpuTimes.System / totalCPU) * sample.CPUPercent\n\t} else {\n\t\tsample.CPUUserPercent = 0\n\t\tsample.CPUSystemPercent = 0\n\t}\n\n\tif ps.privileged {\n\t\tfds, err := process.NumFDs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif fds >= 0 {\n\t\t\tsample.FdCount = &fds\n\t\t}\n\t}\n\n\t// Extra status data\n\tsample.Status = process.Status()\n\tsample.ThreadCount = process.NumThreads()\n\tsample.MemoryVMSBytes = process.VmSize()\n\tsample.MemoryRSSBytes = process.VmRSS()\n\n\treturn nil\n}", "func (s *Systemctl) Gather(acc telegraf.Accumulator) error {\n\ts.mux.Lock()\n\tdefer s.mux.Unlock()\n\n\t// for each systemctl service being monitored\n\tfor _, aggregator := range s.Aggregators {\n\t\t// aggregate the data from the set of samples\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"InputPlugin\": \"systemctl\",\n\t\t\t\"ResourceName\": aggregator.ResourceName,\n\t\t}).Debug(\"Aggregating\")\n\t\terr := aggregator.Aggregate()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// create fields\n\t\tfields := map[string]interface{}{\n\t\t\t\"current_state_time\": aggregator.CurrentStateDuration,\n\t\t\t\"current_state\":      aggregator.CurrentState,\n\t\t}\n\t\tfor k := range aggregator.AggState {\n\t\t\tfields[k] = aggregator.AggState[k]\n\t\t}\n\t\t// create tags\n\t\ttags := map[string]string{\"resource\": aggregator.ResourceName}\n\t\tacc.AddFields(\"service_config_state\", fields, tags)\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"InputPlugin\": \"systemctl\",\n\t\t\t\"ResourceName\": aggregator.ResourceName,\n\t\t}).Debug(\"Added fields\")\n\t}\n\treturn nil\n}", "func (p *Psutil) CollectMetrics(mts []plugin.MetricType) ([]plugin.MetricType, error) {\n\tloadReqs := []core.Namespace{}\n\tcpuReqs := []core.Namespace{}\n\tmemReqs := []core.Namespace{}\n\tnetReqs := []core.Namespace{}\n\tdiskReqs := 
[]core.Namespace{}\n\n\tfor _, m := range mts {\n\t\tns := m.Namespace()\n\t\tswitch ns[2].Value {\n\t\tcase \"load\":\n\t\t\tloadReqs = append(loadReqs, ns)\n\t\tcase \"cpu\":\n\t\t\tcpuReqs = append(cpuReqs, ns)\n\t\tcase \"vm\":\n\t\t\tmemReqs = append(memReqs, ns)\n\t\tcase \"net\":\n\t\t\tnetReqs = append(netReqs, ns)\n\t\tcase \"disk\":\n\t\t\tdiskReqs = append(diskReqs, ns)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"Requested metric %s does not match any known psutil metric\", m.Namespace().String())\n\t\t}\n\t}\n\n\tmetrics := []plugin.MetricType{}\n\n\tloadMts, err := loadAvg(loadReqs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetrics = append(metrics, loadMts...)\n\n\tcpuMts, err := cpuTimes(cpuReqs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetrics = append(metrics, cpuMts...)\n\n\tmemMts, err := virtualMemory(memReqs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetrics = append(metrics, memMts...)\n\n\tnetMts, err := netIOCounters(netReqs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetrics = append(metrics, netMts...)\n\tmounts := getMountpoints(mts[0].Config().Table())\n\tdiskMts, err := getDiskUsageMetrics(diskReqs, mounts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetrics = append(metrics, diskMts...)\n\n\treturn metrics, nil\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n if err := e.scrape(ch); err != nil {\n\t\tlog.Infof(\"Error scraping tinystats: %s\", err)\n\t}\n e.ipv4QueryA.Collect(ch)\n e.ipv4QueryNS.Collect(ch)\n e.ipv4QueryCNAME.Collect(ch)\n e.ipv4QuerySOA.Collect(ch)\n e.ipv4QueryPTR.Collect(ch)\n e.ipv4QueryHINFO.Collect(ch)\n e.ipv4QueryMX.Collect(ch)\n e.ipv4QueryTXT.Collect(ch)\n e.ipv4QueryRP.Collect(ch)\n e.ipv4QuerySIG.Collect(ch)\n e.ipv4QueryKEY.Collect(ch)\n e.ipv4QueryAAAA.Collect(ch)\n e.ipv4QueryAXFR.Collect(ch)\n e.ipv4QueryANY.Collect(ch)\n e.ipv4QueryTOTAL.Collect(ch)\n e.ipv4QueryOTHER.Collect(ch)\n e.ipv4QueryNOTAUTH.Collect(ch)\n e.ipv4QueryNOTIMPL.Collect(ch)\n e.ipv4QueryBADCLASS.Collect(ch)\n e.ipv4QueryNOQUERY.Collect(ch)\n\n e.ipv6QueryA.Collect(ch)\n e.ipv6QueryNS.Collect(ch)\n e.ipv6QueryCNAME.Collect(ch)\n e.ipv6QuerySOA.Collect(ch)\n e.ipv6QueryPTR.Collect(ch)\n e.ipv6QueryHINFO.Collect(ch)\n e.ipv6QueryMX.Collect(ch)\n e.ipv6QueryTXT.Collect(ch)\n e.ipv6QueryRP.Collect(ch)\n e.ipv6QuerySIG.Collect(ch)\n e.ipv6QueryKEY.Collect(ch)\n e.ipv6QueryAAAA.Collect(ch)\n e.ipv6QueryAXFR.Collect(ch)\n e.ipv6QueryANY.Collect(ch)\n e.ipv6QueryTOTAL.Collect(ch)\n e.ipv6QueryOTHER.Collect(ch)\n e.ipv6QueryNOTAUTH.Collect(ch)\n e.ipv6QueryNOTIMPL.Collect(ch)\n e.ipv6QueryBADCLASS.Collect(ch)\n e.ipv6QueryNOQUERY.Collect(ch)\n\n\treturn\n}", "func collectGauges(e *Exporter, ch chan<- prometheus.Metric) {\n\te.chipStatGauge.Collect(ch)\n\te.devsHashRateGauge.Collect(ch)\n\te.devsHashCountGauge.Collect(ch)\n\te.devsErrorsGauge.Collect(ch)\n\te.devsTemperatureGauge.Collect(ch)\n}", "func CollectAllMetrics(client *statsd.Client, log *li.StandardLogger) {\n\n\tvar metrics []metric\n\tmetrics = append(metrics, metric{name: \"gpu.temperature\", cmd: \"vcgencmd measure_temp | egrep -o '[0-9]*\\\\.[0-9]*'\"})\n\tmetrics = append(metrics, metric{name: \"cpu.temperature\", cmd: \"cat /sys/class/thermal/thermal_zone0/temp | awk 'END {print $1/1000}'\"})\n\tmetrics = append(metrics, metric{name: \"threads\", cmd: \"ps -eo nlwp | tail -n +2 | awk '{ num_threads += $1 } END { print num_threads }'\"})\n\tmetrics = 
append(metrics, metric{name: \"processes\", cmd: \"ps axu | wc -l\"})\n\n\tfor range time.Tick(15 * time.Second) {\n\t\tlog.Info(\"Starting metric collection\")\n\t\tfor _, m := range metrics {\n\t\t\terr := collectMetric(m, client, log)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t}\n\t\t}\n\t}\n}", "func (n *RouterNode) GatherMetrics() {\n\tn.Lock()\n\tdefer n.Unlock()\n\n\tlevel.Debug(n.logger).Log(\n\t\t\"msg\", \"GatherMetrics() locked\",\n\t)\n\n\tif time.Now().Unix() < n.nextCollectionTicker {\n\t\treturn\n\t}\n\tstart := time.Now()\n\tif len(n.metrics) > 0 {\n\t\tn.metrics = n.metrics[:0]\n\t\tlevel.Debug(n.logger).Log(\n\t\t\t\"msg\", \"GatherMetrics() cleared metrics\",\n\t\t)\n\t}\n\tupValue := 1\n\n\t// What is RouterID and AS number of this GoBGP server?\n\tserver, err := n.client.GetBgp(context.Background(), &gobgpapi.GetBgpRequest{})\n\tif err != nil {\n\t\tn.IncrementErrorCounter()\n\t\tlevel.Error(n.logger).Log(\n\t\t\t\"msg\", \"failed query gobgp server\",\n\t\t\t\"error\", err.Error(),\n\t\t)\n\t\tif IsConnectionError(err) {\n\t\t\tn.connected = false\n\t\t\tupValue = 0\n\t\t}\n\t} else {\n\t\tn.routerID = server.Global.RouterId\n\t\tn.localAS = server.Global.Asn\n\t\tlevel.Debug(n.logger).Log(\n\t\t\t\"msg\", \"router info\",\n\t\t\t\"router_id\", n.routerID,\n\t\t\t\"local_asn\", n.localAS,\n\t\t)\n\t\tn.connected = true\n\t}\n\n\tif n.connected {\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(2)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tn.GetRibCounters()\n\t\t}()\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tn.GetPeers()\n\t\t}()\n\t\twg.Wait()\n\n\t}\n\n\t// Generic Metrics\n\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\trouterUp,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(upValue),\n\t))\n\n\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\trouterErrors,\n\t\tprometheus.CounterValue,\n\t\tfloat64(n.errors),\n\t))\n\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\trouterNextScrape,\n\t\tprometheus.CounterValue,\n\t\tfloat64(n.nextCollectionTicker),\n\t))\n\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\trouterScrapeTime,\n\t\tprometheus.GaugeValue,\n\t\ttime.Since(start).Seconds(),\n\t))\n\n\t// Router ID and ASN\n\tif n.routerID != \"\" {\n\t\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\t\trouterID,\n\t\t\tprometheus.GaugeValue,\n\t\t\t1,\n\t\t\tn.routerID,\n\t\t))\n\t}\n\tif n.localAS > 0 {\n\t\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\t\trouterLocalAS,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(n.localAS),\n\t\t))\n\t}\n\n\tn.nextCollectionTicker = time.Now().Add(time.Duration(n.pollInterval) * time.Second).Unix()\n\n\tif upValue > 0 {\n\t\tn.result = \"success\"\n\t} else {\n\t\tn.result = \"failure\"\n\t}\n\tn.timestamp = time.Now().Format(time.RFC3339)\n\n\tlevel.Debug(n.logger).Log(\n\t\t\"msg\", \"GatherMetrics() returns\",\n\t)\n}", "func (c collector) Collect(ch chan<- prometheus.Metric) {\n\tvar wg sync.WaitGroup\n\n\t// We don't bail out on errors because those can happen if there is a race condition between\n\t// the destruction of a container and us getting to read the cgroup data. 
We just don't report\n\t// the values we don't get.\n\n\tcollectors := []func(string, *regexp.Regexp){\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tnuma, err := cgroups.GetNumaStats(cgroupPath(\"memory\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateNumaStatMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], numa)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect NUMA stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tmemory, err := cgroups.GetMemoryUsage(cgroupPath(\"memory\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateMemoryUsageMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], memory)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect memory usage stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tmigrate, err := cgroups.GetCPUSetMemoryMigrate(cgroupPath(\"cpuset\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateMemoryMigrateMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], migrate)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect memory migration stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tcpuAcctUsage, err := cgroups.GetCPUAcctStats(cgroupPath(\"cpuacct\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateCPUAcctUsageMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], cpuAcctUsage)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect CPU accounting stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\thugeTlbUsage, err := cgroups.GetHugetlbUsage(cgroupPath(\"hugetlb\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateHugeTlbUsageMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], hugeTlbUsage)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect hugetlb stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tblkioDeviceUsage, err := cgroups.GetBlkioThrottleBytes(cgroupPath(\"blkio\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateBlkioDeviceUsageMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], blkioDeviceUsage)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect blkio stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t}\n\n\tcontainerIDRegexp := regexp.MustCompile(`[a-z0-9]{64}`)\n\n\tfor _, path := range walkCgroups() {\n\t\twg.Add(len(collectors))\n\t\tfor _, fn := range collectors {\n\t\t\tgo fn(path, containerIDRegexp)\n\t\t}\n\t}\n\n\t// We need to wait so that the response channel doesn't get closed.\n\twg.Wait()\n}", "func (throttler *Throttler) aggregateMySQLMetrics(ctx context.Context) error {\n\tfor clusterName, probes := range throttler.mysqlInventory.ClustersProbes {\n\t\tmetricName := fmt.Sprintf(\"mysql/%s\", clusterName)\n\t\tignoreHostsCount := throttler.mysqlInventory.IgnoreHostsCount[clusterName]\n\t\tignoreHostsThreshold := throttler.mysqlInventory.IgnoreHostsThreshold[clusterName]\n\t\taggregatedMetric := aggregateMySQLProbes(ctx, probes, clusterName, throttler.mysqlInventory.InstanceKeyMetrics, ignoreHostsCount, config.Settings().Stores.MySQL.IgnoreDialTCPErrors, ignoreHostsThreshold)\n\t\tthrottler.aggregatedMetrics.Set(metricName, aggregatedMetric, cache.DefaultExpiration)\n\t}\n\treturn nil\n}", "func (p *Prom) CollectStdout(in *bufio.Reader) {\n\tvar stats Metrics\n\tfor {\n\t\tline, err := in.ReadBytes('\\n')\n\t\tif err == io.EOF 
{\n\t\t\treturn\n\t\t}\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t}\n\t\tif err := json.Unmarshal(line, &stats); err != nil {\n\t\t\tfmt.Fprint(os.Stdout, string(line))\n\t\t\tcontinue\n\t\t}\n\t\tif stats.MessageType != \"summary\" {\n\t\t\tcontinue\n\t\t}\n\t\tp.duration.WithLabelValues(p.labelValues...).Set(float64(stats.TotalDuration))\n\t\tp.filesNew.WithLabelValues(p.labelValues...).Set(float64(stats.FilesNew))\n\t\tp.filesUnmodified.WithLabelValues(p.labelValues...).Set(float64(stats.FilesUnmodified))\n\t\tp.filesChanged.WithLabelValues(p.labelValues...).Set(float64(stats.FilesChanged))\n\t\tp.dirsNew.WithLabelValues(p.labelValues...).Set(float64(stats.DirsNew))\n\t\tp.dirsChanged.WithLabelValues(p.labelValues...).Set(float64(stats.DirsChanged))\n\t\tp.dirsUnmodified.WithLabelValues(p.labelValues...).Set(float64(stats.DirsUnmodified))\n\t\tp.bytesAdded.WithLabelValues(p.labelValues...).Set(float64(stats.DataAdded))\n\t\tp.bytesProcessed.WithLabelValues(p.labelValues...).Set(float64(stats.TotalBytesProcessed))\n\t\tp.parsed = true\n\t}\n}", "func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) {\n\toomCountByHost, ramUsageByHost := c.ReallyExpensiveAssessmentOfTheSystemState()\n\tfor host, oomCount := range oomCountByHost {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.OOMCountDesc,\n\t\t\tprometheus.CounterValue,\n\t\t\tfloat64(oomCount),\n\t\t\thost,\n\t\t)\n\t}\n\tfor host, ramUsage := range ramUsageByHost {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.RAMUsageDesc,\n\t\t\tprometheus.GaugeValue,\n\t\t\tramUsage,\n\t\t\thost,\n\t\t)\n\t}\n}", "func (h *Hugepages) gatherStatsFromMeminfo(acc telegraf.Accumulator) error {\n\tmeminfo, err := os.ReadFile(h.meminfoPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmetrics := make(map[string]interface{})\n\tlines := bytes.Split(meminfo, newlineByte)\n\tfor _, line := range lines {\n\t\tfields := bytes.Fields(line)\n\t\tif len(fields) < 2 {\n\t\t\tcontinue\n\t\t}\n\t\tfieldName := string(bytes.TrimSuffix(fields[0], colonByte))\n\t\tmetricName, ok := hugepagesMetricsFromMeminfo[fieldName]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tfieldValue, err := strconv.Atoi(string(fields[1]))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to convert content of %q: %w\", fieldName, err)\n\t\t}\n\n\t\tmetrics[metricName] = fieldValue\n\t}\n\n\tacc.AddFields(\"hugepages_\"+meminfoHugepages, metrics, map[string]string{})\n\treturn nil\n}", "func (c *VMCollector) Collect(ch chan<- prometheus.Metric) {\n\tfor _, m := range c.getMetrics() {\n\t\tch <- m\n\t}\n}", "func NewVMCollector(cfgBaseName string) (collector.Collector, error) {\n\tprocFile := \"meminfo\"\n\n\tc := VM{}\n\tc.id = \"vm\"\n\tc.pkgID = \"builtins.linux.procfs.\" + c.id\n\tc.procFSPath = \"/proc\"\n\tc.file = filepath.Join(c.procFSPath, procFile)\n\tc.logger = log.With().Str(\"pkg\", c.pkgID).Logger()\n\tc.metricStatus = map[string]bool{}\n\tc.metricDefaultActive = true\n\n\tif cfgBaseName == \"\" {\n\t\tif _, err := os.Stat(c.file); err != nil {\n\t\t\treturn nil, errors.Wrap(err, c.pkgID)\n\t\t}\n\t\treturn &c, nil\n\t}\n\n\tvar opts vmOptions\n\terr := config.LoadConfigFile(cfgBaseName, &opts)\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), \"no config found matching\") {\n\t\t\treturn &c, nil\n\t\t}\n\t\tc.logger.Warn().Err(err).Str(\"file\", cfgBaseName).Msg(\"loading config file\")\n\t\treturn nil, errors.Wrapf(err, \"%s config\", c.pkgID)\n\t}\n\n\tc.logger.Debug().Str(\"base\", cfgBaseName).Interface(\"config\", 
opts).Msg(\"loaded config\")\n\n\tif opts.ID != \"\" {\n\t\tc.id = opts.ID\n\t}\n\n\tif opts.ProcFSPath != \"\" {\n\t\tc.procFSPath = opts.ProcFSPath\n\t\tc.file = filepath.Join(c.procFSPath, procFile)\n\t}\n\n\tif len(opts.MetricsEnabled) > 0 {\n\t\tfor _, name := range opts.MetricsEnabled {\n\t\t\tc.metricStatus[name] = true\n\t\t}\n\t}\n\tif len(opts.MetricsDisabled) > 0 {\n\t\tfor _, name := range opts.MetricsDisabled {\n\t\t\tc.metricStatus[name] = false\n\t\t}\n\t}\n\n\tif opts.MetricsDefaultStatus != \"\" {\n\t\tif ok, _ := regexp.MatchString(`^(enabled|disabled)$`, strings.ToLower(opts.MetricsDefaultStatus)); ok {\n\t\t\tc.metricDefaultActive = strings.ToLower(opts.MetricsDefaultStatus) == metricStatusEnabled\n\t\t} else {\n\t\t\treturn nil, errors.Errorf(\"%s invalid metric default status (%s)\", c.pkgID, opts.MetricsDefaultStatus)\n\t\t}\n\t}\n\n\tif opts.RunTTL != \"\" {\n\t\tdur, err := time.ParseDuration(opts.RunTTL)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"%s parsing run_ttl\", c.pkgID)\n\t\t}\n\t\tc.runTTL = dur\n\t}\n\n\tif _, err := os.Stat(c.file); os.IsNotExist(err) {\n\t\treturn nil, errors.Wrap(err, c.pkgID)\n\t}\n\n\treturn &c, nil\n}", "func (m VarnishPlugin) FetchMetrics() (map[string]interface{}, error) {\n\tvar out []byte\n\tvar err error\n\n\tif m.VarnishName == \"\" {\n\t\tout, err = exec.Command(m.VarnishStatPath, \"-1\").CombinedOutput()\n\t} else {\n\t\tout, err = exec.Command(m.VarnishStatPath, \"-1\", \"-n\", m.VarnishName).CombinedOutput()\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%s: %s\", err, out)\n\t}\n\n\tlineexp := regexp.MustCompile(`^([^ ]+) +(\\d+)`)\n\tsmaexp := regexp.MustCompile(`^SMA\\.([^\\.]+)\\.(.+)$`)\n\n\tstat := map[string]interface{}{\n\t\t\"requests\": float64(0),\n\t}\n\n\tvar tmpv float64\n\tfor _, line := range strings.Split(string(out), \"\\n\") {\n\t\tmatch := lineexp.FindStringSubmatch(line)\n\t\tif match == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\ttmpv, err = strconv.ParseFloat(match[2], 64)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch match[1] {\n\t\tcase \"cache_hit\", \"MAIN.cache_hit\":\n\t\t\tstat[\"cache_hits\"] = tmpv\n\t\t\tstat[\"requests\"] = stat[\"requests\"].(float64) + tmpv\n\t\tcase \"cache_miss\", \"MAIN.cache_miss\":\n\t\t\tstat[\"requests\"] = stat[\"requests\"].(float64) + tmpv\n\t\tcase \"cache_hitpass\", \"MAIN.cache_hitpass\":\n\t\t\tstat[\"requests\"] = stat[\"requests\"].(float64) + tmpv\n\t\tcase \"MAIN.backend_req\":\n\t\t\tstat[\"backend_req\"] = tmpv\n\t\tcase \"MAIN.backend_conn\":\n\t\t\tstat[\"backend_conn\"] = tmpv\n\t\tcase \"MAIN.backend_fail\":\n\t\t\tstat[\"backend_fail\"] = tmpv\n\t\tcase \"MAIN.backend_reuse\":\n\t\t\tstat[\"backend_reuse\"] = tmpv\n\t\tcase \"MAIN.backend_recycle\":\n\t\t\tstat[\"backend_recycle\"] = tmpv\n\t\tcase \"MAIN.n_object\":\n\t\t\tstat[\"n_object\"] = tmpv\n\t\tcase \"MAIN.n_objectcore\":\n\t\t\tstat[\"n_objectcore\"] = tmpv\n\t\tcase \"MAIN.n_expired\":\n\t\t\tstat[\"n_expired\"] = tmpv\n\t\tcase \"MAIN.n_objecthead\":\n\t\t\tstat[\"n_objecthead\"] = tmpv\n\t\tcase \"MAIN.busy_sleep\":\n\t\t\tstat[\"busy_sleep\"] = tmpv\n\t\tcase \"MAIN.busy_wakeup\":\n\t\t\tstat[\"busy_wakeup\"] = tmpv\n\t\tdefault:\n\t\t\tsmamatch := smaexp.FindStringSubmatch(match[1])\n\t\t\tif smamatch == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif smamatch[2] == \"g_alloc\" {\n\t\t\t\tstat[\"varnish.sma.g_alloc.\"+smamatch[1]+\".g_alloc\"] = tmpv\n\t\t\t} else if smamatch[2] == \"g_bytes\" 
{\n\t\t\t\tstat[\"varnish.sma.memory.\"+smamatch[1]+\".allocated\"] = tmpv\n\t\t\t} else if smamatch[2] == \"g_space\" {\n\t\t\t\tstat[\"varnish.sma.memory.\"+smamatch[1]+\".available\"] = tmpv\n\t\t\t}\n\t\t}\n\t}\n\n\treturn stat, err\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\tup := e.scrape(ch)\n\n\tch <- prometheus.MustNewConstMetric(artifactoryUp, prometheus.GaugeValue, up)\n\tch <- e.totalScrapes\n\tch <- e.jsonParseFailures\n}", "func appStatsCollect(ctx *zedrouterContext) {\n\tlog.Infof(\"appStatsCollect: containerStats, started\")\n\tappStatsCollectTimer := time.NewTimer(time.Duration(ctx.appStatsInterval) * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-appStatsCollectTimer.C:\n\t\t\titems, stopped := checkAppStopStatsCollect(ctx)\n\t\t\tif stopped {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tcollectTime := time.Now() // all apps collection assign the same timestamp\n\t\t\tfor _, st := range items {\n\t\t\t\tstatus := st.(types.AppNetworkStatus)\n\t\t\t\tif status.GetStatsIPAddr != nil {\n\t\t\t\t\tacMetrics, err := appContainerGetStats(status.GetStatsIPAddr)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Errorf(\"appStatsCollect: can't get App %s Container Metrics on %s, %v\",\n\t\t\t\t\t\t\tstatus.UUIDandVersion.UUID.String(), status.GetStatsIPAddr.String(), err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tacMetrics.UUIDandVersion = status.UUIDandVersion\n\t\t\t\t\tacMetrics.CollectTime = collectTime\n\t\t\t\t\tctx.pubAppContainerMetrics.Publish(acMetrics.Key(), acMetrics)\n\t\t\t\t}\n\t\t\t}\n\t\t\tappStatsCollectTimer = time.NewTimer(time.Duration(ctx.appStatsInterval) * time.Second)\n\t\t}\n\t}\n}", "func (pc *NginxProcessesMetricsCollector) Collect(ch chan<- prometheus.Metric) {\n\tpc.updateWorkerProcessCount()\n\tpc.workerProcessTotal.Collect(ch)\n}", "func CollectSysStats(registry *Registry) {\n\tvar s sysStatsCollector\n\ts.registry = registry\n\ts.maxOpen = registry.Gauge(\"fh.allocated\", nil)\n\ts.curOpen = registry.Gauge(\"fh.max\", nil)\n\ts.numGoroutines = registry.Gauge(\"go.numGoroutines\", nil)\n\n\tticker := time.NewTicker(30 * time.Second)\n\tgo func() {\n\t\tlog := registry.log\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tlog.Debugf(\"Collecting system stats\")\n\t\t\t\tfdStats(&s)\n\t\t\t\tgoRuntimeStats(&s)\n\t\t\t}\n\t\t}\n\t}()\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock()\n\tdefer e.mutex.Unlock()\n\n\tfor _, vec := range e.gauges {\n\t\tvec.Reset()\n\t}\n\n\tdefer func() { ch <- e.up }()\n\n\t// If we fail at any point in retrieving GPU status, we fail 0\n\te.up.Set(1)\n\n\te.GetTelemetryFromNVML()\n\n\tfor _, vec := range e.gauges {\n\t\tvec.Collect(ch)\n\t}\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\t// Protect metrics from concurrent collects.\n\te.mutex.Lock()\n\tdefer e.mutex.Unlock()\n\n\t// Scrape metrics from Tankerkoenig API.\n\tif err := e.scrape(ch); err != nil {\n\t\te.logger.Printf(\"error: cannot scrape tankerkoenig api: %v\", err)\n\t}\n\n\t// Collect metrics.\n\te.up.Collect(ch)\n\te.scrapeDuration.Collect(ch)\n\te.failedScrapes.Collect(ch)\n\te.totalScrapes.Collect(ch)\n}", "func (c *Canary) GatherMetrics(config schemas.Config) error {\n\tif !c.StepStatus[constants.StepCleanChecking] {\n\t\treturn nil\n\t}\n\tif config.DisableMetrics {\n\t\treturn nil\n\t}\n\n\tif len(config.Region) > 0 {\n\t\tif !CheckRegionExist(config.Region, c.Stack.Regions) 
{\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tif !config.CompleteCanary {\n\t\tc.Logger.Debug(\"Skip gathering metrics because the canary deployment is not complete\")\n\t\treturn nil\n\t}\n\n\tif err := c.Deployer.StartGatheringMetrics(config); err != nil {\n\t\treturn err\n\t}\n\n\tc.StepStatus[constants.StepGatherMetrics] = true\n\treturn nil\n}", "func (g gatherer) GatherMetrics(ctx context.Context, m *elasticapm.Metrics) error {\n\tg.r.Each(func(name string, v interface{}) {\n\t\tswitch v := v.(type) {\n\t\tcase metrics.Counter:\n\t\t\tm.Add(name, nil, float64(v.Count()))\n\t\tcase metrics.Gauge:\n\t\t\tm.Add(name, nil, float64(v.Value()))\n\t\tcase metrics.GaugeFloat64:\n\t\t\tm.Add(name, nil, v.Value())\n\t\tcase metrics.Histogram:\n\t\t\tm.Add(name+\".count\", nil, float64(v.Count()))\n\t\t\tm.Add(name+\".total\", nil, float64(v.Sum()))\n\t\t\tm.Add(name+\".min\", nil, float64(v.Min()))\n\t\t\tm.Add(name+\".max\", nil, float64(v.Max()))\n\t\t\tm.Add(name+\".stddev\", nil, v.StdDev())\n\t\t\tm.Add(name+\".percentile.50\", nil, v.Percentile(0.5))\n\t\t\tm.Add(name+\".percentile.95\", nil, v.Percentile(0.95))\n\t\t\tm.Add(name+\".percentile.99\", nil, v.Percentile(0.99))\n\t\tdefault:\n\t\t\t// TODO(axw) Meter, Timer, EWMA\n\t\t}\n\t})\n\treturn nil\n}", "func (k *KACollector) Collect(ch chan<- prometheus.Metric) {\n\tk.mutex.Lock()\n\tdefer k.mutex.Unlock()\n\n\tvar err error\n\tvar kaStats []KAStats\n\n\tif k.useJSON {\n\t\tkaStats, err = k.json()\n\t} else {\n\t\tkaStats, err = k.text()\n\t}\n\tif err != nil {\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_up\"], prometheus.GaugeValue, 0)\n\t\tlog.Printf(\"keepalived_exporter: %v\", err)\n\t\treturn\n\t}\n\n\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_up\"], prometheus.GaugeValue, 1)\n\n\tfor _, st := range kaStats {\n\t\tstate := state2string[st.Data.State] // empty string when the state is unknown\n\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_advert_rcvd\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.AdvertRcvd), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_advert_sent\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.AdvertSent), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_become_master\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.BecomeMaster), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_release_master\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.ReleaseMaster), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_packet_len_err\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.PacketLenErr), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_advert_interval_err\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.AdvertIntervalErr), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_ip_ttl_err\"], 
prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.IPTtlErr), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state) // assumes the stats struct exposes an IPTtlErr counter for ip_ttl_err\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_invalid_type_rcvd\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.InvalidTypeRcvd), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_addr_list_err\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.AddrListErr), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_invalid_authtype\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.InvalidAuthtype), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_authtype_mismatch\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.AuthtypeMismatch), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_auth_failure\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.AuthFailure), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_pri_zero_rcvd\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.PriZeroRcvd), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_pri_zero_sent\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.PriZeroSent), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t}\n\n\tif k.handle == nil {\n\t\treturn\n\t}\n\n\tsvcs, err := k.handle.GetServices()\n\tif err != nil {\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_up\"], prometheus.GaugeValue, 0)\n\t\tlog.Printf(\"keepalived_exporter: services: %v\", err)\n\t\treturn\n\t}\n\n\tfor _, s := range svcs {\n\t\tdsts, err := k.handle.GetDestinations(s)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"keepalived_exporter: destinations: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\taddr := s.Address.String() + \":\" + strconv.Itoa(int(s.Port))\n\t\tproto := strconv.Itoa(int(s.Protocol))\n\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_vip_in_packets\"], prometheus.CounterValue,\n\t\t\tfloat64(s.Stats.PacketsIn), addr, proto)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_vip_out_packets\"], prometheus.CounterValue,\n\t\t\tfloat64(s.Stats.PacketsOut), addr, proto)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_vip_in_bytes\"], prometheus.CounterValue,\n\t\t\tfloat64(s.Stats.BytesIn), addr, proto)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_vip_out_bytes\"], prometheus.CounterValue,\n\t\t\tfloat64(s.Stats.BytesOut), addr, proto)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_vip_conn\"], prometheus.CounterValue,\n\t\t\tfloat64(s.Stats.Connections), addr, proto)\n\n\t\tfor _, d := range dsts {\n\t\t\taddr := d.Address.String() + \":\" + strconv.Itoa(int(d.Port))\n\n\t\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_rs_in_packets\"], prometheus.CounterValue,\n\t\t\t\tfloat64(d.Stats.PacketsIn), addr, proto)\n\t\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_rs_out_packets\"], prometheus.CounterValue,\n\t\t\t\tfloat64(d.Stats.PacketsOut), addr, proto)\n\t\t\tch <- 
prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_rs_in_bytes\"], prometheus.CounterValue,\n\t\t\t\tfloat64(d.Stats.BytesIn), addr, proto)\n\t\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_rs_out_bytes\"], prometheus.CounterValue,\n\t\t\t\tfloat64(d.Stats.BytesOut), addr, proto)\n\t\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_rs_conn\"], prometheus.CounterValue,\n\t\t\t\tfloat64(d.Stats.Connections), addr, proto)\n\t\t}\n\t}\n}", "func (collector *proxmoxZpoolCollector) Collect(ch chan<- prometheus.Metric) {\n\tnodes, err := collector.api.GetNodes()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tfor _, node := range nodes.Data {\n\t\tzpoolList, err := collector.api.GetZpoolList(node.Node)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\t\tfor _, zpool := range zpoolList.Data {\n\t\t\tvar zpoolParsingErrorMetric float64\n\n\t\t\tzpoolInfo, err := collector.api.GetZpool(node.Node, zpool.Name)\n\t\t\tif err != nil {\n\t\t\t\tzpoolParsingErrorMetric = float64(1)\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\n\t\t\tvar zpoolOnlineMetric float64\n\t\t\tvar zpoolErrorMetric float64\n\t\t\tif zpoolInfo.Data.State == \"ONLINE\" {\n\t\t\t\tzpoolOnlineMetric = float64(1)\n\t\t\t\tzpoolErrorMetric = float64(0)\n\t\t\t} else {\n\t\t\t\tzpoolErrorMetric = float64(1)\n\t\t\t}\n\n\t\t\tvar zpoolLastScrubMetric float64\n\t\t\t//Example scrub response: scrub repaired 0B in 0 days 01:56:29 with 0 errors on Sun May 10 02:20:30 2020\n\t\t\tif x := strings.SplitAfter(zpoolInfo.Data.Scan, \"on \"); len(x) == 2 {\n\t\t\t\t//Sun May 10 02:20:30 2020\n\t\t\t\tif len(x[1]) > 5 { //We want to get rid of the day eg: Mon\n\t\t\t\t\t//May 10 02:20:30 2020\n\t\t\t\t\tt, err := time.Parse(dateForm, x[1][4:])\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tzpoolParsingErrorMetric = float64(1)\n\t\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\t}\n\t\t\t\t\tzpoolLastScrubMetric = float64(t.Unix()) //Could this be an issue since time.Unix() returns int64?\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar zpoolLastScrubErrorsMetric float64\n\t\t\t//Example scrub response: scrub repaired 0B in 0 days 01:56:29 with 0 errors on Sun May 10 02:20:30 2020\n\t\t\tsplitLine := strings.Split(zpoolInfo.Data.Scan, \" \")\n\t\t\tfor index, x := range splitLine {\n\t\t\t\tif strings.Contains(x, \"error\") && index >= 1 { //Support for \"error\" or \"errors\"\n\t\t\t\t\ttotalErrors, err := strconv.ParseFloat(splitLine[index-1], 64) //We want to grab the number before error eg: 3 errors\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tzpoolParsingErrorMetric = float64(1)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tzpoolLastScrubErrorsMetric = totalErrors\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t//ch <- prometheus.MustNewConstMetric(zpoolTotalDisks, prometheus.GaugeValue, metricValue, \"test\")\n\t\t\tch <- prometheus.MustNewConstMetric(zpoolError, prometheus.GaugeValue, zpoolErrorMetric, node.Node, zpool.Name)\n\t\t\tch <- prometheus.MustNewConstMetric(zpoolOnline, prometheus.GaugeValue, zpoolOnlineMetric, node.Node, zpool.Name)\n\t\t\tch <- prometheus.MustNewConstMetric(zpoolFree, prometheus.GaugeValue, zpool.Free, node.Node, zpool.Name)\n\t\t\tch <- prometheus.MustNewConstMetric(zpoolAllocated, prometheus.GaugeValue, zpool.Alloc, node.Node, zpool.Name)\n\t\t\tch <- prometheus.MustNewConstMetric(zpoolSize, prometheus.GaugeValue, zpool.Size, node.Node, zpool.Name)\n\t\t\tch <- prometheus.MustNewConstMetric(zpoolDedup, prometheus.GaugeValue, float64(zpool.Dedup), node.Node, zpool.Name)\n\t\t\tch <- 
prometheus.MustNewConstMetric(zpoolLastScrub, prometheus.GaugeValue, zpoolLastScrubMetric, node.Node, zpool.Name)\n\t\t\tch <- prometheus.MustNewConstMetric(zpoolLastScrubErrors, prometheus.GaugeValue, zpoolLastScrubErrorsMetric, node.Node, zpool.Name)\n\t\t\tch <- prometheus.MustNewConstMetric(zpoolParsingError, prometheus.GaugeValue, zpoolParsingErrorMetric, node.Node, zpool.Name)\n\t\t}\n\t}\n}", "func processHealthMonitor(duration time.Duration) {\n\tfor {\n\t\t<-time.After(duration)\n\t\tvar numOfGoroutines = runtime.NumGoroutine()\n\t\t//var memStats runtime.MemStats\n\t\t//runtime.ReadMemStats(&memStats)\n\t\t//core.Info(\"Number of goroutines: %d\",numOfGoroutines)\n\t\t//core.Info(\"Mem stats: %v\",memStats)\n\t\tcore.CloudWatchClient.PutMetric(\"num_of_goroutines\", \"Count\", float64(numOfGoroutines), \"httshark_health_monitor\")\n\t}\n}", "func (dh *darwinHarvester) populateGauges(sample *types.ProcessSample, process Snapshot) error {\n\tvar err error\n\n\tcpuTimes, err := process.CPUTimes()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsample.CPUPercent = cpuTimes.Percent\n\n\ttotalCPU := cpuTimes.User + cpuTimes.System\n\n\tif totalCPU > 0 {\n\t\tsample.CPUUserPercent = (cpuTimes.User / totalCPU) * sample.CPUPercent\n\t\tsample.CPUSystemPercent = (cpuTimes.System / totalCPU) * sample.CPUPercent\n\t} else {\n\t\tsample.CPUUserPercent = 0\n\t\tsample.CPUSystemPercent = 0\n\t}\n\n\t// Extra status data\n\tsample.Status = process.Status()\n\tsample.ThreadCount = process.NumThreads()\n\tsample.MemoryVMSBytes = process.VmSize()\n\tsample.MemoryRSSBytes = process.VmRSS()\n\n\treturn nil\n}", "func (w *windowsResourceUsageGatherer) Gather(executor QueryExecutor, startTime time.Time, config *measurement.MeasurementConfig) ([]measurement.Summary, error) {\n\tcpuSummary, err := getSummary(cpuUsageQueryTop10, convertToCPUPerfData, cpuUsageMetricsName, executor, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmemorySummary, err := getSummary(memoryUsageQueryTop10, convertToMemoryPerfData, memoryUsageMetricsName, executor, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn []measurement.Summary{cpuSummary, memorySummary}, nil\n}", "func Collectmem(serverName string) (Metric, error) {\n\tvalues := Metric{}\n\tvar err error\n\tvalues.Timestamp = time.Now()\n\tvalues.MetricType = \"mem\"\n\tvar output string\n\n\tvar response cpmserverapi.MetricMEMResponse\n\trequest := &cpmserverapi.MetricMEMRequest{}\n\tresponse, err = cpmserverapi.MetricMEMClient(serverName, request)\n\tif err != nil {\n\t\tlogit.Error.Println(\"mem metric error:\" + err.Error())\n\t\treturn values, err\n\t}\n\n\toutput = strings.TrimSpace(response.Output)\n\n\tvalues.Value, err = strconv.ParseFloat(output, 64)\n\tif err != nil {\n\t\tlogit.Error.Println(\"parseFloat error in mem metric \" + err.Error())\n\t}\n\n\treturn values, err\n}", "func (collector *atlassianUPMCollector) Collect(ch chan<- prometheus.Metric) {\n\tstartTime := time.Now()\n\tlog.Debug(\"Collect start\")\n\n\tlog.Debug(\"create request object\")\n\treq, err := http.NewRequest(\"GET\", baseURL, nil)\n\tif err != nil {\n\t\tlog.Error(\"http.NewRequest returned an error:\", err)\n\t}\n\n\tlog.Debug(\"create Basic auth string from argument passed\")\n\tbearer = \"Basic \" + *token\n\n\tlog.Debug(\"add authorization header to the request\")\n\treq.Header.Add(\"Authorization\", bearer)\n\n\tlog.Debug(\"add content type to the request\")\n\treq.Header.Add(\"content-type\", \"application/json\")\n\n\tlog.Debug(\"make request... 
get back a response\")\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tlog.Debug(\"set metric atlassian_upm_rest_url_up\")\n\t\tch <- prometheus.MustNewConstMetric(collector.atlassianUPMUpMetric, prometheus.GaugeValue, 0, *fqdn)\n\t\tlog.Warn(\"http.DefaultClient.Do returned an error:\", err, \" return from Collect\")\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\tlog.Debug(\"response status code: \", resp.StatusCode)\n\t}\n\n\tlog.Debug(\"set metric atlassian_upm_rest_url_up\")\n\tch <- prometheus.MustNewConstMetric(collector.atlassianUPMUpMetric, prometheus.GaugeValue, 1, *fqdn)\n\n\tvar allPlugins restPlugins\n\tif resp.StatusCode == 200 {\n\t\tlog.Debug(\"get all plugins\")\n\t\tallPlugins = plugins(resp)\n\n\t\t// return user-installed plugins if argument passed\n\t\tif *userInstalled {\n\t\t\tlog.Debug(\"-user-installed found\")\n\t\t\tallPlugins = userInstalledPlugins(allPlugins)\n\t\t}\n\n\t\t// plugins have the ability to be installed, but disabled, this will remove them if disabled\n\t\tif *dropDisabled {\n\t\t\tlog.Debug(\"-drop-disabled found\")\n\t\t\tallPlugins = dropDisabledPlugins(allPlugins)\n\t\t}\n\n\t\t// Jira specific\n\t\t// some plugins maintained by Jira have an additional element, this gives the option to drop those plugins\n\t\tif *dropJiraSoftware {\n\t\t\tlog.Debug(\"-drop-jira-software found\")\n\t\t\tallPlugins = dropJiraSoftwarePlugins(allPlugins)\n\t\t}\n\n\t\tlog.Debug(\"range over values in response, add each as metric with labels\")\n\t\tfor _, plugin := range allPlugins.Plugins {\n\n\t\t\tlog.Debug(\"creating plugin metric for: \" + plugin.Name)\n\t\t\tch <- prometheus.MustNewConstMetric(\n\t\t\t\tcollector.atlassianUPMPlugins,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\t0,\n\t\t\t\tstrconv.FormatBool(plugin.Enabled), // convert bool to string for the 'enabled' value in the labels\n\t\t\t\tstring(plugin.Name),\n\t\t\t\tstring(plugin.Key),\n\t\t\t\tstring(plugin.Version),\n\t\t\t\tstrconv.FormatBool(plugin.UserInstalled),\n\t\t\t\t*fqdn,\n\t\t\t)\n\t\t}\n\t}\n\n\tif resp.StatusCode == 200 && *checkUpdates {\n\t\tlog.Debug(\"get remaining plugins available info\")\n\t\tavailablePluginsMap := getAvailablePluginInfo(allPlugins)\n\n\t\tlog.Debug(\"range over values in response, add each as metric with labels\")\n\t\tfor _, plugin := range availablePluginsMap {\n\t\t\tavailableUpdate := false\n\n\t\t\tverInstalled, err := version.NewVersion(plugin.InstalledVersion)\n\t\t\tif err != nil {\n\t\t\t\tlog.Debug(\"error turning plugin installed into version object\")\n\t\t\t}\n\n\t\t\tverAvailable, err := version.NewVersion(plugin.Version)\n\t\t\tif err != nil {\n\t\t\t\tlog.Debug(\"error turning available plugin into version object\")\n\t\t\t}\n\n\t\t\tif verInstalled.LessThan(verAvailable) {\n\t\t\t\tlog.Debug(\"plugin: \", plugin.Name, \", is currently running: \", plugin.InstalledVersion, \", and can be upgraded to: \", plugin.Version)\n\t\t\t\tavailableUpdate = true\n\t\t\t}\n\n\t\t\tlog.Debug(\"creating plugin version metric for: \", plugin.Name, \", with Key: \", plugin.Key)\n\t\t\tch <- prometheus.MustNewConstMetric(\n\t\t\t\tcollector.atlassianUPMVersionsMetric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tboolToFloat(availableUpdate),\n\t\t\t\tstring(plugin.Name),\n\t\t\t\tstring(plugin.Key),\n\t\t\t\tstring(plugin.Version),\n\t\t\t\tstring(plugin.InstalledVersion),\n\t\t\t\tstrconv.FormatBool(plugin.Enabled), // convert bool to string for the 'enabled' value in the 
labels\n\t\t\t\tstrconv.FormatBool(plugin.UserInstalled),\n\t\t\t\t*fqdn,\n\t\t\t)\n\t\t}\n\t}\n\n\tfinishTime := time.Now()\n\telapsedTime := finishTime.Sub(startTime)\n\tlog.Debug(\"set the duration metric\")\n\tch <- prometheus.MustNewConstMetric(collector.atlassianUPMTimeMetric, prometheus.GaugeValue, elapsedTime.Seconds(), *fqdn)\n\n\tlog.Debug(\"Collect finished\")\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\tup, result := e.scrape(ch)\n\n\tch <- e.totalScrapes\n\tch <- e.jsonParseFailures\n\tch <- prometheus.MustNewConstMetric(iqAirUp, prometheus.GaugeValue, up)\n\tch <- prometheus.MustNewConstMetric(iqAirCO2, prometheus.GaugeValue, float64(result.CO2))\n\tch <- prometheus.MustNewConstMetric(iqAirP25, prometheus.GaugeValue, float64(result.P25))\n\tch <- prometheus.MustNewConstMetric(iqAirP10, prometheus.GaugeValue, float64(result.P10))\n\tch <- prometheus.MustNewConstMetric(iqAirTemp, prometheus.GaugeValue, float64(result.Temperature))\n\tch <- prometheus.MustNewConstMetric(iqAirHumidity, prometheus.GaugeValue, float64(result.Humidity))\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\tif err := e.scrape(); err != nil {\n\t\tlog.Error(err)\n\t\tnomad_up.Set(0)\n\t\tch <- nomad_up\n\t\treturn\n\t}\n\n\tch <- nomad_up\n\tch <- metric_uptime\n\tch <- metric_request_response_time_total\n\tch <- metric_request_response_time_avg\n\n\tfor _, metric := range metric_request_status_count_current {\n\t\tch <- metric\n\t}\n\tfor _, metric := range metric_request_status_count_total {\n\t\tch <- metric\n\t}\n}", "func (exp *Expvar) Collect() (map[string]interface{}, error) {\n\treq, err := http.NewRequest(http.MethodGet, exp.host, nil)\n\tlog.Println(exp.host)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn nil, err\n\t}\n\n\tresp, err := exp.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := make(map[string]interface{})\n\tif err := json.NewDecoder(resp.Body).Decode(&data); err != nil {\n\t\treturn nil, err\n\t}\n\n\tmemStats, ok := (data[\"memstats\"]).(map[string]interface{})\n\tif ok {\n\t\tdata[\"heap\"] = memStats[\"Alloc\"]\n\t}\n\n\tu, err := url.Parse(exp.host)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdata[\"host\"] = u.Hostname()\n\n\tdelete(data, \"memstats\") // the decoded JSON key is lowercase \"memstats\"\n\tdelete(data, \"cmdline\")\n\n\treturn data, nil\n}", "func (u *Use) CollectMetrics(mts []plugin.Metric) ([]plugin.Metric, error) {\n\tcfg := mts[0].Config\n\tif !u.initialized {\n\t\tu.init(cfg)\n\t}\n\n\tmetrics := make([]plugin.Metric, len(mts))\n\tfor i, p := range mts {\n\t\tns := p.Namespace.String()\n\t\tswitch {\n\t\tcase cpure.MatchString(ns):\n\t\t\tmetric, err := u.computeStat(p.Namespace)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.New(\"Unable to get compute stat: \" + err.Error())\n\t\t\t}\n\t\t\tmetrics[i] = *metric\n\n\t\tcase storre.MatchString(ns):\n\t\t\tmetric, err := u.diskStat(p.Namespace)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.New(\"Unable to get disk stat: \" + err.Error())\n\t\t\t}\n\t\t\tmetrics[i] = *metric\n\t\tcase memre.MatchString(ns):\n\t\t\tmetric, err := memStat(p.Namespace, u.VmStatPath, u.MemInfoPath)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.New(\"Unable to get mem stat: \" + err.Error())\n\t\t\t}\n\t\t\tmetrics[i] = *metric\n\t\t}\n\t\ttags, err := hostTags()\n\n\t\tif err == nil {\n\t\t\tmetrics[i].Tags = 
tags\n\t\t}\n\t\tmetrics[i].Timestamp = time.Now()\n\n\t}\n\treturn metrics, nil\n}", "func (n *Vspheretpgy) Gather(acc telegraf.Accumulator) error {\n\t// setPrecision function is the same as `acc.SetPrecision(time.Nanosecond, 0)`\n\tsetPrecisionForVsphere(&acc)\n\n\tfor i, urls := range n.Urls {\n\t\tif len(urls) == 0 {\n\t\t\tlog.Printf(\"Need to put vCenter information!\\n\")\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif len(urls) != 4 {\n\t\t\tacc.AddError(fmt.Errorf(\"the %d_th vsphere configuration is incorrect! \", i+1))\n\n\t\t\tcontinue\n\t\t}\n\t\t// for a given set of vcsas\n\t\tvc, err := vcsa.NewVcsaConnector(urls[0], urls[1], urls[2], urls[3], true)\n\t\tif err != nil {\n\t\t\tacc.AddError(fmt.Errorf(\"failed to connect: %v\", err))\n\n\t\t\tcontinue\n\t\t}\n\n\t\tdcs, err := dcai.FetchVsphereTopology(vc)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttimeStamp := fmt.Sprintf(\"%d\", time.Now().UnixNano())\n\n\t\t// keep iterating so every configured vCenter is processed\n\t\tif err = startAccNeo4j(vc, dcs, timeStamp, acc); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c *VM) Collect(ctx context.Context) error {\n\tmetrics := cgm.Metrics{}\n\n\tc.Lock()\n\n\tif c.runTTL > time.Duration(0) {\n\t\tif time.Since(c.lastEnd) < c.runTTL {\n\t\t\tc.logger.Warn().Msg(collector.ErrTTLNotExpired.Error())\n\t\t\tc.Unlock()\n\t\t\treturn collector.ErrTTLNotExpired\n\t\t}\n\t}\n\tif c.running {\n\t\tc.logger.Warn().Msg(collector.ErrAlreadyRunning.Error())\n\t\tc.Unlock()\n\t\treturn collector.ErrAlreadyRunning\n\t}\n\n\tc.running = true\n\tc.lastStart = time.Now()\n\tc.Unlock()\n\n\tif err := c.parseMemstats(ctx, &metrics); err != nil {\n\t\tc.setStatus(metrics, err)\n\t\treturn fmt.Errorf(\"%s parseMemstats: %w\", c.pkgID, err)\n\t}\n\n\tif err := c.parseVMstats(ctx, &metrics); err != nil {\n\t\tc.setStatus(metrics, err)\n\t\treturn fmt.Errorf(\"%s parseVMstats: %w\", c.pkgID, err)\n\t}\n\n\tc.setStatus(metrics, nil)\n\treturn nil\n}", "func runCPUUsageStats() {\n\tnbCPU := float64(runtime.NumCPU())\n\tparams := fmt.Sprintf(\"(Get-process -Id %d).CPU\", os.Getpid())\n\tfor {\n\t\tcmd := exec.Command(\"powershell\", params)\n\t\tdata, _ := cmd.Output()\n\t\tcurrent, _ := strconv.ParseFloat(strings.Replace(string(data), \"\\r\\n\", \"\", -1), 32)\n\t\tif previous == 0 {\n\t\t\tprevious = current\n\t\t}\n\t\tcurrentUsage = int(((current - previous) * float64(100)) / (waitTime * nbCPU))\n\t\tprevious = current\n\t\ttime.Sleep(time.Duration(waitTime) * time.Second)\n\t}\n}", "func Collectcpu(serverName string) (Metric, error) {\n\tvalues := Metric{}\n\tvar err error\n\tvalues.Timestamp = time.Now()\n\tvalues.MetricType = \"cpu\"\n\n\tvar response cpmserverapi.MetricCPUResponse\n\trequest := &cpmserverapi.MetricCPURequest{}\n\tresponse, err = cpmserverapi.MetricCPUClient(serverName, request)\n\tif err != nil {\n\t\tlogit.Error.Println(\"cpu metric error:\" + err.Error())\n\t\treturn values, err\n\t}\n\n\tvar output = strings.TrimSpace(response.Output)\n\n\tvalues.Value, err = strconv.ParseFloat(output, 64)\n\tif err != nil {\n\t\tlogit.Error.Println(\"parseFloat error in cpu metric \" + err.Error())\n\t}\n\n\treturn values, err\n}", "func (m *arangodb) CollectAgentLogs(w io.Writer) error {\n\tif m.HasAgent() {\n\t\tif err := m.updateServerInfo(); err != nil {\n\t\t\treturn maskAny(err)\n\t\t}\n\t\tif err := m.collectServerLogs(w, \"agent\"); err != nil && errors.Cause(err) != io.EOF {\n\t\t\treturn maskAny(err)\n\t\t}\n\t\treturn nil\n\t}\n\treturn nil\n}", "func (p *Kafka) CollectMetrics(mts []plugin.MetricType) ([]plugin.MetricType, error) {\n\tmetrics := 
[]plugin.MetricType{}\n\n\terr := p.loadMetricAPI(mts[0].Config())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, m := range mts {\n\t\tresults := []nodeData{}\n\t\tsearch := strings.Split(replaceUnderscoreToDot(strings.TrimLeft(m.Namespace().String(), \"/\")), \"/\")\n\t\tif len(search) > 3 {\n\t\t\tp.client.Root.Get(p.client.client.GetUrl(), search[4:], 0, &results)\n\t\t}\n\n\t\tfor _, result := range results {\n\t\t\tns := append([]string{\"hyperpilot\", \"kafka\", \"node\", p.client.host}, strings.Split(result.Path, Slash)...)\n\t\t\tmetrics = append(metrics, plugin.MetricType{\n\t\t\t\tNamespace_: core.NewNamespace(ns...),\n\t\t\t\tTimestamp_: time.Now(),\n\t\t\t\tData_: result.Data,\n\t\t\t\tUnit_: reflect.TypeOf(result.Data).String(),\n\t\t\t})\n\t\t}\n\n\t}\n\n\treturn metrics, nil\n}", "func pvCollect(ch chan<- prometheus.Metric, pvs []map[string]string, vgName string) {\n for _, pv := range pvs {\n pvSizeF, err := strconv.ParseFloat(strings.Trim(pv[\"pv_size\"], \"B\"), 64)\n if err != nil {\n log.Print(err)\n return\n }\n ch <- prometheus.MustNewConstMetric(pvSizeMetric, prometheus.GaugeValue, pvSizeF, pv[\"pv_name\"], pv[\"pv_uuid\"], vgName)\n\n pvFreeF, err := strconv.ParseFloat(strings.Trim(pv[\"pv_free\"], \"B\"), 64)\n if err != nil {\n log.Print(err)\n return\n }\n ch <- prometheus.MustNewConstMetric(pvFreeMetric, prometheus.GaugeValue, pvFreeF, pv[\"pv_name\"], pv[\"pv_uuid\"], vgName)\n\n pvUsedF, err := strconv.ParseFloat(strings.Trim(pv[\"pv_used\"], \"B\"), 64)\n if err != nil {\n log.Print(err)\n return\n }\n ch <- prometheus.MustNewConstMetric(pvUsedMetric, prometheus.GaugeValue, pvUsedF, pv[\"pv_name\"], pv[\"pv_uuid\"], vgName)\n }\n}", "func vgCollect(ch chan<- prometheus.Metric, vgs []map[string]string) {\n for _, vg := range vgs {\n vgSizeF, err := strconv.ParseFloat(strings.Trim(vg[\"vg_size\"], \"B\"), 64)\n if err != nil {\n log.Print(err)\n return\n }\n ch <- prometheus.MustNewConstMetric(vgSizeMetric, prometheus.GaugeValue, vgSizeF, vg[\"vg_name\"], vg[\"vg_uuid\"])\n\n vgFreeF, err := strconv.ParseFloat(strings.Trim(vg[\"vg_free\"], \"B\"), 64)\n if err != nil {\n log.Print(err)\n return\n }\n ch <- prometheus.MustNewConstMetric(vgFreeMetric, prometheus.GaugeValue, vgFreeF, vg[\"vg_name\"], vg[\"vg_uuid\"])\n }\n}", "func FlowStatsCollect(ctx *zedrouterContext) {\n\tvar instData networkAttrs\n\tvar timeOutTuples []flowStats\n\tvar totalFlow int\n\n\tinstData.ipaclattr = make(map[int]map[int]aclAttr) // App-ID/ACL-Num/aclAttr table\n\tinstData.appIPinfo = make(map[int][]appInfo)\n\tinstData.bnNet = make(map[string]bridgeAttr) // borrow the aclAttr for intf attributes\n\tinstData.appNet = make(map[int]uuid.UUID)\n\n\tIntfAddrs, err := net.InterfaceAddrs()\n\tif err != nil {\n\t\tlog.Errorf(\"error in getting addresses\\n\")\n\t\treturn\n\t}\n\tinstData.intfAddrs = IntfAddrs\n\n\tcheckAppAndACL(ctx, &instData)\n\n\t// Get IPv4/v6 conntrack table flows\n\tProtocols := [2]netlink.InetFamily{syscall.AF_INET, syscall.AF_INET6}\n\tfor _, proto := range Protocols {\n\t\tconnT, err := netlink.ConntrackTableList(netlink.ConntrackTable, proto)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"FlowStats(%d): ContrackTableList\", proto)\n\t\t\treturn\n\t\t}\n\n\t\tlog.Tracef(\"***FlowStats(%d): device=%v, size of the flows %d\\n\", proto, devUUID, len(connT))\n\n\t\tfor _, entry := range connT { // loop through and process current timedout flow collection\n\t\t\tflowTuple := flowMergeProcess(entry, instData)\n\t\t\t// flowTuple := FlowMergeTuple(entry, instData, 
ipToName)\n\t\t\tif flowTuple.IsTimeOut == false || flowTuple.foundApp == false {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttimeOutTuples = append(timeOutTuples, flowTuple)\n\t\t\ttotalFlow++\n\t\t}\n\t}\n\n\tlog.Tracef(\"FlowStats ++ Total timedout flows %d, loopcount debug %d\\n\", totalFlow, loopcount)\n\tloopcount++\n\n\t// per app/bridge packing flow stats to be uploaded\n\tfor bnx := range instData.bnNet {\n\t\t// obtain DNS entries recorded since the last flow collection\n\t\tbnNum, err := bridgeStrToNum(ctx, bnx)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\tcontinue\n\t\t}\n\t\tdnssys[bnNum].Lock()\n\t\tdnsEntries := dnssys[bnNum].Snoop\n\t\tdnssys[bnNum].Snoop = nil\n\t\tdnssys[bnNum].Unlock()\n\n\t\tfor appIdx := range instData.appNet {\n\t\t\tvar sequence, flowIdx int\n\n\t\t\t// fill in the partial scope information, later the aclNum and aclAttr will decide\n\t\t\t// if we have a match in this flow into app/bridge scope\n\t\t\tscope := types.FlowScope{\n\t\t\t\tUUID: instData.appNet[appIdx],\n\t\t\t\tLocalintf: instData.bnNet[bnx].bridge,\n\t\t\t\tNetUUID: instData.bnNet[bnx].netUUID,\n\t\t\t}\n\t\t\tflowdata := types.IPFlow{\n\t\t\t\tDevID: devUUID,\n\t\t\t\tScope: scope,\n\t\t\t}\n\n\t\t\tlog.Tracef(\"FlowStats: bnx=%s, appidx %d\\n\", bnx, appIdx)\n\t\t\t// temp print out the flow \"tuple\" and stats per app/bridge\n\t\t\tfor i, tuple := range timeOutTuples { // search for flowstats by bridge\n\t\t\t\tvar aclattr aclAttr\n\t\t\t\tvar aclNum int\n\t\t\t\tvar aclaction types.ACLActionType\n\n\t\t\t\tappN := tuple.appNum\n\t\t\t\tif int(appN) != appIdx { // allow non-App flows to be uploaded\n\t\t\t\t\t//log.Functionf(\"FlowStats: appN %d, appIdx %d not match\", appN, appIdx)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif tuple.aclNum != DropMarkValue {\n\t\t\t\t\ttmpMap := instData.ipaclattr[int(appN)]\n\t\t\t\t\tif tmpMap != nil {\n\t\t\t\t\t\tif _, ok := tmpMap[int(tuple.aclNum)]; !ok {\n\t\t\t\t\t\t\tlog.Tracef(\"FlowStats: == can not get acl map with aclN, should not happen appN %d, aclN %d; %s\\n\",\n\t\t\t\t\t\t\t\tappN, tuple.aclNum, tuple.String())\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\taclattr = tmpMap[int(tuple.aclNum)]\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlog.Tracef(\"FlowStats: == can't get acl map with appN, should not happen, appN %d, aclN %d; %s\\n\",\n\t\t\t\t\t\t\tappN, tuple.aclNum, tuple.String())\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif aclattr.aclNum == 0 {\n\t\t\t\t\t\tlog.Tracef(\"FlowStats: == aclN zero in attr, appN %d, aclN %d; %s\\n\", appN, tuple.aclNum, tuple.String())\n\t\t\t\t\t\t// some debug info\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tif aclattr.bridge != bnx {\n\t\t\t\t\t\tlog.Tracef(\"FlowStats: == bridge name not match %s, %s\\n\", bnx, aclattr.bridge)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tscope.Intf = aclattr.intfname // App side DomU internal interface name\n\t\t\t\t\taclaction = types.ACLActionAccept\n\t\t\t\t\taclNum = int(aclattr.aclNum)\n\t\t\t\t} else { // conntrack mark aclNum field being 0xffffff\n\t\t\t\t\t// special drop aclNum\n\t\t\t\t\tappinfo := flowGetAppInfo(tuple, instData.appIPinfo[appIdx])\n\t\t\t\t\tif appinfo.localintf != bnx {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tscope.Intf = appinfo.intf\n\t\t\t\t\taclaction = types.ACLActionDrop\n\t\t\t\t\taclNum = 0\n\t\t\t\t}\n\n\t\t\t\t// temp print out log for the flow\n\t\t\t\tlog.Tracef(\"FlowStats [%d]: on bn%d %s\\n\", i, bnNum, tuple.String()) // just print for now\n\n\t\t\t\tflowtuple := types.IPTuple{\n\t\t\t\t\tSrc: 
tuple.SrcIP,\n\t\t\t\t\tDst: tuple.DstIP,\n\t\t\t\t\tSrcPort: int32(tuple.SrcPort),\n\t\t\t\t\tDstPort: int32(tuple.DstPort),\n\t\t\t\t\tProto: int32(tuple.Proto),\n\t\t\t\t}\n\t\t\t\tflowrec := types.FlowRec{\n\t\t\t\t\tFlow: flowtuple,\n\t\t\t\t\tInbound: !tuple.AppInitiate,\n\t\t\t\t\tACLID: int32(aclNum),\n\t\t\t\t\tAction: aclaction,\n\t\t\t\t\tStartTime: tuple.TimeStart,\n\t\t\t\t\tStopTime: tuple.TimeStop,\n\t\t\t\t\tTxBytes: int64(tuple.SendBytes),\n\t\t\t\t\tTxPkts: int64(tuple.SendPkts),\n\t\t\t\t\tRxBytes: int64(tuple.RecvBytes),\n\t\t\t\t\tRxPkts: int64(tuple.RecvPkts),\n\t\t\t\t}\n\n\t\t\t\tflowdata.Flows = append(flowdata.Flows, flowrec)\n\t\t\t\tflowIdx++\n\t\t\t\tif flowIdx > maxFlowPack {\n\t\t\t\t\tflowPublish(ctx, &flowdata, &sequence, &flowIdx)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar dnsrec [2]map[string]dnsEntry\n\t\t\tdnsrec[0] = make(map[string]dnsEntry) // store IPv4 addresses from dns\n\t\t\tdnsrec[1] = make(map[string]dnsEntry) // store IPv6 addresses from dns\n\n\t\t\t// select dns request/replies corresponding to this app\n\t\t\tfor _, dnsdata := range dnsEntries {\n\t\t\t\tif !checkAppIPAddr(instData.appIPinfo[appIdx], dnsdata.AppIP) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// unique by domain name, latest reply overwrite previous ones\n\t\t\t\tif dnsdata.isIPv4 {\n\t\t\t\t\tdnsrec[0][dnsdata.DomainName] = dnsdata\n\t\t\t\t} else {\n\t\t\t\t\tdnsrec[1][dnsdata.DomainName] = dnsdata\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// append dns records into the flow data\n\t\t\tfor idx := range dnsrec {\n\t\t\t\tfor _, dnsRec := range dnsrec[idx] {\n\t\t\t\t\t// temp print out all unique dns replies for the bridge/app\n\t\t\t\t\tlog.Tracef(\"!!FlowStats: DNS time %v, domain %s, appIP %v, count %d, Answers %v\",\n\t\t\t\t\t\tdnsRec.TimeStamp, dnsRec.DomainName, dnsRec.AppIP, dnsRec.ANCount, dnsRec.Answers)\n\n\t\t\t\t\tdnsrec := types.DNSReq{\n\t\t\t\t\t\tHostName: dnsRec.DomainName,\n\t\t\t\t\t\tAddrs: dnsRec.Answers,\n\t\t\t\t\t\tRequestTime: dnsRec.TimeStamp.UnixNano(),\n\t\t\t\t\t}\n\t\t\t\t\tflowdata.DNSReqs = append(flowdata.DNSReqs, dnsrec)\n\t\t\t\t\tflowIdx++\n\t\t\t\t\tif flowIdx > maxFlowPack {\n\t\t\t\t\t\tflowPublish(ctx, &flowdata, &sequence, &flowIdx)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// flow record done for the bridge/app\n\t\t\t// publish the flow data (per app/bridge) and sequence (for size limit) to zedagent now\n\t\t\tflowPublish(ctx, &flowdata, &sequence, &flowIdx)\n\t\t}\n\t}\n\t// check and remove stale flowlog publications\n\tcheckFlowUnpublish(ctx)\n}", "func (m *InstallManager) gatherLogs(cd *hivev1.ClusterDeployment) {\n\tif !m.isBootstrapComplete() {\n\t\tif err := m.gatherBootstrapNodeLogs(cd); err != nil {\n\t\t\tm.log.WithError(err).Warn(\"error fetching logs from bootstrap node\")\n\t\t\treturn\n\t\t}\n\t\tm.log.Info(\"successfully gathered logs from bootstrap node\")\n\t} else {\n\t\tif err := m.gatherClusterLogs(cd); err != nil {\n\t\t\tm.log.WithError(err).Warn(\"error fetching logs with oc adm must-gather\")\n\t\t\treturn\n\t\t}\n\t\tm.log.Info(\"successfully ran oc adm must-gather\")\n\t}\n}", "func measureSpammerMetrics() {\n\tif spammerStartTime.IsZero() {\n\t\t// Spammer not started yet\n\t\treturn\n\t}\n\n\tsentSpamMsgsCnt := deps.ServerMetrics.SentSpamMessages.Load()\n\tnew := utils.GetUint32Diff(sentSpamMsgsCnt, lastSentSpamMsgsCnt)\n\tlastSentSpamMsgsCnt = sentSpamMsgsCnt\n\n\tspammerAvgHeap.Add(uint64(new))\n\n\ttimeDiff := time.Since(spammerStartTime)\n\tif timeDiff > 60*time.Second {\n\t\t// Only filter over one minute 
maximum\n\t\ttimeDiff = 60 * time.Second\n\t}\n\n\t// trigger events for outside listeners\n\tEvents.AvgSpamMetricsUpdated.Trigger(&spammer.AvgSpamMetrics{\n\t\tNewMessages: new,\n\t\tAverageMessagesPerSecond: spammerAvgHeap.GetAveragePerSecond(timeDiff),\n\t})\n}", "func (e *UwsgiExporter) Collect(ch chan<- prometheus.Metric) {\n\tstartTime := time.Now()\n\terr := e.execute(ch)\n\td := time.Since(startTime).Seconds()\n\n\tif err != nil {\n\t\tlog.Errorf(\"ERROR: scrape failed after %fs: %s\", d, err)\n\t\te.uwsgiUp.Set(0)\n\t\te.scrapeDurations.WithLabelValues(\"error\").Observe(d)\n\t} else {\n\t\tlog.Debugf(\"OK: scrape successful after %fs.\", d)\n\t\te.uwsgiUp.Set(1)\n\t\te.scrapeDurations.WithLabelValues(\"success\").Observe(d)\n\t}\n\n\te.uwsgiUp.Collect(ch)\n\te.scrapeDurations.Collect(ch)\n}", "func (m *KubeletMonitor) parseNodeStats(nodeStats stats.NodeStats) {\n\t// cpu\n\tcpuUsageCore := float64(*nodeStats.CPU.UsageNanoCores) / util.NanoToUnit\n\tglog.V(4).Infof(\"Cpu usage of node %s is %f core\", nodeStats.NodeName, cpuUsageCore)\n\tnodeCpuUsageCoreMetrics := metrics.NewEntityResourceMetric(task.NodeType, util.NodeStatsKeyFunc(nodeStats),\n\t\tmetrics.CPU, metrics.Used, cpuUsageCore)\n\n\t// memory\n\tmemoryUsageKiloBytes := float64(*nodeStats.Memory.UsageBytes) / util.KilobytesToBytes\n\tglog.V(4).Infof(\"Memory usage of node %s is %f Kb\", nodeStats.NodeName, memoryUsageKiloBytes)\n\tnodeMemoryUsageKiloBytesMetrics := metrics.NewEntityResourceMetric(task.NodeType,\n\t\tutil.NodeStatsKeyFunc(nodeStats), metrics.Memory, metrics.Used, memoryUsageKiloBytes)\n\n\tm.metricSink.AddNewMetricEntries(nodeCpuUsageCoreMetrics, nodeMemoryUsageKiloBytesMetrics)\n\n}", "func ProcStat(c *gin.Context) {\n\tres := CmdExec(\"cat /proc/stat | head -n 1 | awk '{$1=\\\"\\\";print}'\")\n\tresArray := strings.Split(res[0], \" \")\n\tvar cpu []int64\n\tvar totalcpu, idlecpu int64\n\tfor _, v := range resArray {\n\t\ttemp, err := strconv.ParseInt(v, 10, 64)\n\t\tif err == nil {\n\t\t\tcpu = append(cpu, temp)\n\t\t\ttotalcpu = totalcpu + temp\n\t\t}\n\t}\n\tidlecpu = cpu[3]\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"totalcpu\": totalcpu,\n\t\t\"idlecpu\": idlecpu,\n\t})\n}", "func (c *VM) Collect() error {\n\tmetrics := cgm.Metrics{}\n\n\tc.Lock()\n\n\tif c.runTTL > time.Duration(0) {\n\t\tif time.Since(c.lastEnd) < c.runTTL {\n\t\t\tc.logger.Warn().Msg(collector.ErrTTLNotExpired.Error())\n\t\t\tc.Unlock()\n\t\t\treturn collector.ErrTTLNotExpired\n\t\t}\n\t}\n\tif c.running {\n\t\tc.logger.Warn().Msg(collector.ErrAlreadyRunning.Error())\n\t\tc.Unlock()\n\t\treturn collector.ErrAlreadyRunning\n\t}\n\n\tc.running = true\n\tc.lastStart = time.Now()\n\tc.Unlock()\n\n\tif err := c.parseMemstats(&metrics); err != nil {\n\t\tc.setStatus(metrics, err)\n\t\treturn errors.Wrap(err, c.pkgID)\n\t}\n\n\tif err := c.parseVMstats(&metrics); err != nil {\n\t\tc.setStatus(metrics, err)\n\t\treturn errors.Wrap(err, c.pkgID)\n\t}\n\n\tc.setStatus(metrics, nil)\n\treturn nil\n}", "func (p *perfStoreManager) collect() {\n\tallCgroups, err := p.cgroupSt.ListAllCgroups(sets.NewString(appclass.AppClassOnline))\n\tif err != nil {\n\t\treturn\n\t}\n\n\tp.cpuLock.Lock()\n\tdefer p.cpuLock.Unlock()\n\n\twg := sync.WaitGroup{}\ncgloop:\n\tfor k, v := range allCgroups {\n\t\t// skip the whole cgroup as soon as it matches an ignored pattern\n\t\tfor _, ignored := range p.IgnoredCgroups {\n\t\t\tif checkSubCgroup(ignored, k) {\n\t\t\t\tklog.V(4).Infof(\"cgroup(%s) has been ignored\", k)\n\t\t\t\tcontinue cgloop\n\t\t\t}\n\t\t}\n\n\t\twg.Add(1)\n\t\tgo func(cg string, ref *cgstore.CgroupRef) {\n\t\t\tdefer 
wg.Done()\n\n\t\t\tcgPath, err := cgroup.GetPerfEventCgroupPath(cg)\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"get perf_event cgroup path err: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// check pids\n\t\t\tpids, err := cgroup.GetPids(cgPath)\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"cgroup(%s) get pid err: %v\", cg, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif len(pids) == 0 {\n\t\t\t\tklog.V(4).Infof(\"cgroup(%s) has no pid\", cg)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// read cpus\n\t\t\tcpus, err := cgroup.GetCpuSet(cg, true)\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"cgroup(%s) get cpu sets err: %v\", cg, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif len(cpus) == 0 {\n\t\t\t\tklog.Errorf(\"cgroup(%s) get cpu sets is nil\", cg)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tstart := time.Now()\n\t\t\tcpuStartTotal, err := cgroup.GetCPUTotalUsage(cg)\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"cgroup(%s) collect cpu usage failed: %v\", cg, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpmuData, err := pmu.GetPMUValue(int(p.CollectDuration.Seconds()),\n\t\t\t\tcgPath, strings.Join(cpus, \",\"))\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"cgroup(%s) collect perf data err: %v\", cg, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttimeElapsed := time.Since(start).Nanoseconds()\n\t\t\tcpuEndTotal, err := cgroup.GetCPUTotalUsage(cg)\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"cgroup(%s) collect cpu usage failed: %v\", cg, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpmuData.CPUUsage = float64(cpuEndTotal-cpuStartTotal) / float64(timeElapsed)\n\n\t\t\tmetric := &PerfMetrics{\n\t\t\t\tSpec: *ref,\n\t\t\t\tValue: pmuData,\n\t\t\t}\n\t\t\tp.addContainerPerf(cg, pmuData.Timestamp, metric)\n\t\t}(k, v)\n\t}\n\twg.Wait()\n\n\tp.delContainerPerfs()\n\n\treturn\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tjunosTotalScrapeCount++\n\tch <- prometheus.MustNewConstMetric(junosDesc[\"ScrapesTotal\"], prometheus.CounterValue, junosTotalScrapeCount)\n\n\twg := &sync.WaitGroup{}\n\tfor _, collector := range e.Collectors {\n\t\twg.Add(1)\n\t\tgo e.runCollector(ch, collector, wg)\n\t}\n\twg.Wait()\n}", "func (nsac KubeNodeCollector) Collect(ch chan<- prometheus.Metric) {\n\tnodes := nsac.KubeClusterCache.GetAllNodes()\n\tdisabledMetrics := nsac.metricsConfig.GetDisabledMetricsMap()\n\n\tfor _, node := range nodes {\n\t\tnodeName := node.GetName()\n\n\t\t// Node Capacity\n\t\tfor resourceName, quantity := range node.Status.Capacity {\n\t\t\tresource, unit, value := toResourceUnitValue(resourceName, quantity)\n\n\t\t\t// failed to parse the resource type\n\t\t\tif resource == \"\" {\n\t\t\t\tlog.DedupedWarningf(5, \"Failed to parse resource units and quantity for resource: %s\", resourceName)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// KSM v1 Emission\n\t\t\tif _, disabled := disabledMetrics[\"kube_node_status_capacity_cpu_cores\"]; !disabled {\n\t\t\t\tif resource == \"cpu\" {\n\t\t\t\t\tch <- newKubeNodeStatusCapacityCPUCoresMetric(\"kube_node_status_capacity_cpu_cores\", nodeName, value)\n\n\t\t\t\t}\n\t\t\t}\n\t\t\tif _, disabled := disabledMetrics[\"kube_node_status_capacity_memory_bytes\"]; !disabled {\n\t\t\t\tif resource == \"memory\" {\n\t\t\t\t\tch <- newKubeNodeStatusCapacityMemoryBytesMetric(\"kube_node_status_capacity_memory_bytes\", nodeName, value)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif _, disabled := disabledMetrics[\"kube_node_status_capacity\"]; !disabled {\n\t\t\t\tch <- newKubeNodeStatusCapacityMetric(\"kube_node_status_capacity\", nodeName, resource, unit, value)\n\t\t\t}\n\t\t}\n\n\t\t// Node Allocatable Resources\n\t\tfor 
resourceName, quantity := range node.Status.Allocatable {\n\t\t\tresource, unit, value := toResourceUnitValue(resourceName, quantity)\n\n\t\t\t// failed to parse the resource type\n\t\t\tif resource == \"\" {\n\t\t\t\tlog.DedupedWarningf(5, \"Failed to parse resource units and quantity for resource: %s\", resourceName)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// KSM v1 Emission\n\t\t\tif _, disabled := disabledMetrics[\"kube_node_status_allocatable_cpu_cores\"]; !disabled {\n\t\t\t\tif resource == \"cpu\" {\n\t\t\t\t\tch <- newKubeNodeStatusAllocatableCPUCoresMetric(\"kube_node_status_allocatable_cpu_cores\", nodeName, value)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif _, disabled := disabledMetrics[\"kube_node_status_allocatable_memory_bytes\"]; !disabled {\n\t\t\t\tif resource == \"memory\" {\n\t\t\t\t\tch <- newKubeNodeStatusAllocatableMemoryBytesMetric(\"kube_node_status_allocatable_memory_bytes\", nodeName, value)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif _, disabled := disabledMetrics[\"kube_node_status_allocatable\"]; !disabled {\n\t\t\t\tch <- newKubeNodeStatusAllocatableMetric(\"kube_node_status_allocatable\", nodeName, resource, unit, value)\n\t\t\t}\n\t\t}\n\n\t\t// node labels\n\t\tif _, disabled := disabledMetrics[\"kube_node_labels\"]; !disabled {\n\t\t\tlabelNames, labelValues := prom.KubePrependQualifierToLabels(prom.SanitizeLabels(node.GetLabels()), \"label_\")\n\t\t\tch <- newKubeNodeLabelsMetric(nodeName, \"kube_node_labels\", labelNames, labelValues)\n\t\t}\n\n\t\t// kube_node_status_condition\n\t\t// Collect node conditions, defaulting each status to false.\n\t\tif _, disabled := disabledMetrics[\"kube_node_status_condition\"]; !disabled {\n\t\t\tfor _, c := range node.Status.Conditions {\n\t\t\t\tconditions := getConditions(c.Status)\n\n\t\t\t\tfor _, cond := range conditions {\n\t\t\t\t\tch <- newKubeNodeStatusConditionMetric(nodeName, \"kube_node_status_condition\", string(c.Type), cond.status, cond.value)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (s *CPUStat) Collect() {\n\tfile, err := os.Open(root + \"proc/stat\")\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tf := regexp.MustCompile(\"\\\\s+\").Split(scanner.Text(), -1)\n\n\t\tisCPU, err := regexp.MatchString(\"^cpu\\\\d*\", f[0])\n\t\tif err == nil && isCPU {\n\t\t\tif f[0] == \"cpu\" {\n\t\t\t\tparseCPUline(s.All, f)\n\t\t\t\tpopulateComputedStats(s.All, float64(len(s.cpus)))\n\t\t\t\ts.All.TotalCount.Set(float64(len(s.cpus)))\n\t\t\t} else {\n\t\t\t\tperCPU, ok := s.cpus[f[0]]\n\t\t\t\tif !ok {\n\t\t\t\t\tperCPU = NewPerCPU(s.m, f[0])\n\t\t\t\t\ts.cpus[f[0]] = perCPU\n\t\t\t\t}\n\t\t\t\tparseCPUline(perCPU, f)\n\t\t\t\tpopulateComputedStats(perCPU, 1.0)\n\t\t\t\tperCPU.TotalCount.Set(1)\n\t\t\t}\n\t\t}\n\t}\n}", "func (ld *loader) CollectMetrics() (writes metrics.RequestsSummary, reads metrics.RequestsSummary, err error) {\n\t// https://pkg.go.dev/github.com/prometheus/client_golang/prometheus?tab=doc#Gatherer\n\tmfs, err := prometheus.DefaultGatherer.Gather()\n\tif err != nil {\n\t\tld.cfg.Logger.Warn(\"failed to gather prometheus metrics\", zap.Error(err))\n\t\treturn metrics.RequestsSummary{}, metrics.RequestsSummary{}, err\n\t}\n\tfor _, mf := range mfs {\n\t\tif mf == nil {\n\t\t\tcontinue\n\t\t}\n\t\tswitch *mf.Name {\n\t\tcase \"secrets_client_write_requests_success_total\":\n\t\t\tgg := mf.Metric[0].GetGauge()\n\t\t\twrites.SuccessTotal = gg.GetValue()\n\t\tcase \"secrets_client_write_requests_failure_total\":\n\t\t\tgg := 
mf.Metric[0].GetGauge()\n\t\t\twrites.FailureTotal = gg.GetValue()\n\t\tcase \"secrets_client_write_request_latency_milliseconds\":\n\t\t\twrites.LatencyHistogram, err = metrics.ParseHistogram(\"milliseconds\", mf.Metric[0].GetHistogram())\n\t\t\tif err != nil {\n\t\t\t\treturn metrics.RequestsSummary{}, metrics.RequestsSummary{}, err\n\t\t\t}\n\n\t\tcase \"secrets_client_read_requests_success_total\":\n\t\t\tgg := mf.Metric[0].GetGauge()\n\t\t\treads.SuccessTotal = gg.GetValue()\n\t\tcase \"secrets_client_read_requests_failure_total\":\n\t\t\tgg := mf.Metric[0].GetGauge()\n\t\t\treads.FailureTotal = gg.GetValue()\n\t\tcase \"secrets_client_read_request_latency_milliseconds\":\n\t\t\treads.LatencyHistogram, err = metrics.ParseHistogram(\"milliseconds\", mf.Metric[0].GetHistogram())\n\t\t\tif err != nil {\n\t\t\t\treturn metrics.RequestsSummary{}, metrics.RequestsSummary{}, err\n\t\t\t}\n\t\t}\n\t}\n\n\tld.cfg.Logger.Info(\"sorting write latency results\", zap.Int(\"total-data-points\", ld.writeLatencies.Len()))\n\tnow := time.Now()\n\tsort.Sort(ld.writeLatencies)\n\tld.cfg.Logger.Info(\"sorted write latency results\", zap.Int(\"total-data-points\", ld.writeLatencies.Len()), zap.String(\"took\", time.Since(now).String()))\n\twrites.LantencyP50 = ld.writeLatencies.PickLantencyP50()\n\twrites.LantencyP90 = ld.writeLatencies.PickLantencyP90()\n\twrites.LantencyP99 = ld.writeLatencies.PickLantencyP99()\n\twrites.LantencyP999 = ld.writeLatencies.PickLantencyP999()\n\twrites.LantencyP9999 = ld.writeLatencies.PickLantencyP9999()\n\n\tld.cfg.Logger.Info(\"writing latency results in JSON to disk\", zap.String(\"path\", ld.cfg.WritesJSONPath))\n\twb, err := json.Marshal(ld.writeLatencies)\n\tif err != nil {\n\t\tld.cfg.Logger.Warn(\"failed to encode latency results in JSON\", zap.Error(err))\n\t\treturn metrics.RequestsSummary{}, metrics.RequestsSummary{}, err\n\t}\n\tif err = ioutil.WriteFile(ld.cfg.WritesJSONPath, wb, 0600); err != nil {\n\t\tld.cfg.Logger.Warn(\"failed to write latency results in JSON to disk\", zap.String(\"path\", ld.cfg.WritesJSONPath), zap.Error(err))\n\t\treturn metrics.RequestsSummary{}, metrics.RequestsSummary{}, err\n\t}\n\tld.cfg.Logger.Info(\"wrote latency results in JSON to disk\", zap.String(\"path\", ld.cfg.WritesJSONPath))\n\n\tld.cfg.Logger.Info(\"sorting read latency results\", zap.Int(\"total-data-points\", ld.readLatencies.Len()))\n\tnow = time.Now()\n\tsort.Sort(ld.readLatencies)\n\tld.cfg.Logger.Info(\"sorted read latency results\", zap.Int(\"total-data-points\", ld.readLatencies.Len()), zap.String(\"took\", time.Since(now).String()))\n\treads.LantencyP50 = ld.readLatencies.PickLantencyP50()\n\treads.LantencyP90 = ld.readLatencies.PickLantencyP90()\n\treads.LantencyP99 = ld.readLatencies.PickLantencyP99()\n\treads.LantencyP999 = ld.readLatencies.PickLantencyP999()\n\treads.LantencyP9999 = ld.readLatencies.PickLantencyP9999()\n\n\tld.cfg.Logger.Info(\"writing latency results in JSON to disk\", zap.String(\"path\", ld.cfg.ReadsJSONPath))\n\twb, err = json.Marshal(ld.readLatencies)\n\tif err != nil {\n\t\tld.cfg.Logger.Warn(\"failed to encode latency results in JSON\", zap.Error(err))\n\t\treturn metrics.RequestsSummary{}, metrics.RequestsSummary{}, err\n\t}\n\tif err = ioutil.WriteFile(ld.cfg.ReadsJSONPath, wb, 0600); err != nil {\n\t\tld.cfg.Logger.Warn(\"failed to write latency results in JSON to disk\", zap.String(\"path\", ld.cfg.ReadsJSONPath), zap.Error(err))\n\t\treturn metrics.RequestsSummary{}, metrics.RequestsSummary{}, 
err\n\t}\n\tld.cfg.Logger.Info(\"wrote latency results in JSON to disk\", zap.String(\"path\", ld.cfg.ReadsJSONPath))\n\n\treturn writes, reads, nil\n}", "func logMemstatsSample() {\n\tl := log.WithField(\"process\", \"memstats\")\n\n\truntime.GC() // get up-to-date statistics\n\n\tmemStats := new(runtime.MemStats)\n\truntime.ReadMemStats(memStats)\n\n\tvar gcStats debug.GCStats\n\tdebug.ReadGCStats(&gcStats)\n\n\ts := memStats\n\n\tl.Infof(\"# runtime.MemStats\")\n\tl.Infof(\"# Alloc = %d\", s.Alloc)\n\tl.Infof(\"# TotalAlloc = %d\", s.TotalAlloc)\n\tl.Infof(\"# Sys = %d\", s.Sys)\n\tl.Infof(\"# Lookups = %d\", s.Lookups)\n\tl.Infof(\"# Mallocs = %d\", s.Mallocs)\n\tl.Infof(\"# Frees = %d\", s.Frees)\n\tl.Infof(\"# HeapAlloc = %d\", s.HeapAlloc)\n\tl.Infof(\"# HeapSys = %d\", s.HeapSys)\n\tl.Infof(\"# HeapIdle = %d\", s.HeapIdle)\n\tl.Infof(\"# HeapInuse = %d\", s.HeapInuse)\n\tl.Infof(\"# HeapReleased = %d\", s.HeapReleased)\n\tl.Infof(\"# HeapObjects = %d\", s.HeapObjects)\n\tl.Infof(\"# Stack = %d / %d\", s.StackInuse, s.StackSys)\n\tl.Infof(\"# NumGoroutine = %d\", runtime.NumGoroutine())\n\n\t// Record GC pause history, most recent 5 entries\n\tl.Infof(\"# Stop-the-world Pause time\")\n\n\tfor i, v := range gcStats.Pause {\n\t\tl.Infof(\"# gcStats.Pause[%d] = %d ns\", i, v)\n\n\t\tif i == 5 {\n\t\t\tbreak\n\t\t}\n\t}\n}", "func CaptureRuntimeMemStats(registry RootRegistry, collectionFreq time.Duration) {\n\truntimeMemStats.Do(func() {\n\t\tif reg, ok := registry.(*rootRegistry); ok {\n\t\t\tgoRegistry := metrics.NewPrefixedChildRegistry(reg.registry, \"go.\")\n\t\t\tmetrics.RegisterRuntimeMemStats(goRegistry)\n\t\t\tgo metrics.CaptureRuntimeMemStats(goRegistry, collectionFreq)\n\t\t}\n\t})\n}", "func ProcLoadavg(c *gin.Context) {\n\tres := CmdExec(\"cat /proc/loadavg\")\n\tresArray := strings.Split(res[0], \" \")\n\tload5, _ := strconv.ParseFloat(resArray[0], 32)\n\tload10, _ := strconv.ParseFloat(resArray[1], 32)\n\tload15, _ := strconv.ParseFloat(resArray[2], 32)\n\trunningString := strings.Split(resArray[3], \"/\")\n\trunningprocess, _ := strconv.Atoi(runningString[0])\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"load5\": load5,\n\t\t\"load10\": load10,\n\t\t\"load15\": load15,\n\t\t\"runningprocess\": runningprocess,\n\t})\n}", "func (cpuCollector *CPUCollector) Collect() {\n\tcpuCollector.cpuStats.GetCPUStats()\n\n\tcpuCollector.cpuMetrics.cpuTotal.Set(float64(cpuCollector.cpuStats.Total))\n\tcpuCollector.cpuMetrics.cupIdle.Set(float64(cpuCollector.cpuStats.Idle))\n\tcpuCollector.cpuMetrics.cpuUtilization.Set(cpuCollector.cpuStats.Utilization)\n}", "func dailyTaskStatsForOldTasksPipeline(projectId string, requester string, start time.Time, end time.Time, tasks []string, lastUpdate time.Time) []bson.M {\n\t// Using the same pipeline as for the tasks collection as the base.\n\tbasePipeline := getDailyTaskStatsPipeline(projectId, requester, start, end, tasks, lastUpdate, true)\n\t// And the merge the documents with the existing ones.\n\tmergePipeline := []bson.M{\n\t\t{\"$lookup\": bson.M{\n\t\t\t\"from\": dailyTaskStatsCollection,\n\t\t\t\"localField\": \"_id\",\n\t\t\t\"foreignField\": \"_id\",\n\t\t\t\"as\": \"existing\",\n\t\t}},\n\t\t{\"$unwind\": bson.M{\n\t\t\t\"path\": \"$existing\",\n\t\t\t\"preserveNullAndEmptyArrays\": true,\n\t\t}},\n\t\t{\"$project\": bson.M{\n\t\t\t\"_id\": 1,\n\t\t\t\"num_success\": bson.M{\"$add\": array{\"$num_success\", \"$existing.num_success\"}},\n\t\t\t\"num_failed\": bson.M{\"$add\": array{\"$num_failed\", 
\"$existing.num_failed\"}},\n\t\t\t\"num_test_failed\": bson.M{\"$add\": array{\"$num_test_failed\", \"$existing.num_test_failed\"}},\n\t\t\t\"num_setup_failed\": bson.M{\"$add\": array{\"$num_setup_failed\", \"$existing.num_setup_failed\"}},\n\t\t\t\"num_system_failed\": bson.M{\"$add\": array{\"$num_system_failed\", \"$existing.num_system_failed\"}},\n\t\t\t\"num_timeout\": bson.M{\"$add\": array{\"$num_timeout\", \"$existing.num_timeout\"}},\n\t\t\t\"total_duration_success\": bson.M{\"$add\": array{\n\t\t\t\tbson.M{\"$ifNull\": array{bson.M{\"$multiply\": array{\"$num_success\", \"$avg_duration_success\"}}, 0}},\n\t\t\t\tbson.M{\"$ifNull\": array{bson.M{\"$multiply\": array{\"$existing.num_success\", \"$existing.avg_duration_success\"}}, 0}},\n\t\t\t}},\n\t\t\t\"last_update\": 1,\n\t\t}},\n\t\t{\"$project\": bson.M{\n\t\t\t\"_id\": 1,\n\t\t\t\"num_success\": 1,\n\t\t\t\"num_failed\": 1,\n\t\t\t\"num_test_failed\": 1,\n\t\t\t\"num_setup_failed\": 1,\n\t\t\t\"num_system_failed\": 1,\n\t\t\t\"num_timeout\": 1,\n\t\t\t\"avg_duration_success\": bson.M{\"$cond\": bson.M{\"if\": bson.M{\"$ne\": array{\"$num_success\", 0}},\n\t\t\t\t\"then\": bson.M{\"$divide\": array{\"$total_duration_success\", \"$num_success\"}},\n\t\t\t\t\"else\": nil}},\n\t\t\t\"last_update\": 1,\n\t\t}},\n\t}\n\treturn append(basePipeline, mergePipeline...)\n\n}", "func (c *solarCollector) collect(ch chan<- prometheus.Metric) error {\n\t// fetch the status of the controller\n\ttracer, err := gotracer.Status(\"/dev/ttyUSB0\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t/*\n\t * report the collected data\n\t */\n\n\t// store boolean values as a float (1 == true, 0 == false)\n\tvar loadIsActive float64\n\t// Panel array\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.panelVoltage,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.ArrayVoltage),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.panelCurrent,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.ArrayCurrent),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.panelPower,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.ArrayPower),\n\t)\n\n\t// Batteries\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.batteryCurrent,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.BatteryCurrent),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.batteryVoltage,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.BatteryVoltage),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.batterySOC,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.BatterySOC),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.batteryTemp,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.BatteryTemp),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.batteryMinVoltage,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.BatteryMinVoltage),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.batteryMaxVoltage,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.BatteryMaxVoltage),\n\t)\n\n\t// Load output\n\tif tracer.Load {\n\t\tloadIsActive = 1\n\t}\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.loadActive,\n\t\tprometheus.GaugeValue,\n\t\tloadIsActive,\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.loadVoltage,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.LoadVoltage),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.loadCurrent,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.LoadCurrent),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.loadPower,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.LoadPower),\n\t)\n\n\t// controller infos\n\tch <- 
prometheus.MustNewConstMetric(\n\t\tc.deviceTemp,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.DeviceTemp),\n\t)\n\n\t// energy consumed\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.energyConsumedDaily,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.EnergyConsumedDaily),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.energyConsumedMonthly,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.EnergyConsumedMonthly),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.energyConsumedAnnual,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.EnergyConsumedAnnual),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.energyConsumedTotal,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.EnergyConsumedTotal),\n\t)\n\t// energy generated\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.energyGeneratedDaily,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.EnergyGeneratedDaily),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.energyGeneratedMonthly,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.EnergyGeneratedMonthly),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.energyGeneratedAnnual,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.EnergyGeneratedAnnual),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.energyGeneratedTotal,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.EnergyGeneratedTotal),\n\t)\n\n\treturn nil\n}", "func (p *capacitySwiftHealthStatsdPlugin) Scrape(provider *gophercloud.ProviderClient) (map[string]map[string]uint64, error) {\n\n\tvar prometheusQuery = \"min(swift_cluster_storage_capacity_bytes_gauge < inf)\"\n\tvar prometheusAPIURL = \"https://localhost:9090\"\n\tif p.cfg.Swift.PrometheusAPIURL != \"\" {\n\t\tprometheusAPIURL = p.cfg.Swift.PrometheusAPIURL\n\t}\n\n\tclient, err := Client(prometheusAPIURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar value model.Value\n\tvar resultVector model.Vector\n\tvar capacity = map[string]uint64{}\n\tvar adjustmentFactor = 1.0\n\n\tvalue, err = client.Query(context.Background(), prometheusQuery, time.Now())\n\tif err != nil {\n\t\tutil.LogError(\"Could not get value for query %s from Prometheus %s.\", prometheusQuery, prometheusAPIURL)\n\t\treturn nil, err\n\t}\n\tresultVector, ok := value.(model.Vector)\n\tif !ok {\n\t\tutil.LogError(\"Could not get value for query %s from Prometheus due to type mismatch.\", prometheusQuery)\n\t\treturn nil, nil\n\t}\n\n\tif p.cfg.Swift.AdjustmentFactor != 0 {\n\t\tadjustmentFactor = p.cfg.Swift.AdjustmentFactor\n\t}\n\n\tif resultVector.Len() != 0 {\n\t\tcapacity[\"capacity\"] = uint64(float64(resultVector[0].Value) * adjustmentFactor)\n\t}\n\n\t//returns something like\n\t//\"object-store\": {\n\t//\t\"capacity\": capacity,\n\t//}\n\treturn map[string]map[string]uint64{\n\t\t\"object-store\": capacity,\n\t}, nil\n\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tvar up float64 = 1\n\n\tglobalMutex.Lock()\n\tdefer globalMutex.Unlock()\n\n\tif e.config.resetStats && !globalResetExecuted {\n\t\t// It's time to try to reset the stats\n\t\tif e.resetStatsSemp1() {\n\t\t\tlevel.Info(e.logger).Log(\"msg\", \"Statistics successfully reset\")\n\t\t\tglobalResetExecuted = true\n\t\t\tup = 1\n\t\t} else {\n\t\t\tup = 0\n\t\t}\n\t}\n\n\tif e.config.details {\n\t\tif up > 0 {\n\t\t\tup = e.getClientSemp1(ch)\n\t\t}\n\t\tif up > 0 {\n\t\t\tup = e.getQueueSemp1(ch)\n\t\t}\n\t\tif up > 0 && e.config.scrapeRates {\n\t\t\tup = e.getQueueRatesSemp1(ch)\n\t\t}\n\t} else { // Basic\n\t\tif up > 0 {\n\t\t\tup = e.getRedundancySemp1(ch)\n\t\t}\n\t\tif up > 0 {\n\t\t\tup = 
e.getSpoolSemp1(ch)\n\t\t}\n\t\tif up > 0 {\n\t\t\tup = e.getHealthSemp1(ch)\n\t\t}\n\t\tif up > 0 {\n\t\t\tup = e.getVpnSemp1(ch)\n\t\t}\n\t}\n\n\tch <- prometheus.MustNewConstMetric(solaceUp, prometheus.GaugeValue, up)\n}", "func (s *Stats) GetMemoryInfo(logMemory, logGoMemory bool) {\n\n if logGoMemory {\n if s.GoInfo == nil {\n s.initGoInfo()\n }\n\n runtime.ReadMemStats(s.GoInfo.Memory.mem)\n s.GoInfo.GoRoutines = runtime.NumGoroutine()\n s.GoInfo.Memory.Alloc = s.GoInfo.Memory.mem.Alloc\n s.GoInfo.Memory.HeapAlloc = s.GoInfo.Memory.mem.HeapAlloc\n s.GoInfo.Memory.HeapSys = s.GoInfo.Memory.mem.HeapSys\n\n if s.GoInfo.Memory.LastGC != s.GoInfo.Memory.mem.LastGC {\n s.GoInfo.Memory.LastGC = s.GoInfo.Memory.mem.LastGC\n s.GoInfo.Memory.NumGC = s.GoInfo.Memory.mem.NumGC - s.GoInfo.Memory.lastNumGC\n s.GoInfo.Memory.lastNumGC = s.GoInfo.Memory.mem.NumGC\n s.GoInfo.Memory.LastGCPauseDuration = s.GoInfo.Memory.mem.PauseNs[(s.GoInfo.Memory.mem.NumGC+255)%256]\n } else {\n s.GoInfo.Memory.NumGC = 0\n s.GoInfo.Memory.LastGCPauseDuration = 0\n }\n }\n\n if logMemory {\n\n if s.MemInfo == nil {\n s.MemInfo = new(MemInfo)\n }\n\n s.MemInfo.Memory, _ = mem.VirtualMemory()\n s.MemInfo.Swap, _ = mem.SwapMemory()\n }\n}", "func (g *Gatherer) Gather(ctx context.Context, gatherList []string, rec recorder.Interface) error {\n\tg.ctx = ctx\n\tvar errors []string\n\tvar gatherReport gatherMetadata\n\n\tif len(gatherList) == 0 {\n\t\terrors = append(errors, \"no gather functions are specified to run\")\n\t}\n\n\tif utils.StringInSlice(gatherAll, gatherList) {\n\t\tgatherList = fullGatherList()\n\t}\n\n\t// Starts the gathers in Go routines\n\tcases, starts, err := g.startGathering(gatherList, &errors)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Gets the info from the Go routines\n\tfor range gatherList {\n\t\tchosen, value, _ := reflect.Select(cases)\n\t\t// The chosen channel has been closed, so zero out the channel to disable the case\n\t\tcases[chosen].Chan = reflect.ValueOf(nil)\n\t\tgather := gatherList[chosen]\n\n\t\tgi := NewGatherInfo(gather, value)\n\t\tstatusReport, errorsReport := createStatusReport(gi, rec, starts[chosen])\n\n\t\tif len(errorsReport) > 0 {\n\t\t\terrors = append(errors, errorsReport...)\n\t\t}\n\t\tgatherReport.StatusReports = append(gatherReport.StatusReports, statusReport)\n\t}\n\n\t// if obfuscation is enabled, we want to know it from the archive\n\tgatherReport.IsGlobalObfuscationEnabled = g.anonymizer != nil\n\n\t// fill in performance related data to the report\n\tvar m runtime.MemStats\n\truntime.ReadMemStats(&m)\n\tgatherReport.MemoryAlloc = m.HeapAlloc\n\tgatherReport.Uptime = time.Since(g.startTime).Truncate(time.Millisecond).Seconds()\n\n\t// records the report\n\tif err := recordGatherReport(rec, gatherReport); err != nil {\n\t\terrors = append(errors, fmt.Sprintf(\"unable to record io status reports: %v\", err))\n\t}\n\n\tif len(errors) > 0 {\n\t\treturn sumErrors(errors)\n\t}\n\n\treturn nil\n}", "func collectVMSSBootLog(ctx context.Context, providerID string, outputPath string) error {\n\tresourceID := strings.TrimPrefix(providerID, azureutil.ProviderIDPrefix)\n\tv := strings.Split(resourceID, \"/\")\n\tinstanceID := v[len(v)-1]\n\tresourceID = strings.TrimSuffix(resourceID, \"/virtualMachines/\"+instanceID)\n\tresource, err := azureutil.ParseResourceID(resourceID)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to parse resource id\")\n\t}\n\n\tLogf(\"Collecting boot logs for VMSS instance %s of scale set %s\\n\", instanceID, 
resource.Name)\n\n\tsettings, err := auth.GetSettingsFromEnvironment()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to get settings from environment\")\n\t}\n\n\tvmssClient := compute.NewVirtualMachineScaleSetVMsClient(settings.GetSubscriptionID())\n\tvmssClient.Authorizer, err = azureutil.GetAuthorizer(settings)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to get authorizer\")\n\t}\n\n\tbootDiagnostics, err := vmssClient.RetrieveBootDiagnosticsData(ctx, resource.ResourceGroupName, resource.Name, instanceID, nil)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to get boot diagnostics data\")\n\t}\n\n\treturn writeBootLog(bootDiagnostics, outputPath)\n}", "func ProcMeminfo(c *gin.Context) {\n\tres := CmdExec(\"cat /proc/meminfo | head -n 2| awk '{print $2}'\")\n\ttotalMem, _ := strconv.Atoi(res[0])\n\tfreeMem, _ := strconv.Atoi(res[1])\n\tusedMem := totalMem - freeMem\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"totalMem\": totalMem,\n\t\t\"usedMem\": usedMem,\n\t})\n}", "func (acir *awsContainerInsightReceiver) collectData(ctx context.Context) error {\n\tvar mds []pmetric.Metrics\n\tif acir.cadvisor == nil && acir.k8sapiserver == nil {\n\t\terr := errors.New(\"both cadvisor and k8sapiserver failed to start\")\n\t\tacir.settings.Logger.Error(\"Failed to collect stats\", zap.Error(err))\n\t\treturn err\n\t}\n\n\tif acir.cadvisor != nil {\n\t\tmds = append(mds, acir.cadvisor.GetMetrics()...)\n\t}\n\n\tif acir.k8sapiserver != nil {\n\t\tmds = append(mds, acir.k8sapiserver.GetMetrics()...)\n\t}\n\n\tfor _, md := range mds {\n\t\terr := acir.nextConsumer.ConsumeMetrics(ctx, md)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c *metricCollector) collectMemory(service *rrhttp.Service, tick time.Duration) {\n\tstarted := false\n\tfor {\n\t\tserver := service.Server()\n\t\tif server == nil && started {\n\t\t\t// stopped\n\t\t\treturn\n\t\t}\n\n\t\tstarted = true\n\n\t\tif workers, err := util.ServerState(server); err == nil {\n\t\t\tsum := 0.0\n\t\t\tfor _, w := range workers {\n\t\t\t\tsum = sum + float64(w.MemoryUsage)\n\t\t\t}\n\n\t\t\tc.workersMemory.Set(sum)\n\t\t}\n\n\t\ttime.Sleep(tick)\n\t}\n}", "func (e Exporter) Collect(ch chan<- prometheus.Metric) {\n\tctx := context.Background()\n\n\tcontainerService, err := container.NewService(ctx)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tcloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tprojectsListResponse, err := cloudresourcemanagerService.Projects.List().Filter(\"lifecycleState:ACTIVE\").Context(ctx).Do()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Infof(\"Found %d projects\", len(projectsListResponse.Projects))\n\n\tvar mutex = &sync.Mutex{}\n\tvar wg sync.WaitGroup\n\twg.Add(len(projectsListResponse.Projects))\n\n\tvalidMasterVersions := map[string][]string{}\n\tmasterVersionCount := map[string]float64{}\n\n\tfor _, p := range projectsListResponse.Projects {\n\t\tgo func(p *cloudresourcemanager.Project) {\n\t\t\tdefer wg.Done()\n\t\t\tresp, err := containerService.Projects.Locations.Clusters.List(\"projects/\" + p.ProjectId + \"/locations/-\").Context(ctx).Do()\n\t\t\tif err != nil {\n\t\t\t\tif ae, ok := err.(*googleapi.Error); ok && ae.Code == http.StatusForbidden {\n\t\t\t\t\tlog.Warnf(\"Missing roles/container.clusterViewer on %s (%s)\", p.Name, p.ProjectId)\n\t\t\t\t\treturn\n\t\t\t\t} else if ae, ok := err.(*googleapi.Error); ok && ae.Code == http.StatusTooManyRequests 
{\n\t\t\t\t\tlog.Warn(\"Quota exceeded\")\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor _, c := range resp.Clusters {\n\t\t\t\tmutex.Lock()\n\t\t\t\tif _, ok := validMasterVersions[c.Location]; !ok {\n\t\t\t\t\tlog.Infof(\"Pulling server configs for location %s\", c.Location)\n\t\t\t\t\tserverConfig, err := containerService.Projects.Locations.GetServerConfig(\"projects/\" + p.ProjectId + \"/locations/\" + c.Location).Do()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tif ae, ok := err.(*googleapi.Error); ok && ae.Code == http.StatusTooManyRequests {\n\t\t\t\t\t\t\tlog.Warn(\"Quota exceeded\")\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tvalidMasterVersions[c.Location] = serverConfig.ValidMasterVersions\n\t\t\t\t}\n\n\t\t\t\tif _, ok := masterVersionCount[c.CurrentMasterVersion]; !ok {\n\t\t\t\t\tmasterVersionCount[c.CurrentMasterVersion] = 1\n\t\t\t\t} else {\n\t\t\t\t\tmasterVersionCount[c.CurrentMasterVersion]++\n\t\t\t\t}\n\t\t\t\tmutex.Unlock()\n\n\t\t\t\tif !contains(c.CurrentMasterVersion, validMasterVersions[c.Location]) {\n\t\t\t\t\tch <- prometheus.MustNewConstMetric(\n\t\t\t\t\t\te.Metrics[\"gkeUnsupportedMasterVersion\"],\n\t\t\t\t\t\tprometheus.CounterValue,\n\t\t\t\t\t\t1,\n\t\t\t\t\t\t[]string{\n\t\t\t\t\t\t\tc.CurrentMasterVersion,\n\t\t\t\t\t\t\tp.ProjectId,\n\t\t\t\t\t\t\tp.Name,\n\t\t\t\t\t\t\tc.Name,\n\t\t\t\t\t\t\tc.Location,\n\t\t\t\t\t\t}...,\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t}\n\t\t}(p)\n\t}\n\n\twg.Wait()\n\n\tfor version, cnt := range masterVersionCount {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\te.Metrics[\"gkeMasterVersion\"],\n\t\t\tprometheus.CounterValue,\n\t\t\tcnt,\n\t\t\t[]string{\n\t\t\t\tversion,\n\t\t\t}...,\n\t\t)\n\t}\n\n\tlog.Info(\"Done\")\n}", "func FetchAppServerMemStats(r Result) []float32 {\n\treturn r.AppServerStats().Mem\n}", "func (client *XenClient) VMGuestMetricsGetAll() (result []string, err error) {\n\tobj, err := client.APICall(\"VM_guest_metrics.get_all\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresult = make([]string, len(obj.([]interface{})))\n\tfor i, value := range obj.([]interface{}) {\n\t\tresult[i] = value.(string)\n\t}\n\n\treturn\n}", "func (m *metricFlinkJvmThreadsCount) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func CollectGoStatsTotals() Composer {\n\ts := &GoRuntimeInfo{}\n\ts.build()\n\n\treturn s\n}", "func dailyTaskStatsPipeline(projectId string, requester string, start time.Time, end time.Time, tasks []string, lastUpdate time.Time) []bson.M {\n\treturn getDailyTaskStatsPipeline(projectId, requester, start, end, tasks, lastUpdate, false)\n}", "func (depth *ControlDepth) getProfileMinsFromNodesResults(\n\tfilters map[string][]string,\n\tsearchResult *elastic.SearchResult,\n\tstatusFilters []string) ([]reporting.ProfileMin, *reportingapi.ProfileCounts, error) {\n\n\tprofileMins := make([]reporting.ProfileMin, 0)\n\tvar counts *reportingapi.ProfileCounts\n\tstatusMap := make(map[string]int, 4)\n\n\tif aggRoot, found := depth.unwrap(&searchResult.Aggregations); found {\n\t\tif impactBuckets, found := aggRoot.Aggregations.Terms(\"impact\"); found && len(impactBuckets.Buckets) > 0 {\n\t\t\tsummary := stats.ProfileList{}\n\t\t\t//there can only be one\n\t\t\timpact := impactBuckets.Buckets[0]\n\t\t\tif failedResult, found := 
impact.Aggregations.Filter(\"noncompliant\"); found {\n\t\t\t\tsummary.Failures = int32(failedResult.DocCount)\n\n\t\t\t\timpactAsNumber, ok := impact.Key.(float64)\n\t\t\t\tif !ok {\n\t\t\t\t\t//todo - what should we do in this case? as it is now, we will just move forward and set it to low risk\n\t\t\t\t\tlogrus.Errorf(\"could not convert the value of impact: %v, to a float!\", impact)\n\t\t\t\t}\n\n\t\t\t\tif impactAsNumber < 0.4 {\n\t\t\t\t\tsummary.Minors = summary.Failures\n\t\t\t\t} else if impactAsNumber < 0.7 {\n\t\t\t\t\tsummary.Majors = summary.Failures\n\t\t\t\t} else {\n\t\t\t\t\tsummary.Criticals = summary.Failures\n\t\t\t\t}\n\t\t\t}\n\t\t\tif passedResult, found := impact.Aggregations.Filter(\"compliant\"); found {\n\t\t\t\tsummary.Passed = int32(passedResult.DocCount)\n\t\t\t}\n\t\t\tif skippedResult, found := impact.Aggregations.Filter(\"skipped\"); found {\n\t\t\t\tsummary.Skipped = int32(skippedResult.DocCount)\n\t\t\t}\n\t\t\tif waivedResult, found := impact.Aggregations.Filter(\"waived\"); found {\n\t\t\t\tsummary.Waived = int32(waivedResult.DocCount)\n\t\t\t}\n\t\t\tif profileResult, found := impact.Aggregations.ReverseNested(\"profile\"); found {\n\t\t\t\tif profileInfoResult, found := profileResult.Terms(\"profile-info\"); found &&\n\t\t\t\t\tlen(profileInfoResult.Buckets) > 0 {\n\n\t\t\t\t\tprofileInfoBucket := profileInfoResult.Buckets[0]\n\t\t\t\t\tsummary.Name = profileInfoBucket.Key.(string)\n\n\t\t\t\t\tif profileShaResult, found := profileInfoBucket.Terms(\"sha\"); found &&\n\t\t\t\t\t\tlen(profileShaResult.Buckets) > 0 {\n\t\t\t\t\t\tsummary.Id = profileShaResult.Buckets[0].Key.(string)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tprofileStatus := computeStatus(summary.Failures, summary.Passed, summary.Skipped, summary.Waived)\n\t\t\tsummaryRep := reporting.ProfileMin{\n\t\t\t\tName: summary.Name,\n\t\t\t\tID: summary.Id,\n\t\t\t\tStatus: profileStatus,\n\t\t\t}\n\t\t\tprofileMins = append(profileMins, summaryRep)\n\n\t\t\t//let's keep track of the counts even if they're not in the filter so that we may know that they're there for UI chicklets\n\t\t\tstatusMap[profileStatus]++\n\t\t\tcounts = &reportingapi.ProfileCounts{\n\t\t\t\tTotal: int32(statusMap[\"failed\"] + statusMap[\"passed\"] + statusMap[\"skipped\"] + statusMap[\"waived\"]),\n\t\t\t\tFailed: int32(statusMap[\"failed\"]),\n\t\t\t\tPassed: int32(statusMap[\"passed\"]),\n\t\t\t\tSkipped: int32(statusMap[\"skipped\"]),\n\t\t\t\tWaived: int32(statusMap[\"waived\"]),\n\t\t\t}\n\t\t}\n\t}\n\n\treturn profileMins, counts, nil\n}", "func (c *MSCluster_ClusterCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {\n\tvar dst []MSCluster_Cluster\n\tq := queryAll(&dst, c.logger)\n\tif err := wmi.QueryNamespace(q, &dst, \"root/MSCluster\"); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, v := range dst {\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.AddEvictDelay,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.AddEvictDelay),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.AdminAccessPoint,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.AdminAccessPoint),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.AutoAssignNodeSite,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.AutoAssignNodeSite),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.AutoBalancerLevel,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.AutoBalancerLevel),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- 
prometheus.MustNewConstMetric(\n\t\t\tc.AutoBalancerMode,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.AutoBalancerMode),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.BackupInProgress,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.BackupInProgress),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.BlockCacheSize,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.BlockCacheSize),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusSvcHangTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusSvcHangTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusSvcRegroupOpeningTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusSvcRegroupOpeningTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusSvcRegroupPruningTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusSvcRegroupPruningTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusSvcRegroupStageTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusSvcRegroupStageTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusSvcRegroupTickInMilliseconds,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusSvcRegroupTickInMilliseconds),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusterEnforcedAntiAffinity,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusterEnforcedAntiAffinity),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusterFunctionalLevel,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusterFunctionalLevel),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusterGroupWaitDelay,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusterGroupWaitDelay),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusterLogLevel,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusterLogLevel),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusterLogSize,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusterLogSize),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusterUpgradeVersion,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusterUpgradeVersion),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.CrossSiteDelay,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.CrossSiteDelay),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.CrossSiteThreshold,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.CrossSiteThreshold),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.CrossSubnetDelay,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.CrossSubnetDelay),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.CrossSubnetThreshold,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.CrossSubnetThreshold),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.CsvBalancer,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.CsvBalancer),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DatabaseReadWriteMode,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DatabaseReadWriteMode),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DefaultNetworkRole,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DefaultNetworkRole),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- 
prometheus.MustNewConstMetric(\n\t\t\tc.DetectedCloudPlatform,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DetectedCloudPlatform),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DetectManagedEvents,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DetectManagedEvents),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DetectManagedEventsThreshold,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DetectManagedEventsThreshold),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DisableGroupPreferredOwnerRandomization,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DisableGroupPreferredOwnerRandomization),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DrainOnShutdown,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DrainOnShutdown),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DynamicQuorumEnabled,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DynamicQuorumEnabled),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.EnableSharedVolumes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.EnableSharedVolumes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.FixQuorum,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.FixQuorum),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.GracePeriodEnabled,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.GracePeriodEnabled),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.GracePeriodTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.GracePeriodTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.GroupDependencyTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.GroupDependencyTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.HangRecoveryAction,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.HangRecoveryAction),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.IgnorePersistentStateOnStartup,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.IgnorePersistentStateOnStartup),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.LogResourceControls,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.LogResourceControls),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.LowerQuorumPriorityNodeId,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.LowerQuorumPriorityNodeId),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.MaxNumberOfNodes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.MaxNumberOfNodes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.MessageBufferLength,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.MessageBufferLength),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.MinimumNeverPreemptPriority,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.MinimumNeverPreemptPriority),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.MinimumPreemptorPriority,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.MinimumPreemptorPriority),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.NetftIPSecEnabled,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.NetftIPSecEnabled),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.PlacementOptions,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.PlacementOptions),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- 
prometheus.MustNewConstMetric(\n\t\t\tc.PlumbAllCrossSubnetRoutes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.PlumbAllCrossSubnetRoutes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.PreventQuorum,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.PreventQuorum),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.QuarantineDuration,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.QuarantineDuration),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.QuarantineThreshold,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.QuarantineThreshold),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.QuorumArbitrationTimeMax,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.QuorumArbitrationTimeMax),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.QuorumArbitrationTimeMin,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.QuorumArbitrationTimeMin),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.QuorumLogFileSize,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.QuorumLogFileSize),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.QuorumTypeValue,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.QuorumTypeValue),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.RequestReplyTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.RequestReplyTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ResiliencyDefaultPeriod,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ResiliencyDefaultPeriod),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ResiliencyLevel,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ResiliencyLevel),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ResourceDllDeadlockPeriod,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ResourceDllDeadlockPeriod),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.RootMemoryReserved,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.RootMemoryReserved),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.RouteHistoryLength,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.RouteHistoryLength),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DBusTypes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DBusTypes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DCacheDesiredState,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DCacheDesiredState),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DCacheFlashReservePercent,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DCacheFlashReservePercent),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DCachePageSizeKBytes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DCachePageSizeKBytes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DEnabled,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DEnabled),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DIOLatencyThreshold,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DIOLatencyThreshold),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DOptimizations,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DOptimizations),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- 
prometheus.MustNewConstMetric(\n\t\t\tc.SameSubnetDelay,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.SameSubnetDelay),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.SameSubnetThreshold,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.SameSubnetThreshold),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.SecurityLevel,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.SecurityLevel),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.SecurityLevelForStorage,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.SecurityLevelForStorage),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.SharedVolumeVssWriterOperationTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.SharedVolumeVssWriterOperationTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ShutdownTimeoutInMinutes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ShutdownTimeoutInMinutes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.UseClientAccessNetworksForSharedVolumes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.UseClientAccessNetworksForSharedVolumes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.WitnessDatabaseWriteTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.WitnessDatabaseWriteTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.WitnessDynamicWeight,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.WitnessDynamicWeight),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.WitnessRestartInterval,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.WitnessRestartInterval),\n\t\t\tv.Name,\n\t\t)\n\n\t}\n\n\treturn nil\n}", "func collectMetrics(db *sql.DB, populaterWg *sync.WaitGroup, i *integration.Integration, instanceLookUp map[string]string) {\n\tdefer populaterWg.Done()\n\n\tvar collectorWg sync.WaitGroup\n\tmetricChan := make(chan newrelicMetricSender, 100) // large buffer for speed\n\n\t// Create a goroutine for each of the metric groups to collect\n\tcollectorWg.Add(5)\n\tgo oracleReadWriteMetrics.Collect(db, &collectorWg, metricChan)\n\tgo oraclePgaMetrics.Collect(db, &collectorWg, metricChan)\n\tgo oracleSysMetrics.Collect(db, &collectorWg, metricChan)\n\tgo globalNameInstanceMetric.Collect(db, &collectorWg, metricChan)\n\tgo dbIDInstanceMetric.Collect(db, &collectorWg, metricChan)\n\n\t// Separate logic is needed to see if we should even collect tablespaces\n\tcollectTableSpaces(db, &collectorWg, metricChan)\n\n\t// When the metric groups are finished collecting, close the channel\n\tgo func() {\n\t\tcollectorWg.Wait()\n\t\tclose(metricChan)\n\t}()\n\n\t// Create a goroutine to read from the metric channel and insert the metrics\n\tpopulateMetrics(metricChan, i, instanceLookUp)\n}", "func ComputeStats(res *sdk.Result, v *venom.Tests) []string {\n\t// update global stats\n\tfor _, ts := range v.TestSuites {\n\t\tnSkipped := 0\n\t\tfor _, tc := range ts.TestCases {\n\t\t\tnSkipped += len(tc.Skipped)\n\t\t}\n\t\tif ts.Skipped < nSkipped {\n\t\t\tts.Skipped = nSkipped\n\t\t}\n\t\tif ts.Total < len(ts.TestCases)-nSkipped {\n\t\t\tts.Total = len(ts.TestCases) - nSkipped\n\t\t}\n\t\tv.Total += ts.Total\n\t\tv.TotalOK += ts.Total - ts.Failures - ts.Errors\n\t\tv.TotalKO += ts.Failures + ts.Errors\n\t\tv.TotalSkipped += ts.Skipped\n\t}\n\n\tvar nbOK, nbKO, nbSkipped int\n\n\treasons := []string{}\n\treasons = append(reasons, fmt.Sprintf(\"JUnit parser: %d testsuite(s)\", 
len(v.TestSuites)))\n\n\tfor i, ts := range v.TestSuites {\n\t\tvar nbKOTC, nbFailures, nbErrors, nbSkippedTC int\n\t\tif ts.Name == \"\" {\n\t\t\tts.Name = fmt.Sprintf(\"TestSuite.%d\", i)\n\t\t}\n\t\treasons = append(reasons, fmt.Sprintf(\"JUnit parser: testsuite %s has %d testcase(s)\", ts.Name, len(ts.TestCases)))\n\t\tfor k, tc := range ts.TestCases {\n\t\t\tif tc.Name == \"\" {\n\t\t\t\ttc.Name = fmt.Sprintf(\"TestCase.%d\", k)\n\t\t\t}\n\t\t\tif len(tc.Failures) > 0 {\n\t\t\t\treasons = append(reasons, fmt.Sprintf(\"JUnit parser: testcase %s has %d failure(s)\", tc.Name, len(tc.Failures)))\n\t\t\t\tnbFailures += len(tc.Failures)\n\t\t\t}\n\t\t\tif len(tc.Errors) > 0 {\n\t\t\t\treasons = append(reasons, fmt.Sprintf(\"JUnit parser: testcase %s has %d error(s)\", tc.Name, len(tc.Errors)))\n\t\t\t\tnbErrors += len(tc.Errors)\n\t\t\t}\n\t\t\tif len(tc.Failures) > 0 || len(tc.Errors) > 0 {\n\t\t\t\tnbKOTC++\n\t\t\t} else if len(tc.Skipped) > 0 {\n\t\t\t\tnbSkippedTC += len(tc.Skipped)\n\t\t\t}\n\t\t\tv.TestSuites[i].TestCases[k] = tc\n\t\t}\n\t\tnbOK += len(ts.TestCases) - nbKOTC\n\t\tnbKO += nbKOTC\n\t\tnbSkipped += nbSkippedTC\n\t\tif ts.Failures > nbFailures {\n\t\t\tnbFailures = ts.Failures\n\t\t}\n\t\tif ts.Errors > nbErrors {\n\t\t\tnbErrors = ts.Errors\n\t\t}\n\n\t\tif nbFailures > 0 {\n\t\t\treasons = append(reasons, fmt.Sprintf(\"JUnit parser: testsuite %s has %d failure(s)\", ts.Name, nbFailures))\n\t\t}\n\t\tif nbErrors > 0 {\n\t\t\treasons = append(reasons, fmt.Sprintf(\"JUnit parser: testsuite %s has %d error(s)\", ts.Name, nbErrors))\n\t\t}\n\t\tif nbKOTC > 0 {\n\t\t\treasons = append(reasons, fmt.Sprintf(\"JUnit parser: testsuite %s has %d test(s) failed\", ts.Name, nbKOTC))\n\t\t}\n\t\tif nbSkippedTC > 0 {\n\t\t\treasons = append(reasons, fmt.Sprintf(\"JUnit parser: testsuite %s has %d test(s) skipped\", ts.Name, nbSkippedTC))\n\t\t}\n\t\tv.TestSuites[i] = ts\n\t}\n\n\tif nbKO > v.TotalKO {\n\t\tv.TotalKO = nbKO\n\t}\n\n\tif nbOK != v.TotalOK {\n\t\tv.TotalOK = nbOK\n\t}\n\n\tif nbSkipped != v.TotalSkipped {\n\t\tv.TotalSkipped = nbSkipped\n\t}\n\n\tif v.TotalKO+v.TotalOK != v.Total {\n\t\tv.Total = v.TotalKO + v.TotalOK + v.TotalSkipped\n\t}\n\n\tres.Status = sdk.StatusFail\n\tif v.TotalKO == 0 {\n\t\tres.Status = sdk.StatusSuccess\n\t}\n\treturn reasons\n}", "func (y *YarnMetrics) Collect(ch chan<- prometheus.Metric) {\n\ty.metricsLock.Lock()\n\tdefer y.metricsLock.Unlock()\n\tif y.metrics != nil {\n\t\tch <- prometheus.MustNewConstMetric(y.containerStatusDesc, prometheus.GaugeValue,\n\t\t\tfloat64(y.metrics.ContainersLaunched), y.nodeName, \"launched\")\n\t\tch <- prometheus.MustNewConstMetric(y.containerStatusDesc, prometheus.GaugeValue,\n\t\t\tfloat64(y.metrics.ContainersCompleted), y.nodeName, \"completed\")\n\t\tch <- prometheus.MustNewConstMetric(y.containerStatusDesc, prometheus.GaugeValue,\n\t\t\tfloat64(y.metrics.ContainersFailed), y.nodeName, \"failed\")\n\t\tch <- prometheus.MustNewConstMetric(y.containerStatusDesc, prometheus.GaugeValue,\n\t\t\tfloat64(y.metrics.ContainersKilled), y.nodeName, \"killed\")\n\t\tch <- prometheus.MustNewConstMetric(y.containerStatusDesc, prometheus.GaugeValue,\n\t\t\tfloat64(y.metrics.ContainersRunning), y.nodeName, \"running\")\n\t\tch <- prometheus.MustNewConstMetric(y.containerStatusDesc, prometheus.GaugeValue,\n\t\t\tfloat64(y.metrics.ContainersIniting), y.nodeName, \"initing\")\n\t}\n\tif y.nodeStatus != nil {\n\t\tch <- prometheus.MustNewConstMetric(y.nodeStatusDesc, 
prometheus.GaugeValue,\n\t\t\ty.nodeStatus.NodeHealthyFloat, y.nodeName)\n\t}\n}" ]
[ "0.798549", "0.65351737", "0.65252376", "0.6430947", "0.61580193", "0.5957948", "0.5759485", "0.55020154", "0.5496554", "0.54518634", "0.5349374", "0.5299231", "0.52623755", "0.51727706", "0.515689", "0.5150958", "0.5146923", "0.514343", "0.51070416", "0.5051937", "0.5038212", "0.50349516", "0.50213283", "0.49954256", "0.49901807", "0.49814188", "0.49731165", "0.4961342", "0.4953228", "0.49383357", "0.4920049", "0.49175218", "0.49024093", "0.48970792", "0.4891128", "0.4875224", "0.48512617", "0.48490047", "0.48485282", "0.48427162", "0.48323214", "0.48291042", "0.48237017", "0.47721866", "0.47619027", "0.4751631", "0.47230315", "0.471304", "0.4703046", "0.4691145", "0.46903235", "0.46885562", "0.4677678", "0.46705833", "0.4669566", "0.46681073", "0.46677247", "0.4665167", "0.465144", "0.46419358", "0.4638875", "0.46365017", "0.46355242", "0.46318254", "0.4609249", "0.4598088", "0.4589183", "0.45807546", "0.45761913", "0.45714316", "0.45700267", "0.45630926", "0.45616803", "0.45613325", "0.4560089", "0.45477733", "0.45392716", "0.45303842", "0.45251235", "0.45249674", "0.45082918", "0.45070222", "0.45001867", "0.44997504", "0.4492477", "0.4491654", "0.4491592", "0.44907877", "0.44903722", "0.44841555", "0.4477839", "0.44591412", "0.44535935", "0.44520152", "0.44505867", "0.4448333", "0.44462192", "0.44447097", "0.44379076", "0.4429527" ]
0.677846
1
gatherPipelinesStats gathers the Pipelines metrics and adds the results to the accumulator (for Logstash >= 6)
func (logstash *Logstash) gatherPipelinesStats(address string, accumulator telegraf.Accumulator) error {
	pipelinesStats := &PipelinesStats{}

	err := logstash.gatherJSONData(address, pipelinesStats)
	if err != nil {
		return err
	}

	for pipelineName, pipeline := range pipelinesStats.Pipelines {
		tags := map[string]string{
			"node_id":      pipelinesStats.ID,
			"node_name":    pipelinesStats.Name,
			"node_version": pipelinesStats.Version,
			"pipeline":     pipelineName,
			"source":       pipelinesStats.Host,
		}

		flattener := jsonParser.JSONFlattener{}
		err := flattener.FlattenJSON("", pipeline.Events)
		if err != nil {
			return err
		}
		accumulator.AddFields("logstash_events", flattener.Fields, tags)

		err = logstash.gatherPluginsStats(pipeline.Plugins.Inputs, "input", tags, accumulator)
		if err != nil {
			return err
		}
		err = logstash.gatherPluginsStats(pipeline.Plugins.Filters, "filter", tags, accumulator)
		if err != nil {
			return err
		}
		err = logstash.gatherPluginsStats(pipeline.Plugins.Outputs, "output", tags, accumulator)
		if err != nil {
			return err
		}

		err = logstash.gatherQueueStats(&pipeline.Queue, tags, accumulator)
		if err != nil {
			return err
		}
	}

	return nil
}
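The function above leans on pieces defined elsewhere in the plugin (gatherJSONData, the PipelinesStats struct, gatherPluginsStats, gatherQueueStats). A minimal, self-contained sketch of the fetch-and-decode half, assuming the standard Logstash monitoring API on port 9600; the struct shapes and field names here are trimmed-down illustrations, not the plugin's exact definitions:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// Trimmed-down shapes (assumed): the real plugin's structs also carry
// Plugins and Queue sections per pipeline.
type PipelineEvents struct {
	In       int64 `json:"in"`
	Filtered int64 `json:"filtered"`
	Out      int64 `json:"out"`
}

type Pipeline struct {
	Events PipelineEvents `json:"events"`
}

type PipelinesStats struct {
	ID        string              `json:"id"`
	Name      string              `json:"name"`
	Version   string              `json:"version"`
	Host      string              `json:"host"`
	Pipelines map[string]Pipeline `json:"pipelines"`
}

// gatherJSONData mirrors the helper the collector relies on: issue an
// HTTP GET against the monitoring API and decode the JSON body into
// the supplied value.
func gatherJSONData(client *http.Client, address string, value interface{}) error {
	resp, err := client.Get(address)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status %s from %s", resp.Status, address)
	}
	return json.NewDecoder(resp.Body).Decode(value)
}

func main() {
	client := &http.Client{Timeout: 5 * time.Second}
	stats := &PipelinesStats{}
	// /_node/stats/pipelines is the per-pipeline stats endpoint on
	// Logstash >= 6; adjust host and port for your deployment.
	if err := gatherJSONData(client, "http://localhost:9600/_node/stats/pipelines", stats); err != nil {
		fmt.Println("gather failed:", err)
		return
	}
	for name, p := range stats.Pipelines {
		fmt.Printf("pipeline=%s in=%d filtered=%d out=%d\n",
			name, p.Events.In, p.Events.Filtered, p.Events.Out)
	}
}

Decoding into a typed struct and then flattening the per-pipeline Events map is what lets the collector tag each measurement with both node identity and pipeline name, which is the distinguishing feature of the multi-pipeline (Logstash >= 6) code path.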
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (logstash *Logstash) gatherJVMStats(address string, accumulator telegraf.Accumulator) error {\n\tjvmStats := &JVMStats{}\n\n\terr := logstash.gatherJSONData(address, jvmStats)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttags := map[string]string{\n\t\t\"node_id\": jvmStats.ID,\n\t\t\"node_name\": jvmStats.Name,\n\t\t\"node_version\": jvmStats.Version,\n\t\t\"source\": jvmStats.Host,\n\t}\n\n\tflattener := jsonParser.JSONFlattener{}\n\terr = flattener.FlattenJSON(\"\", jvmStats.JVM)\n\tif err != nil {\n\t\treturn err\n\t}\n\taccumulator.AddFields(\"logstash_jvm\", flattener.Fields, tags)\n\n\treturn nil\n}", "func (logstash *Logstash) gatherPipelineStats(address string, accumulator telegraf.Accumulator) error {\n\tpipelineStats := &PipelineStats{}\n\n\terr := logstash.gatherJSONData(address, pipelineStats)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttags := map[string]string{\n\t\t\"node_id\": pipelineStats.ID,\n\t\t\"node_name\": pipelineStats.Name,\n\t\t\"node_version\": pipelineStats.Version,\n\t\t\"source\": pipelineStats.Host,\n\t}\n\n\tflattener := jsonParser.JSONFlattener{}\n\terr = flattener.FlattenJSON(\"\", pipelineStats.Pipeline.Events)\n\tif err != nil {\n\t\treturn err\n\t}\n\taccumulator.AddFields(\"logstash_events\", flattener.Fields, tags)\n\n\terr = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Inputs, \"input\", tags, accumulator)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Filters, \"filter\", tags, accumulator)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Outputs, \"output\", tags, accumulator)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = logstash.gatherQueueStats(&pipelineStats.Pipeline.Queue, tags, accumulator)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (logstash *Logstash) Gather(accumulator telegraf.Accumulator) error {\n\tif logstash.client == nil {\n\t\tclient, err := logstash.createHTTPClient()\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlogstash.client = client\n\t}\n\n\tif choice.Contains(\"jvm\", logstash.Collect) {\n\t\tjvmURL, err := url.Parse(logstash.URL + jvmStats)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := logstash.gatherJVMStats(jvmURL.String(), accumulator); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif choice.Contains(\"process\", logstash.Collect) {\n\t\tprocessURL, err := url.Parse(logstash.URL + processStats)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := logstash.gatherProcessStats(processURL.String(), accumulator); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif choice.Contains(\"pipelines\", logstash.Collect) {\n\t\tif logstash.SinglePipeline {\n\t\t\tpipelineURL, err := url.Parse(logstash.URL + pipelineStats)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := logstash.gatherPipelineStats(pipelineURL.String(), accumulator); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tpipelinesURL, err := url.Parse(logstash.URL + pipelinesStats)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := logstash.gatherPipelinesStats(pipelinesURL.String(), accumulator); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c *Collector) Transform(allStats *NodeStatsResponse) (metrics []*exportertools.Metric) {\n for _, stats := range allStats.Nodes {\n // GC Stats\n for _, gcstats := range stats.JVM.GC.Collectors {\n metrics = append(metrics, 
c.ConvertToMetric(\"jvm_gc_collection_seconds_count\",\n float64(gcstats.CollectionCount),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_gc_collection_seconds_sum\",\n float64(gcstats.CollectionTime / 1000),\n \"COUNTER\",\n nil))\n }\n\n // Breaker stats\n for _, bstats := range stats.Breakers {\n metrics = append(metrics, c.ConvertToMetric(\"breakers_estimated_size_bytes\",\n float64(bstats.EstimatedSize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"breakers_limit_size_bytes\",\n float64(bstats.LimitSize),\n \"GAUGE\",\n nil))\n }\n\n // Thread Pool stats\n for pool, pstats := range stats.ThreadPool {\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_completed_count\",\n float64(pstats.Completed),\n \"COUNTER\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_rejected_count\",\n float64(pstats.Rejected),\n \"COUNTER\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_active_count\",\n float64(pstats.Active),\n \"GAUGE\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_threads_count\",\n float64(pstats.Threads),\n \"GAUGE\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_largest_count\",\n float64(pstats.Largest),\n \"GAUGE\",\n map[string]string{\"type\": pool}))\n\n metrics = append(metrics, c.ConvertToMetric(\"thread_pool_queue_count\",\n float64(pstats.Queue),\n \"GAUGE\",\n map[string]string{\"type\": pool}))\n }\n\n // JVM Memory Stats\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_committed_bytes\",\n float64(stats.JVM.Mem.HeapCommitted),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_used_bytes\",\n float64(stats.JVM.Mem.HeapUsed),\n \"GAUGE\",\n nil))\n\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_max_bytes\",\n float64(stats.JVM.Mem.HeapMax),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_committed_bytes\",\n float64(stats.JVM.Mem.NonHeapCommitted),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"jvm_memory_used_bytes\",\n float64(stats.JVM.Mem.NonHeapUsed),\n \"GAUGE\",\n nil))\n\n // Indices Stats)\n metrics = append(metrics, c.ConvertToMetric(\"indices_fielddata_memory_size_bytes\",\n float64(stats.Indices.FieldData.MemorySize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_fielddata_evictions\",\n float64(stats.Indices.FieldData.Evictions),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_filter_cache_memory_size_bytes\",\n float64(stats.Indices.FilterCache.MemorySize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_filter_cache_evictions\",\n float64(stats.Indices.FilterCache.Evictions),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_query_cache_memory_size_bytes\",\n float64(stats.Indices.QueryCache.MemorySize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_query_cache_evictions\",\n float64(stats.Indices.QueryCache.Evictions),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_request_cache_memory_size_bytes\",\n float64(stats.Indices.QueryCache.MemorySize),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_request_cache_evictions\",\n 
float64(stats.Indices.QueryCache.Evictions),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_docs\",\n float64(stats.Indices.Docs.Count),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_docs_deleted\",\n float64(stats.Indices.Docs.Deleted),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_segments_memory_bytes\",\n float64(stats.Indices.Segments.Memory),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_segments_count\",\n float64(stats.Indices.Segments.Count),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_store_size_bytes\",\n float64(stats.Indices.Store.Size),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_store_throttle_time_ms_total\",\n float64(stats.Indices.Store.ThrottleTime),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_flush_total\",\n float64(stats.Indices.Flush.Total),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_flush_time_ms_total\",\n float64(stats.Indices.Flush.Time),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_indexing_index_time_ms_total\",\n float64(stats.Indices.Indexing.IndexTime),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_indexing_index_total\",\n float64(stats.Indices.Indexing.IndexTotal),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_merges_total_time_ms_total\",\n float64(stats.Indices.Merges.TotalTime),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_merges_total_size_bytes_total\",\n float64(stats.Indices.Merges.TotalSize),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_merges_total\",\n float64(stats.Indices.Merges.Total),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_refresh_total_time_ms_total\",\n float64(stats.Indices.Refresh.TotalTime),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"indices_refresh_total\",\n float64(stats.Indices.Refresh.Total),\n \"COUNTER\",\n nil))\n\n // Transport Stats)\n metrics = append(metrics, c.ConvertToMetric(\"transport_rx_packets_total\",\n float64(stats.Transport.RxCount),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"transport_rx_size_bytes_total\",\n float64(stats.Transport.RxSize),\n \"COUNTER\",\n nil))\n\n\n metrics = append(metrics, c.ConvertToMetric(\"transport_tx_packets_total\",\n float64(stats.Transport.TxCount),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"transport_tx_size_bytes_total\",\n float64(stats.Transport.TxSize),\n \"COUNTER\",\n nil))\n\n // Process Stats)\n metrics = append(metrics, c.ConvertToMetric(\"process_cpu_percent\",\n float64(stats.Process.CPU.Percent),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_mem_resident_size_bytes\",\n float64(stats.Process.Memory.Resident),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_mem_share_size_bytes\",\n float64(stats.Process.Memory.Share),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_mem_virtual_size_bytes\",\n float64(stats.Process.Memory.TotalVirtual),\n \"GAUGE\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_open_files_count\",\n float64(stats.Process.OpenFD),\n \"GAUGE\",\n nil))\n\n metrics = 
append(metrics, c.ConvertToMetric(\"process_cpu_time_seconds_sum\",\n float64(stats.Process.CPU.Total / 1000),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_cpu_time_seconds_sum\",\n float64(stats.Process.CPU.Sys / 1000),\n \"COUNTER\",\n nil))\n\n metrics = append(metrics, c.ConvertToMetric(\"process_cpu_time_seconds_sum\",\n float64(stats.Process.CPU.User / 1000),\n \"COUNTER\",\n nil))\n\n }\n\n return metrics\n}", "func (logstash *Logstash) gatherProcessStats(address string, accumulator telegraf.Accumulator) error {\n\tprocessStats := &ProcessStats{}\n\n\terr := logstash.gatherJSONData(address, processStats)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttags := map[string]string{\n\t\t\"node_id\": processStats.ID,\n\t\t\"node_name\": processStats.Name,\n\t\t\"node_version\": processStats.Version,\n\t\t\"source\": processStats.Host,\n\t}\n\n\tflattener := jsonParser.JSONFlattener{}\n\terr = flattener.FlattenJSON(\"\", processStats.Process)\n\tif err != nil {\n\t\treturn err\n\t}\n\taccumulator.AddFields(\"logstash_process\", flattener.Fields, tags)\n\n\treturn nil\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\t// Reset metrics.\n\tfor _, vec := range e.gauges {\n\t\tvec.Reset()\n\t}\n\n\tfor _, vec := range e.counters {\n\t\tvec.Reset()\n\t}\n\n\tresp, err := e.client.Get(e.URI)\n\tif err != nil {\n\t\te.up.Set(0)\n\t\tlog.Printf(\"Error while querying Elasticsearch: %v\", err)\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\tlog.Printf(\"Failed to read ES response body: %v\", err)\n\t\te.up.Set(0)\n\t\treturn\n\t}\n\n\te.up.Set(1)\n\n\tvar all_stats NodeStatsResponse\n\terr = json.Unmarshal(body, &all_stats)\n\n\tif err != nil {\n\t\tlog.Printf(\"Failed to unmarshal JSON into struct: %v\", err)\n\t\treturn\n\t}\n\n\t// Regardless of whether we're querying the local host or the whole\n\t// cluster, here we can just iterate through all nodes found.\n\n\tfor node, stats := range all_stats.Nodes {\n\t\tlog.Printf(\"Processing node %v\", node)\n\t\t// GC Stats\n\t\tfor collector, gcstats := range stats.JVM.GC.Collectors {\n\t\t\te.counters[\"jvm_gc_collection_count\"].WithLabelValues(all_stats.ClusterName, stats.Name, collector).Set(float64(gcstats.CollectionCount))\n\t\t\te.counters[\"jvm_gc_collection_time_in_millis\"].WithLabelValues(all_stats.ClusterName, stats.Name, collector).Set(float64(gcstats.CollectionTime))\n\t\t}\n\n\t\t// Breaker stats\n\t\tfor breaker, bstats := range stats.Breakers {\n\t\t\te.gauges[\"breakers_estimated_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name, breaker).Set(float64(bstats.EstimatedSize))\n\t\t\te.gauges[\"breakers_limit_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name, breaker).Set(float64(bstats.LimitSize))\n\t\t}\n\n\t\t// JVM Memory Stats\n\t\te.gauges[\"jvm_mem_heap_committed_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.HeapCommitted))\n\t\te.gauges[\"jvm_mem_heap_used_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.HeapUsed))\n\t\te.gauges[\"jvm_mem_heap_max_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.HeapMax))\n\t\te.gauges[\"jvm_mem_non_heap_committed_in_bytes\"].WithLabelValues(all_stats.ClusterName, 
stats.Name).Set(float64(stats.JVM.Mem.NonHeapCommitted))\n\t\te.gauges[\"jvm_mem_non_heap_used_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.NonHeapUsed))\n\n\t\t// Indices Stats\n\t\te.gauges[\"indices_fielddata_evictions\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FieldData.Evictions))\n\t\te.gauges[\"indices_fielddata_memory_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FieldData.MemorySize))\n\t\te.gauges[\"indices_filter_cache_evictions\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FilterCache.Evictions))\n\t\te.gauges[\"indices_filter_cache_memory_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FilterCache.MemorySize))\n\n\t\te.gauges[\"indices_docs_count\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Docs.Count))\n\t\te.gauges[\"indices_docs_deleted\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Docs.Deleted))\n\n\t\te.gauges[\"indices_segments_memory_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Segments.Memory))\n\n\t\te.gauges[\"indices_store_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Store.Size))\n\t\te.counters[\"indices_store_throttle_time_in_millis\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Store.ThrottleTime))\n\n\t\te.counters[\"indices_flush_total\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Flush.Total))\n\t\te.counters[\"indices_flush_time_in_millis\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Flush.Time))\n\n\t\t// Transport Stats\n\t\te.counters[\"transport_rx_count\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.RxCount))\n\t\te.counters[\"transport_rx_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.RxSize))\n\t\te.counters[\"transport_tx_count\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.TxCount))\n\t\te.counters[\"transport_tx_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.TxSize))\n\t}\n\n\t// Report metrics.\n\tch <- e.up\n\n\tfor _, vec := range e.counters {\n\t\tvec.Collect(ch)\n\t}\n\n\tfor _, vec := range e.gauges {\n\t\tvec.Collect(ch)\n\t}\n}", "func (logstash *Logstash) gatherPluginsStats(\n\tplugins []Plugin,\n\tpluginType string,\n\ttags map[string]string,\n\taccumulator telegraf.Accumulator,\n) error {\n\tfor _, plugin := range plugins {\n\t\tpluginTags := map[string]string{\n\t\t\t\"plugin_name\": plugin.Name,\n\t\t\t\"plugin_id\": plugin.ID,\n\t\t\t\"plugin_type\": pluginType,\n\t\t}\n\t\tfor tag, value := range tags {\n\t\t\tpluginTags[tag] = value\n\t\t}\n\t\tflattener := jsonParser.JSONFlattener{}\n\t\terr := flattener.FlattenJSON(\"\", plugin.Events)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\taccumulator.AddFields(\"logstash_plugins\", flattener.Fields, pluginTags)\n\t\tif plugin.Failures != nil {\n\t\t\tfailuresFields := map[string]interface{}{\"failures\": *plugin.Failures}\n\t\t\taccumulator.AddFields(\"logstash_plugins\", failuresFields, pluginTags)\n\t\t}\n\t\t/*\n\t\t\tThe elasticsearch & opensearch output produces additional stats\n\t\t\taround bulk requests and document writes (that are 
elasticsearch\n\t\t\tand opensearch specific). Collect those below:\n\t\t*/\n\t\tif pluginType == \"output\" && (plugin.Name == \"elasticsearch\" || plugin.Name == \"opensearch\") {\n\t\t\t/*\n\t\t\t\tThe \"bulk_requests\" section has details about batch writes\n\t\t\t\tinto Elasticsearch\n\n\t\t\t\t \"bulk_requests\" : {\n\t\t\t\t\t\"successes\" : 2870,\n\t\t\t\t\t\"responses\" : {\n\t\t\t\t\t \"200\" : 2870\n\t\t\t\t\t},\n\t\t\t\t\t\"failures\": 262,\n\t\t\t\t\t\"with_errors\": 9089\n\t\t\t\t },\n\t\t\t*/\n\t\t\tflattener := jsonParser.JSONFlattener{}\n\t\t\terr := flattener.FlattenJSON(\"\", plugin.BulkRequests)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfor k, v := range flattener.Fields {\n\t\t\t\tif strings.HasPrefix(k, \"bulk_requests\") {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tnewKey := fmt.Sprintf(\"bulk_requests_%s\", k)\n\t\t\t\tflattener.Fields[newKey] = v\n\t\t\t\tdelete(flattener.Fields, k)\n\t\t\t}\n\t\t\taccumulator.AddFields(\"logstash_plugins\", flattener.Fields, pluginTags)\n\n\t\t\t/*\n\t\t\t\tThe \"documents\" section has counts of individual documents\n\t\t\t\twritten/retried/etc.\n\t\t\t\t \"documents\" : {\n\t\t\t\t\t\"successes\" : 2665549,\n\t\t\t\t\t\"retryable_failures\": 13733\n\t\t\t\t }\n\t\t\t*/\n\t\t\tflattener = jsonParser.JSONFlattener{}\n\t\t\terr = flattener.FlattenJSON(\"\", plugin.Documents)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfor k, v := range flattener.Fields {\n\t\t\t\tif strings.HasPrefix(k, \"documents\") {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tnewKey := fmt.Sprintf(\"documents_%s\", k)\n\t\t\t\tflattener.Fields[newKey] = v\n\t\t\t\tdelete(flattener.Fields, k)\n\t\t\t}\n\t\t\taccumulator.AddFields(\"logstash_plugins\", flattener.Fields, pluginTags)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (e *Exporter) collect(ch chan<- prometheus.Metric) error {\n\tvar mempct, memtot, memfree float64\n\tif v, e := mem.VirtualMemory(); e == nil {\n\t\tmempct = v.UsedPercent\n\t\tmemtot = float64(v.Total)\n\t\tmemfree = float64(v.Free)\n\t}\n\tvar swappct, swaptot, swapfree float64\n\tif v, e := mem.SwapMemory(); e == nil {\n\t\tswappct = v.UsedPercent\n\t\tswaptot = float64(v.Total)\n\t\tswapfree = float64(v.Free)\n\t}\n\tvar cpupct float64\n\tif c, e := cpu.Percent(time.Millisecond, false); e == nil {\n\t\tcpupct = c[0] // one value since we didn't ask per cpu\n\t}\n\tvar load1, load5, load15 float64\n\tif l, e := load.Avg(); e == nil {\n\t\tload1 = l.Load1\n\t\tload5 = l.Load5\n\t\tload15 = l.Load15\n\t}\n\n\tvar cpuTotal, vsize, rss, openFDs, maxFDs, maxVsize float64\n\tif proc, err := procfs.NewProc(int(*pid)); err == nil {\n\t\tif stat, err := proc.NewStat(); err == nil {\n\t\t\tcpuTotal = float64(stat.CPUTime())\n\t\t\tvsize = float64(stat.VirtualMemory())\n\t\t\trss = float64(stat.ResidentMemory())\n\t\t}\n\t\tif fds, err := proc.FileDescriptorsLen(); err == nil {\n\t\t\topenFDs = float64(fds)\n\t\t}\n\t\tif limits, err := proc.NewLimits(); err == nil {\n\t\t\tmaxFDs = float64(limits.OpenFiles)\n\t\t\tmaxVsize = float64(limits.AddressSpace)\n\t\t}\n\t}\n\n\tvar procCpu, procMem float64\n\tvar estCon, lisCon, othCon, totCon, closeCon, timeCon, openFiles float64\n\tvar nThreads float64\n\tif proc, err := process.NewProcess(int32(*pid)); err == nil {\n\t\tif v, e := proc.CPUPercent(); e == nil {\n\t\t\tprocCpu = float64(v)\n\t\t}\n\t\tif v, e := proc.MemoryPercent(); e == nil {\n\t\t\tprocMem = float64(v)\n\t\t}\n\n\t\tif v, e := proc.NumThreads(); e == nil {\n\t\t\tnThreads = float64(v)\n\t\t}\n\t\tif connections, e := 
proc.Connections(); e == nil {\n\t\t\tfor _, v := range connections {\n\t\t\t\tif v.Status == \"LISTEN\" {\n\t\t\t\t\tlisCon += 1\n\t\t\t\t} else if v.Status == \"ESTABLISHED\" {\n\t\t\t\t\testCon += 1\n\t\t\t\t} else if v.Status == \"TIME_WAIT\" {\n\t\t\t\t\ttimeCon += 1\n\t\t\t\t} else if v.Status == \"CLOSE_WAIT\" {\n\t\t\t\t\tcloseCon += 1\n\t\t\t\t} else {\n\t\t\t\t\tothCon += 1\n\t\t\t\t}\n\t\t\t}\n\t\t\ttotCon = lisCon + estCon + timeCon + closeCon + othCon\n\t\t}\n\t\tif oFiles, e := proc.OpenFiles(); e == nil {\n\t\t\topenFiles = float64(len(oFiles))\n\t\t}\n\t}\n\n\t// metrics from process collector\n\tch <- prometheus.MustNewConstMetric(e.cpuTotal, prometheus.CounterValue, cpuTotal)\n\tch <- prometheus.MustNewConstMetric(e.openFDs, prometheus.CounterValue, openFDs)\n\tch <- prometheus.MustNewConstMetric(e.maxFDs, prometheus.CounterValue, maxFDs)\n\tch <- prometheus.MustNewConstMetric(e.vsize, prometheus.CounterValue, vsize)\n\tch <- prometheus.MustNewConstMetric(e.maxVsize, prometheus.CounterValue, maxVsize)\n\tch <- prometheus.MustNewConstMetric(e.rss, prometheus.CounterValue, rss)\n\t// node specific metrics\n\tch <- prometheus.MustNewConstMetric(e.memPercent, prometheus.CounterValue, mempct)\n\tch <- prometheus.MustNewConstMetric(e.memTotal, prometheus.CounterValue, memtot)\n\tch <- prometheus.MustNewConstMetric(e.memFree, prometheus.CounterValue, memfree)\n\tch <- prometheus.MustNewConstMetric(e.swapPercent, prometheus.CounterValue, swappct)\n\tch <- prometheus.MustNewConstMetric(e.swapTotal, prometheus.CounterValue, swaptot)\n\tch <- prometheus.MustNewConstMetric(e.swapFree, prometheus.CounterValue, swapfree)\n\tch <- prometheus.MustNewConstMetric(e.numCpus, prometheus.CounterValue, float64(runtime.NumCPU()))\n\tch <- prometheus.MustNewConstMetric(e.load1, prometheus.CounterValue, load1)\n\tch <- prometheus.MustNewConstMetric(e.load5, prometheus.CounterValue, load5)\n\tch <- prometheus.MustNewConstMetric(e.load15, prometheus.CounterValue, load15)\n\t// process specific metrics\n\tch <- prometheus.MustNewConstMetric(e.procCpu, prometheus.CounterValue, procCpu)\n\tch <- prometheus.MustNewConstMetric(e.procMem, prometheus.CounterValue, procMem)\n\tch <- prometheus.MustNewConstMetric(e.numThreads, prometheus.CounterValue, nThreads)\n\tch <- prometheus.MustNewConstMetric(e.cpuPercent, prometheus.CounterValue, cpupct)\n\tch <- prometheus.MustNewConstMetric(e.openFiles, prometheus.CounterValue, openFiles)\n\tch <- prometheus.MustNewConstMetric(e.totCon, prometheus.CounterValue, totCon)\n\tch <- prometheus.MustNewConstMetric(e.lisCon, prometheus.CounterValue, lisCon)\n\tch <- prometheus.MustNewConstMetric(e.estCon, prometheus.CounterValue, estCon)\n\tch <- prometheus.MustNewConstMetric(e.closeCon, prometheus.CounterValue, closeCon)\n\tch <- prometheus.MustNewConstMetric(e.timeCon, prometheus.CounterValue, timeCon)\n\treturn nil\n}", "func CollectRuntimeMemStats(statsd scopedstatsd.Client, memstatsCurrent *runtime.MemStats, memstatsPrev *runtime.MemStats, tags []string) {\n\t// Collect number of bytes obtained from system.\n\tstatsd.Gauge(\"mem.sys_bytes\", float64(memstatsCurrent.Sys), tags, 1)\n\n\t// Collect number of pointer lookups.\n\tstatsd.Gauge(\"mem.pointer_lookups\", float64(memstatsCurrent.Lookups), tags, 1)\n\n\t// Collect increased heap objects allocated compared to last flush.\n\tstatsd.Count(\"mem.mallocs_total\", int64(memstatsCurrent.Mallocs-memstatsPrev.Mallocs), tags, 1)\n\n\t// Collect increased heap objects freed compared to last 
flush.\n\tstatsd.Count(\"mem.frees_total\", int64(memstatsCurrent.Frees-memstatsPrev.Frees), tags, 1)\n\n\t// Collect number of mallocs.\n\tstatsd.Gauge(\"mem.mallocs_count\", float64(memstatsCurrent.Mallocs-memstatsCurrent.Frees), tags, 1)\n\n\t// Collect number of bytes newly allocated for heap objects compared to last flush.\n\tstatsd.Count(\"mem.heap_alloc_bytes_total\", int64(memstatsCurrent.TotalAlloc-memstatsPrev.TotalAlloc), tags, 1)\n\n\t// Collect number of heap bytes allocated and still in use.\n\tstatsd.Gauge(\"mem.heap_alloc_bytes\", float64(memstatsCurrent.HeapAlloc), tags, 1)\n\n\t// Collect number of heap bytes obtained from system.\n\tstatsd.Gauge(\"mem.heap_sys_bytes\", float64(memstatsCurrent.HeapSys), tags, 1)\n\n\t// Collect number of heap bytes waiting to be used.\n\tstatsd.Gauge(\"mem.heap_idle_bytes\", float64(memstatsCurrent.HeapIdle), tags, 1)\n\n\t// Collect number of heap bytes that are in use.\n\tstatsd.Gauge(\"mem.heap_inuse_bytes\", float64(memstatsCurrent.HeapInuse), tags, 1)\n\n\t// Collect number of heap bytes released to OS.\n\tstatsd.Gauge(\"mem.heap_released_bytes\", float64(memstatsCurrent.HeapReleased), tags, 1)\n\n\t// Collect number of allocated objects.\n\tstatsd.Gauge(\"mem.heap_objects_count\", float64(memstatsCurrent.HeapObjects), tags, 1)\n\n\t// Collect number of bytes in use by the stack allocator.\n\tstatsd.Gauge(\"mem.stack_inuse_bytes\", float64(memstatsCurrent.StackInuse), tags, 1)\n\n\t// Collect number of bytes obtained from system for stack allocator.\n\tstatsd.Gauge(\"mem.stack_sys_bytes\", float64(memstatsCurrent.StackSys), tags, 1)\n\n\t// Collect number of bytes in use by mspan structures.\n\tstatsd.Gauge(\"mem.mspan_inuse_bytes\", float64(memstatsCurrent.MSpanInuse), tags, 1)\n\n\t// Collect number of bytes used for mspan structures obtained from system.\n\tstatsd.Gauge(\"mem.mspan_sys_bytes\", float64(memstatsCurrent.MSpanSys), tags, 1)\n\n\t// Collect number of bytes in use by mcache structures.\n\tstatsd.Gauge(\"mem.mcache_inuse_bytes\", float64(memstatsCurrent.MCacheInuse), tags, 1)\n\n\t// Collect number of bytes used for mcache structures obtained from system.\n\tstatsd.Gauge(\"mem.mcache_sys_bytes\", float64(memstatsCurrent.MCacheSys), tags, 1)\n\n\t// Collect number of bytes used by the profiling bucket hash table.\n\tstatsd.Gauge(\"mem.buck_hash_sys_bytes\", float64(memstatsCurrent.BuckHashSys), tags, 1)\n\n\t// Collect number of bytes used for garbage collection system metadata.\n\tstatsd.Gauge(\"mem.gc_sys_bytes\", float64(memstatsCurrent.GCSys), tags, 1)\n\n\t// Collect number of bytes used for other system allocations.\n\tstatsd.Gauge(\"mem.other_sys_bytes\", float64(memstatsCurrent.OtherSys), tags, 1)\n\n\t// Collect number of heap bytes when next garbage collection will take pace.\n\tstatsd.Gauge(\"mem.next_gc_bytes\", float64(memstatsCurrent.NextGC), tags, 1)\n}", "func CollectProcessMetrics(refresh time.Duration) {\n\t// Short circuit if the metics system is disabled\n\tif !Enabled {\n\t\treturn\n\t}\n\t// Create the various data collectors\n\tmemstates := make([]*runtime.MemStats, 2)\n\tdiskstates := make([]*DiskStats, 2)\n\tfor i := 0; i < len(memstates); i++ {\n\t\tmemstates[i] = new(runtime.MemStats)\n\t\tdiskstates[i] = new(DiskStats)\n\t}\n\t// Define the various metics to collect\n\tmemAllocs := metics.GetOrRegisterMeter(\"system/memory/allocs\", metics.DefaultRegistry)\n\tmemFrees := metics.GetOrRegisterMeter(\"system/memory/frees\", metics.DefaultRegistry)\n\tmemInuse := 
metics.GetOrRegisterMeter(\"system/memory/inuse\", metics.DefaultRegistry)\n\tmemPauses := metics.GetOrRegisterMeter(\"system/memory/pauses\", metics.DefaultRegistry)\n\n\tvar diskReads, diskReadBytes, diskWrites, diskWriteBytes metics.Meter\n\tif err := ReadDiskStats(diskstates[0]); err == nil {\n\t\tdiskReads = metics.GetOrRegisterMeter(\"system/disk/readcount\", metics.DefaultRegistry)\n\t\tdiskReadBytes = metics.GetOrRegisterMeter(\"system/disk/readdata\", metics.DefaultRegistry)\n\t\tdiskWrites = metics.GetOrRegisterMeter(\"system/disk/writecount\", metics.DefaultRegistry)\n\t\tdiskWriteBytes = metics.GetOrRegisterMeter(\"system/disk/writedata\", metics.DefaultRegistry)\n\t} else {\n\t\tbgmlogs.Debug(\"Failed to read disk metics\", \"err\", err)\n\t}\n\t// Iterate loading the different states and updating the meters\n\tfor i := 1; ; i++ {\n\t\truntime.ReadMemStats(memstates[i%2])\n\t\tmemAllocs.Mark(int64(memstates[i%2].Mallocs - memstates[(i-1)%2].Mallocs))\n\t\tmemFrees.Mark(int64(memstates[i%2].Frees - memstates[(i-1)%2].Frees))\n\t\tmemInuse.Mark(int64(memstates[i%2].Alloc - memstates[(i-1)%2].Alloc))\n\t\tmemPauses.Mark(int64(memstates[i%2].PauseTotalNs - memstates[(i-1)%2].PauseTotalNs))\n\n\t\tif ReadDiskStats(diskstates[i%2]) == nil {\n\t\t\tdiskReads.Mark(diskstates[i%2].ReadCount - diskstates[(i-1)%2].ReadCount)\n\t\t\tdiskReadBytes.Mark(diskstates[i%2].ReadBytes - diskstates[(i-1)%2].ReadBytes)\n\t\t\tdiskWrites.Mark(diskstates[i%2].WriteCount - diskstates[(i-1)%2].WriteCount)\n\t\t\tdiskWriteBytes.Mark(diskstates[i%2].WriteBytes - diskstates[(i-1)%2].WriteBytes)\n\t\t}\n\t\ttime.Sleep(refresh)\n\t}\n}", "func Collect(ctx context.Context) error {\n\tif !singleton.enabled {\n\t\treturn nil\n\t}\n\n\tif singleton.darkstatAddr == \"\" {\n\t\treturn fmt.Errorf(\"Darkstat address is empty\")\n\t}\n\n\tstartTime := time.Now()\n\n\tinventoryHosts := inventory.Get()\n\n\tlocalAddr, err := network.DefaultLocalAddr()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// To label source traffic that we need to build dependency graph\n\tlocalHostgroup := localAddr.String()\n\tlocalDomain := localAddr.String()\n\tlocalInventory, ok := inventoryHosts[localAddr.String()]\n\tif ok {\n\t\tlocalHostgroup = localInventory.Hostgroup\n\t\tlocalDomain = localInventory.Domain\n\t}\n\tlog.Debugf(\"Local address don't exist in inventory: %v\", localAddr.String())\n\n\t// Scrape darkstat prometheus endpoint for host_bytes_total\n\tvar darkstatHostBytesTotal *prom2json.Family\n\tdarkstatScrape, err := prometheus.Scrape(singleton.darkstatAddr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, v := range darkstatScrape {\n\t\tif v.Name == \"host_bytes_total\" {\n\t\t\tdarkstatHostBytesTotal = v\n\t\t\tbreak\n\t\t}\n\t}\n\tif darkstatHostBytesTotal == nil {\n\t\treturn fmt.Errorf(\"Metric host_bytes_total doesn't exist\")\n\t}\n\n\t// Extract relevant data out of host_bytes_total\n\tvar hosts []Metric\n\tfor _, m := range darkstatHostBytesTotal.Metrics {\n\t\tmetric := m.(prom2json.Metric)\n\n\t\tip := net.ParseIP(metric.Labels[\"ip\"])\n\n\t\t// Skip its own IP as we don't need it\n\t\tif ip.Equal(localAddr) {\n\t\t\tcontinue\n\t\t}\n\n\t\tinventoryHostInfo := inventoryHosts[metric.Labels[\"ip\"]]\n\n\t\tbandwidth, err := strconv.ParseFloat(metric.Value, 64)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to parse 'host_bytes_total' value: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tdirection := \"\"\n\t\t// Reversed from netfilter perspective\n\t\tswitch metric.Labels[\"dir\"] {\n\t\tcase 
\"out\":\n\t\t\tdirection = \"ingress\"\n\t\tcase \"in\":\n\t\t\tdirection = \"egress\"\n\t\t}\n\n\t\thosts = append(hosts, Metric{\n\t\t\tLocalHostgroup: localHostgroup,\n\t\t\tRemoteHostgroup: inventoryHostInfo.Hostgroup,\n\t\t\tRemoteIPAddr: metric.Labels[\"ip\"],\n\t\t\tLocalDomain: localDomain,\n\t\t\tRemoteDomain: inventoryHostInfo.Domain,\n\t\t\tDirection: direction,\n\t\t\tBandwidth: bandwidth,\n\t\t})\n\t}\n\n\tsingleton.mu.Lock()\n\tsingleton.hosts = hosts\n\tsingleton.mu.Unlock()\n\n\tlog.Debugf(\"taskdarkstat.Collect retrieved %v downstreams metrics\", len(hosts))\n\tlog.Debugf(\"taskdarkstat.Collect process took %v\", time.Since(startTime))\n\treturn nil\n}", "func (s *Server) agentMemoryStats(metrics cgm.Metrics, mtags []string) {\n\t// var mem syscall.Rusage\n\t// if err := syscall.Getrusage(syscall.RUSAGE_SELF, &mem); err == nil {\n\t// \tmetrics[tags.MetricNameWithStreamTags(\"agent_max_rss\", tags.FromList(ctags))] = cgm.Metric{Value: uint64(mem.Maxrss * 1024), Type: \"L\"} // maximum resident set size used (in kilobytes)\n\t// } else {\n\t// \ts.logger.Warn().Err(err).Msg(\"collecting rss from system\")\n\t// }\n}", "func (p *ProcMetrics) Collect() {\n\tif m, err := CollectProcInfo(p.pid); err == nil {\n\t\tnow := time.Now()\n\n\t\tif !p.lastTime.IsZero() {\n\t\t\tratio := 1.0\n\t\t\tswitch {\n\t\t\tcase m.CPU.Period > 0 && m.CPU.Quota > 0:\n\t\t\t\tratio = float64(m.CPU.Quota) / float64(m.CPU.Period)\n\t\t\tcase m.CPU.Shares > 0:\n\t\t\t\tratio = float64(m.CPU.Shares) / 1024\n\t\t\tdefault:\n\t\t\t\tratio = 1 / float64(runtime.NumCPU())\n\t\t\t}\n\n\t\t\tinterval := ratio * float64(now.Sub(p.lastTime))\n\n\t\t\tp.cpu.user.time = m.CPU.User - p.last.CPU.User\n\t\t\tp.cpu.user.percent = 100 * float64(p.cpu.user.time) / interval\n\n\t\t\tp.cpu.system.time = m.CPU.Sys - p.last.CPU.Sys\n\t\t\tp.cpu.system.percent = 100 * float64(p.cpu.system.time) / interval\n\n\t\t\tp.cpu.total.time = (m.CPU.User + m.CPU.Sys) - (p.last.CPU.User + p.last.CPU.Sys)\n\t\t\tp.cpu.total.percent = 100 * float64(p.cpu.total.time) / interval\n\t\t}\n\n\t\tp.memory.available = m.Memory.Available\n\t\tp.memory.size = m.Memory.Size\n\t\tp.memory.resident.usage = m.Memory.Resident\n\t\tp.memory.resident.percent = 100 * float64(p.memory.resident.usage) / float64(p.memory.available)\n\t\tp.memory.shared.usage = m.Memory.Shared\n\t\tp.memory.text.usage = m.Memory.Text\n\t\tp.memory.data.usage = m.Memory.Data\n\t\tp.memory.pagefault.major.count = m.Memory.MajorPageFaults - p.last.Memory.MajorPageFaults\n\t\tp.memory.pagefault.minor.count = m.Memory.MinorPageFaults - p.last.Memory.MinorPageFaults\n\n\t\tp.files.open = m.Files.Open\n\t\tp.files.max = m.Files.Max\n\n\t\tp.threads.num = m.Threads.Num\n\t\tp.threads.switches.voluntary.count = m.Threads.VoluntaryContextSwitches - p.last.Threads.VoluntaryContextSwitches\n\t\tp.threads.switches.involuntary.count = m.Threads.InvoluntaryContextSwitches - p.last.Threads.InvoluntaryContextSwitches\n\n\t\tp.last = m\n\t\tp.lastTime = now\n\t\tp.engine.Report(p)\n\t}\n}", "func (m *KubeletMonitor) parsePodStats(podStats []stats.PodStats) {\n\tfor _, podStat := range podStats {\n\t\tvar cpuUsageNanoCoreSum uint64\n\t\tvar memoryUsageBytesSum uint64\n\t\tfor _, containerStat := range podStat.Containers {\n\t\t\tif containerStat.CPU != nil && containerStat.CPU.UsageNanoCores != nil {\n\t\t\t\tcpuUsageNanoCoreSum += *containerStat.CPU.UsageNanoCores\n\t\t\t}\n\t\t\tif containerStat.Memory != nil && containerStat.Memory.UsageBytes != nil {\n\t\t\t\tmemoryUsageBytesSum += 
*containerStat.Memory.UsageBytes\n\t\t\t}\n\t\t}\n\t\tglog.V(4).Infof(\"Cpu usage of pod %s is %f core\", util.PodStatsKeyFunc(podStat),\n\t\t\tfloat64(cpuUsageNanoCoreSum)/util.NanoToUnit)\n\t\tpodCpuUsageCoreMetrics := metrics.NewEntityResourceMetric(task.PodType, util.PodStatsKeyFunc(podStat),\n\t\t\tmetrics.CPU, metrics.Used, float64(cpuUsageNanoCoreSum)/util.NanoToUnit)\n\n\t\tglog.V(4).Infof(\"Memory usage of pod %s is %f Kb\", util.PodStatsKeyFunc(podStat),\n\t\t\tfloat64(memoryUsageBytesSum)/util.KilobytesToBytes)\n\t\tpodMemoryUsageCoreMetrics := metrics.NewEntityResourceMetric(task.PodType, util.PodStatsKeyFunc(podStat),\n\t\t\tmetrics.Memory, metrics.Used, float64(memoryUsageBytesSum)/util.KilobytesToBytes)\n\n\t\t// application cpu and mem used are the same as pod's.\n\t\tapplicationCpuUsageCoreMetrics := metrics.NewEntityResourceMetric(task.ApplicationType,\n\t\t\tutil.PodStatsKeyFunc(podStat), metrics.CPU, metrics.Used,\n\t\t\tfloat64(cpuUsageNanoCoreSum)/util.NanoToUnit)\n\t\tapplicationMemoryUsageCoreMetrics := metrics.NewEntityResourceMetric(task.ApplicationType,\n\t\t\tutil.PodStatsKeyFunc(podStat), metrics.Memory, metrics.Used,\n\t\t\tfloat64(memoryUsageBytesSum)/util.KilobytesToBytes)\n\n\t\tm.metricSink.AddNewMetricEntries(podCpuUsageCoreMetrics,\n\t\t\tpodMemoryUsageCoreMetrics,\n\t\t\tapplicationCpuUsageCoreMetrics,\n\t\t\tapplicationMemoryUsageCoreMetrics)\n\t}\n}", "func (sr *ServicedStatsReporter) gatherStats(t time.Time) []Sample {\n\tstats := []Sample{}\n\t// Handle the host metrics.\n\treg, _ := sr.hostRegistry.(*metrics.StandardRegistry)\n\treg.Each(func(name string, i interface{}) {\n\t\ttagmap := map[string]string{\n\t\t\t\"controlplane_host_id\": sr.hostID,\n\t\t}\n\t\tswitch metric := i.(type) {\n\t\tcase metrics.Gauge:\n\t\t\tstats = append(stats, Sample{name, strconv.FormatInt(metric.Value(), 10), t.Unix(), tagmap})\n\t\tcase metrics.GaugeFloat64:\n\t\t\tstats = append(stats, Sample{name, strconv.FormatFloat(metric.Value(), 'f', -1, 32), t.Unix(), tagmap})\n\t\t}\n\t})\n\t// Handle each container's metrics.\n\tfor key, registry := range sr.containerRegistries {\n\t\treg, _ := registry.(*metrics.StandardRegistry)\n\t\treg.Each(func(name string, i interface{}) {\n\t\t\ttagmap := map[string]string{\n\t\t\t\t\"controlplane_host_id\": sr.hostID,\n\t\t\t\t\"controlplane_service_id\": key.serviceID,\n\t\t\t\t\"controlplane_instance_id\": strconv.FormatInt(int64(key.instanceID), 10),\n\t\t\t}\n\t\t\tswitch metric := i.(type) {\n\t\t\tcase metrics.Gauge:\n\t\t\t\tstats = append(stats, Sample{name, strconv.FormatInt(metric.Value(), 10), t.Unix(), tagmap})\n\t\t\tcase metrics.GaugeFloat64:\n\t\t\t\tstats = append(stats, Sample{name, strconv.FormatFloat(metric.Value(), 'f', -1, 32), t.Unix(), tagmap})\n\t\t\t}\n\t\t})\n\t}\n\treturn stats\n}", "func (g gatherer) GatherMetrics(ctx context.Context, out *apm.Metrics) error {\n\tmetricFamilies, err := g.p.Gather()\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\tfor _, mf := range metricFamilies {\n\t\tname := mf.GetName()\n\t\tswitch mf.GetType() {\n\t\tcase dto.MetricType_COUNTER:\n\t\t\tfor _, m := range mf.GetMetric() {\n\t\t\t\tv := m.GetCounter().GetValue()\n\t\t\t\tout.Add(name, makeLabels(m.GetLabel()), v)\n\t\t\t}\n\t\tcase dto.MetricType_GAUGE:\n\t\t\tmetrics := mf.GetMetric()\n\t\t\tif name == \"go_info\" && len(metrics) == 1 && metrics[0].GetGauge().GetValue() == 1 {\n\t\t\t\t// Ignore the \"go_info\" metric from the\n\t\t\t\t// built-in GoCollector, as we provide\n\t\t\t\t// the same information 
in the payload.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, m := range metrics {\n\t\t\t\tv := m.GetGauge().GetValue()\n\t\t\t\tout.Add(name, makeLabels(m.GetLabel()), v)\n\t\t\t}\n\t\tcase dto.MetricType_UNTYPED:\n\t\t\tfor _, m := range mf.GetMetric() {\n\t\t\t\tv := m.GetUntyped().GetValue()\n\t\t\t\tout.Add(name, makeLabels(m.GetLabel()), v)\n\t\t\t}\n\t\tcase dto.MetricType_SUMMARY:\n\t\t\tfor _, m := range mf.GetMetric() {\n\t\t\t\ts := m.GetSummary()\n\t\t\t\tlabels := makeLabels(m.GetLabel())\n\t\t\t\tout.Add(name+\".count\", labels, float64(s.GetSampleCount()))\n\t\t\t\tout.Add(name+\".total\", labels, float64(s.GetSampleSum()))\n\t\t\t\tfor _, q := range s.GetQuantile() {\n\t\t\t\t\tp := int(q.GetQuantile() * 100)\n\t\t\t\t\tout.Add(name+\".percentile.\"+strconv.Itoa(p), labels, q.GetValue())\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\t// TODO(axw) MetricType_HISTOGRAM\n\t\t}\n\t}\n\treturn nil\n}", "func (s *Systemctl) Gather(acc telegraf.Accumulator) error {\n\ts.mux.Lock()\n\tdefer s.mux.Unlock()\n\n\t// for each systemctl service being monitored\n\tfor _, aggregator := range s.Aggregators {\n\t\t// aggregate the data from the set of samples\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"InputPlugin\": \"systemctl\",\n\t\t\t\"ResourceName\": aggregator.ResourceName,\n\t\t}).Debug(\"Aggregating\")\n\t\terr := aggregator.Aggregate()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// create fields\n\t\tfields := map[string]interface{}{\n\t\t\t\"current_state_time\": aggregator.CurrentStateDuration,\n\t\t\t\"current_state\": aggregator.CurrentState,\n\t\t}\n\t\tfor k := range aggregator.AggState {\n\t\t\tfields[k] = aggregator.AggState[k]\n\t\t}\n\t\t// create tags\n\t\ttags := map[string]string{\"resource\": aggregator.ResourceName}\n\t\tacc.AddFields(\"service_config_state\", fields, tags)\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"InputPlugin\": \"systemctl\",\n\t\t\t\"ResourceName\": aggregator.ResourceName,\n\t\t}).Debug(\"Added fields\")\n\t}\n\treturn nil\n}", "func FlowStatsCollect(ctx *zedrouterContext) {\n\tvar instData networkAttrs\n\tvar timeOutTuples []flowStats\n\tvar totalFlow int\n\n\tinstData.ipaclattr = make(map[int]map[int]aclAttr) // App-ID/ACL-Num/aclAttr table\n\tinstData.appIPinfo = make(map[int][]appInfo)\n\tinstData.bnNet = make(map[string]bridgeAttr) // borrow the aclAttr for intf attributes\n\tinstData.appNet = make(map[int]uuid.UUID)\n\n\tIntfAddrs, err := net.InterfaceAddrs()\n\tif err != nil {\n\t\tlog.Errorf(\"error in getting addresses: %v\\n\", err)\n\t\treturn\n\t}\n\tinstData.intfAddrs = IntfAddrs\n\n\tcheckAppAndACL(ctx, &instData)\n\n\t// Get IPv4/v6 conntrack table flows\n\tProtocols := [2]netlink.InetFamily{syscall.AF_INET, syscall.AF_INET6}\n\tfor _, proto := range Protocols {\n\t\tconnT, err := netlink.ConntrackTableList(netlink.ConntrackTable, proto)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"FlowStats(%d): ConntrackTableList failed: %v\", proto, err)\n\t\t\treturn\n\t\t}\n\n\t\tlog.Tracef(\"***FlowStats(%d): device=%v, size of the flows %d\\n\", proto, devUUID, len(connT))\n\n\t\tfor _, entry := range connT { // loop through and process current timed-out flow collection\n\t\t\tflowTuple := flowMergeProcess(entry, instData)\n\t\t\t// flowTuple := FlowMergeTuple(entry, instData, ipToName)\n\t\t\tif !flowTuple.IsTimeOut || !flowTuple.foundApp {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttimeOutTuples = append(timeOutTuples, flowTuple)\n\t\t\ttotalFlow++\n\t\t}\n\t}\n\n\tlog.Tracef(\"FlowStats ++ Total timed-out flows %d, loopcount debug %d\\n\", totalFlow, 
loopcount)\n\tloopcount++\n\n\t// per app/bridge packing flow stats to be uploaded\n\tfor bnx := range instData.bnNet {\n\t\t// obtain DNS entries recorded since the last flow collection\n\t\tbnNum, err := bridgeStrToNum(ctx, bnx)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\tcontinue\n\t\t}\n\t\tdnssys[bnNum].Lock()\n\t\tdnsEntries := dnssys[bnNum].Snoop\n\t\tdnssys[bnNum].Snoop = nil\n\t\tdnssys[bnNum].Unlock()\n\n\t\tfor appIdx := range instData.appNet {\n\t\t\tvar sequence, flowIdx int\n\n\t\t\t// fill in the partial scope information, later the aclNum and aclAttr will decide\n\t\t\t// if we have a match in this flow into app/bridge scope\n\t\t\tscope := types.FlowScope{\n\t\t\t\tUUID: instData.appNet[appIdx],\n\t\t\t\tLocalintf: instData.bnNet[bnx].bridge,\n\t\t\t\tNetUUID: instData.bnNet[bnx].netUUID,\n\t\t\t}\n\t\t\tflowdata := types.IPFlow{\n\t\t\t\tDevID: devUUID,\n\t\t\t\tScope: scope,\n\t\t\t}\n\n\t\t\tlog.Tracef(\"FlowStats: bnx=%s, appidx %d\\n\", bnx, appIdx)\n\t\t\t// temp print out the flow \"tuple\" and stats per app/bridge\n\t\t\tfor i, tuple := range timeOutTuples { // search for flowstats by bridge\n\t\t\t\tvar aclattr aclAttr\n\t\t\t\tvar aclNum int\n\t\t\t\tvar aclaction types.ACLActionType\n\n\t\t\t\tappN := tuple.appNum\n\t\t\t\tif int(appN) != appIdx { // allow non-App flows to be uploaded\n\t\t\t\t\t//log.Functionf(\"FlowStats: appN %d, appIdx %d not match\", appN, appIdx)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif tuple.aclNum != DropMarkValue {\n\t\t\t\t\ttmpMap := instData.ipaclattr[int(appN)]\n\t\t\t\t\tif tmpMap != nil {\n\t\t\t\t\t\tif _, ok := tmpMap[int(tuple.aclNum)]; !ok {\n\t\t\t\t\t\t\tlog.Tracef(\"FlowStats: == can not get acl map with aclN, should not happen appN %d, aclN %d; %s\\n\",\n\t\t\t\t\t\t\t\tappN, tuple.aclNum, tuple.String())\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\taclattr = tmpMap[int(tuple.aclNum)]\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlog.Tracef(\"FlowStats: == can't get acl map with appN, should not happen, appN %d, aclN %d; %s\\n\",\n\t\t\t\t\t\t\tappN, tuple.aclNum, tuple.String())\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif aclattr.aclNum == 0 {\n\t\t\t\t\t\tlog.Tracef(\"FlowStats: == aclN zero in attr, appN %d, aclN %d; %s\\n\", appN, tuple.aclNum, tuple.String())\n\t\t\t\t\t\t// some debug info\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tif aclattr.bridge != bnx {\n\t\t\t\t\t\tlog.Tracef(\"FlowStats: == bridge name not match %s, %s\\n\", bnx, aclattr.bridge)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tscope.Intf = aclattr.intfname // App side DomU internal interface name\n\t\t\t\t\taclaction = types.ACLActionAccept\n\t\t\t\t\taclNum = int(aclattr.aclNum)\n\t\t\t\t} else { // conntrack mark aclNum field being 0xffffff\n\t\t\t\t\t// special drop aclNum\n\t\t\t\t\tappinfo := flowGetAppInfo(tuple, instData.appIPinfo[appIdx])\n\t\t\t\t\tif appinfo.localintf != bnx {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tscope.Intf = appinfo.intf\n\t\t\t\t\taclaction = types.ACLActionDrop\n\t\t\t\t\taclNum = 0\n\t\t\t\t}\n\n\t\t\t\t// temp print out log for the flow\n\t\t\t\tlog.Tracef(\"FlowStats [%d]: on bn%d %s\\n\", i, bnNum, tuple.String()) // just print for now\n\n\t\t\t\tflowtuple := types.IPTuple{\n\t\t\t\t\tSrc: tuple.SrcIP,\n\t\t\t\t\tDst: tuple.DstIP,\n\t\t\t\t\tSrcPort: int32(tuple.SrcPort),\n\t\t\t\t\tDstPort: int32(tuple.DstPort),\n\t\t\t\t\tProto: int32(tuple.Proto),\n\t\t\t\t}\n\t\t\t\tflowrec := types.FlowRec{\n\t\t\t\t\tFlow: flowtuple,\n\t\t\t\t\tInbound: !tuple.AppInitiate,\n\t\t\t\t\tACLID: 
int32(aclNum),\n\t\t\t\t\tAction: aclaction,\n\t\t\t\t\tStartTime: tuple.TimeStart,\n\t\t\t\t\tStopTime: tuple.TimeStop,\n\t\t\t\t\tTxBytes: int64(tuple.SendBytes),\n\t\t\t\t\tTxPkts: int64(tuple.SendPkts),\n\t\t\t\t\tRxBytes: int64(tuple.RecvBytes),\n\t\t\t\t\tRxPkts: int64(tuple.RecvPkts),\n\t\t\t\t}\n\n\t\t\t\tflowdata.Flows = append(flowdata.Flows, flowrec)\n\t\t\t\tflowIdx++\n\t\t\t\tif flowIdx > maxFlowPack {\n\t\t\t\t\tflowPublish(ctx, &flowdata, &sequence, &flowIdx)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar dnsrec [2]map[string]dnsEntry\n\t\t\tdnsrec[0] = make(map[string]dnsEntry) // store IPv4 addresses from dns\n\t\t\tdnsrec[1] = make(map[string]dnsEntry) // store IPv6 addresses from dns\n\n\t\t\t// select dns request/replies corresponding to this app\n\t\t\tfor _, dnsdata := range dnsEntries {\n\t\t\t\tif !checkAppIPAddr(instData.appIPinfo[appIdx], dnsdata.AppIP) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// unique by domain name, latest reply overwrite previous ones\n\t\t\t\tif dnsdata.isIPv4 {\n\t\t\t\t\tdnsrec[0][dnsdata.DomainName] = dnsdata\n\t\t\t\t} else {\n\t\t\t\t\tdnsrec[1][dnsdata.DomainName] = dnsdata\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// append dns records into the flow data\n\t\t\tfor idx := range dnsrec {\n\t\t\t\tfor _, dnsRec := range dnsrec[idx] {\n\t\t\t\t\t// temp print out all unique dns replies for the bridge/app\n\t\t\t\t\tlog.Tracef(\"!!FlowStats: DNS time %v, domain %s, appIP %v, count %d, Answers %v\",\n\t\t\t\t\t\tdnsRec.TimeStamp, dnsRec.DomainName, dnsRec.AppIP, dnsRec.ANCount, dnsRec.Answers)\n\n\t\t\t\t\tdnsrec := types.DNSReq{\n\t\t\t\t\t\tHostName: dnsRec.DomainName,\n\t\t\t\t\t\tAddrs: dnsRec.Answers,\n\t\t\t\t\t\tRequestTime: dnsRec.TimeStamp.UnixNano(),\n\t\t\t\t\t}\n\t\t\t\t\tflowdata.DNSReqs = append(flowdata.DNSReqs, dnsrec)\n\t\t\t\t\tflowIdx++\n\t\t\t\t\tif flowIdx > maxFlowPack {\n\t\t\t\t\t\tflowPublish(ctx, &flowdata, &sequence, &flowIdx)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// flow record done for the bridge/app\n\t\t\t// publish the flow data (per app/bridge) and sequence (for size limit) to zedagent now\n\t\t\tflowPublish(ctx, &flowdata, &sequence, &flowIdx)\n\t\t}\n\t}\n\t// check and remove stale flowlog publications\n\tcheckFlowUnpublish(ctx)\n}", "func NewVMCollector(cfgBaseName string) (collector.Collector, error) {\n\tprocFile := \"meminfo\"\n\n\tc := VM{}\n\tc.id = \"vm\"\n\tc.pkgID = \"builtins.linux.procfs.\" + c.id\n\tc.procFSPath = \"/proc\"\n\tc.file = filepath.Join(c.procFSPath, procFile)\n\tc.logger = log.With().Str(\"pkg\", c.pkgID).Logger()\n\tc.metricStatus = map[string]bool{}\n\tc.metricDefaultActive = true\n\n\tif cfgBaseName == \"\" {\n\t\tif _, err := os.Stat(c.file); err != nil {\n\t\t\treturn nil, errors.Wrap(err, c.pkgID)\n\t\t}\n\t\treturn &c, nil\n\t}\n\n\tvar opts vmOptions\n\terr := config.LoadConfigFile(cfgBaseName, &opts)\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), \"no config found matching\") {\n\t\t\treturn &c, nil\n\t\t}\n\t\tc.logger.Warn().Err(err).Str(\"file\", cfgBaseName).Msg(\"loading config file\")\n\t\treturn nil, errors.Wrapf(err, \"%s config\", c.pkgID)\n\t}\n\n\tc.logger.Debug().Str(\"base\", cfgBaseName).Interface(\"config\", opts).Msg(\"loaded config\")\n\n\tif opts.ID != \"\" {\n\t\tc.id = opts.ID\n\t}\n\n\tif opts.ProcFSPath != \"\" {\n\t\tc.procFSPath = opts.ProcFSPath\n\t\tc.file = filepath.Join(c.procFSPath, procFile)\n\t}\n\n\tif len(opts.MetricsEnabled) > 0 {\n\t\tfor _, name := range opts.MetricsEnabled {\n\t\t\tc.metricStatus[name] = true\n\t\t}\n\t}\n\tif 
len(opts.MetricsDisabled) > 0 {\n\t\tfor _, name := range opts.MetricsDisabled {\n\t\t\tc.metricStatus[name] = false\n\t\t}\n\t}\n\n\tif opts.MetricsDefaultStatus != \"\" {\n\t\tif ok, _ := regexp.MatchString(`^(enabled|disabled)$`, strings.ToLower(opts.MetricsDefaultStatus)); ok {\n\t\t\tc.metricDefaultActive = strings.ToLower(opts.MetricsDefaultStatus) == metricStatusEnabled\n\t\t} else {\n\t\t\treturn nil, errors.Errorf(\"%s invalid metric default status (%s)\", c.pkgID, opts.MetricsDefaultStatus)\n\t\t}\n\t}\n\n\tif opts.RunTTL != \"\" {\n\t\tdur, err := time.ParseDuration(opts.RunTTL)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"%s parsing run_ttl\", c.pkgID)\n\t\t}\n\t\tc.runTTL = dur\n\t}\n\n\tif _, err := os.Stat(c.file); os.IsNotExist(err) {\n\t\treturn nil, errors.Wrap(err, c.pkgID)\n\t}\n\n\treturn &c, nil\n}", "func reduceJavaAgentYaml(m *mainDefinitionParser) (map[string]*domainReducer, error) {\n\tthisDomainMap := make(map[string]*domainReducer)\n\tfor _, jmxObject := range m.JMX {\n\t\tvar thisDomain *domainReducer\n\t\tvar thisBean *beanReducer\n\t\tvar domainAndQuery = strings.Split(jmxObject.ObjectName, \":\")\n\t\tif _, ok := thisDomainMap[domainAndQuery[0]]; ok {\n\t\t\tthisDomain = thisDomainMap[domainAndQuery[0]]\n\t\t\tif _, ok := thisDomain.BeansMap[domainAndQuery[1]]; ok {\n\t\t\t\tthisBean = thisDomain.BeansMap[domainAndQuery[1]]\n\t\t\t}\n\t\t}\n\t\tfor _, thisMetric := range jmxObject.Metrics {\n\t\t\tvar inAttrs = strings.Split(thisMetric.Attributes, \",\")\n\t\t\tfor _, thisAttr := range inAttrs {\n\t\t\t\tthisAttr = strings.TrimSpace(thisAttr)\n\t\t\t\tif thisBean != nil {\n\t\t\t\t\tif _, ok := thisBean.AttributesMap[thisAttr]; !ok {\n\t\t\t\t\t\tthisBean.AttributesMap[thisAttr] = &attributeReducer{MetricType: convertMetricType(thisMetric.Type), MetricName: getMetricName(thisAttr, jmxObject.RootMetricName, domainAndQuery[1])}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tthisAttrMap := make(map[string]*attributeReducer)\n\t\t\t\t\tthisAttrMap[thisAttr] = &attributeReducer{MetricType: convertMetricType(thisMetric.Type), MetricName: getMetricName(thisAttr, jmxObject.RootMetricName, domainAndQuery[1])}\n\t\t\t\t\tthisBean = &beanReducer{AttributesMap: thisAttrMap}\n\t\t\t\t\tif thisDomain == nil {\n\t\t\t\t\t\tvar outEventType = getEventType(m.Name, domainAndQuery[0])\n\t\t\t\t\t\tthisBeanMap := make(map[string]*beanReducer)\n\t\t\t\t\t\tthisBeanMap[domainAndQuery[1]] = thisBean\n\t\t\t\t\t\tthisDomainMap[domainAndQuery[0]] = &domainReducer{EventType: outEventType, BeansMap: thisBeanMap}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tthisDomain.BeansMap[domainAndQuery[1]] = thisBean\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn thisDomainMap, nil\n}", "func (p *Prom) CollectStdout(in *bufio.Reader) {\n\tvar stats Metrics\n\tfor {\n\t\tline, err := in.ReadBytes('\\n')\n\t\tif err == io.EOF {\n\t\t\treturn\n\t\t}\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t}\n\t\tif err := json.Unmarshal(line, &stats); err != nil {\n\t\t\tfmt.Fprint(os.Stdout, string(line))\n\t\t\tcontinue\n\t\t}\n\t\tif stats.MessageType != \"summary\" 
{\n\t\t\tcontinue\n\t\t}\n\t\tp.duration.WithLabelValues(p.labelValues...).Set(float64(stats.TotalDuration))\n\t\tp.filesNew.WithLabelValues(p.labelValues...).Set(float64(stats.FilesNew))\n\t\tp.filesUnmodified.WithLabelValues(p.labelValues...).Set(float64(stats.FilesUnmodified))\n\t\tp.filesChanged.WithLabelValues(p.labelValues...).Set(float64(stats.FilesChanged))\n\t\tp.dirsNew.WithLabelValues(p.labelValues...).Set(float64(stats.DirsNew))\n\t\tp.dirsChanged.WithLabelValues(p.labelValues...).Set(float64(stats.DirsChanged))\n\t\tp.dirsUnmodified.WithLabelValues(p.labelValues...).Set(float64(stats.DirsUnmodified))\n\t\tp.bytesAdded.WithLabelValues(p.labelValues...).Set(float64(stats.DataAdded))\n\t\tp.bytesProcessed.WithLabelValues(p.labelValues...).Set(float64(stats.TotalBytesProcessed))\n\t\tp.parsed = true\n\t}\n}", "func (h *Hugepages) gatherRootStats(acc telegraf.Accumulator) error {\n\treturn h.gatherFromHugepagePath(acc, \"hugepages_\"+rootHugepages, h.rootHugepagePath, hugepagesMetricsRoot, nil)\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\tif err := e.scrape(ch); err != nil {\n\t\tlog.Infof(\"Error scraping tinystats: %s\", err)\n\t}\n\te.ipv4QueryA.Collect(ch)\n\te.ipv4QueryNS.Collect(ch)\n\te.ipv4QueryCNAME.Collect(ch)\n\te.ipv4QuerySOA.Collect(ch)\n\te.ipv4QueryPTR.Collect(ch)\n\te.ipv4QueryHINFO.Collect(ch)\n\te.ipv4QueryMX.Collect(ch)\n\te.ipv4QueryTXT.Collect(ch)\n\te.ipv4QueryRP.Collect(ch)\n\te.ipv4QuerySIG.Collect(ch)\n\te.ipv4QueryKEY.Collect(ch)\n\te.ipv4QueryAAAA.Collect(ch)\n\te.ipv4QueryAXFR.Collect(ch)\n\te.ipv4QueryANY.Collect(ch)\n\te.ipv4QueryTOTAL.Collect(ch)\n\te.ipv4QueryOTHER.Collect(ch)\n\te.ipv4QueryNOTAUTH.Collect(ch)\n\te.ipv4QueryNOTIMPL.Collect(ch)\n\te.ipv4QueryBADCLASS.Collect(ch)\n\te.ipv4QueryNOQUERY.Collect(ch)\n\n\te.ipv6QueryA.Collect(ch)\n\te.ipv6QueryNS.Collect(ch)\n\te.ipv6QueryCNAME.Collect(ch)\n\te.ipv6QuerySOA.Collect(ch)\n\te.ipv6QueryPTR.Collect(ch)\n\te.ipv6QueryHINFO.Collect(ch)\n\te.ipv6QueryMX.Collect(ch)\n\te.ipv6QueryTXT.Collect(ch)\n\te.ipv6QueryRP.Collect(ch)\n\te.ipv6QuerySIG.Collect(ch)\n\te.ipv6QueryKEY.Collect(ch)\n\te.ipv6QueryAAAA.Collect(ch)\n\te.ipv6QueryAXFR.Collect(ch)\n\te.ipv6QueryANY.Collect(ch)\n\te.ipv6QueryTOTAL.Collect(ch)\n\te.ipv6QueryOTHER.Collect(ch)\n\te.ipv6QueryNOTAUTH.Collect(ch)\n\te.ipv6QueryNOTIMPL.Collect(ch)\n\te.ipv6QueryBADCLASS.Collect(ch)\n\te.ipv6QueryNOQUERY.Collect(ch)\n}", "func collectGauges(e *Exporter, ch chan<- prometheus.Metric) {\n\te.chipStatGauge.Collect(ch)\n\te.devsHashRateGauge.Collect(ch)\n\te.devsHashCountGauge.Collect(ch)\n\te.devsErrorsGauge.Collect(ch)\n\te.devsTemperatureGauge.Collect(ch)\n}", "func (h *Hugepages) gatherStatsFromMeminfo(acc telegraf.Accumulator) error {\n\tmeminfo, err := os.ReadFile(h.meminfoPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmetrics := make(map[string]interface{})\n\tlines := bytes.Split(meminfo, newlineByte)\n\tfor _, line := range lines {\n\t\tfields := bytes.Fields(line)\n\t\tif len(fields) < 2 {\n\t\t\tcontinue\n\t\t}\n\t\tfieldName := string(bytes.TrimSuffix(fields[0], colonByte))\n\t\tmetricName, ok := hugepagesMetricsFromMeminfo[fieldName]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tfieldValue, err := strconv.Atoi(string(fields[1]))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to convert content of %q: %w\", fieldName, err)\n\t\t}\n\n\t\tmetrics[metricName] = fieldValue\n\t}\n\n\tacc.AddFields(\"hugepages_\"+meminfoHugepages, metrics, 
map[string]string{})\n\treturn nil\n}", "func (h *Hugepages) gatherStatsPerNode(acc telegraf.Accumulator) error {\n\tnodeDirs, err := os.ReadDir(h.numaNodePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// read metrics from: node*/hugepages/hugepages-*/*\n\tfor _, nodeDir := range nodeDirs {\n\t\tif !nodeDir.IsDir() || !strings.HasPrefix(nodeDir.Name(), \"node\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tnodeNumber := strings.TrimPrefix(nodeDir.Name(), \"node\")\n\t\t_, err := strconv.Atoi(nodeNumber)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tperNodeTags := map[string]string{\n\t\t\t\"node\": nodeNumber,\n\t\t}\n\t\thugepagesPath := filepath.Join(h.numaNodePath, nodeDir.Name(), \"hugepages\")\n\t\terr = h.gatherFromHugepagePath(acc, \"hugepages_\"+perNodeHugepages, hugepagesPath, hugepagesMetricsPerNUMANode, perNodeTags)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func CollectAllMetrics(client *statsd.Client, log *li.StandardLogger) {\n\n\tvar metrics []metric\n\tmetrics = append(metrics, metric{name: \"gpu.temperature\", cmd: \"vcgencmd measure_temp | egrep -o '[0-9]*\\\\.[0-9]*'\"})\n\tmetrics = append(metrics, metric{name: \"cpu.temperature\", cmd: \"cat /sys/class/thermal/thermal_zone0/temp | awk 'END {print $1/1000}'\"})\n\tmetrics = append(metrics, metric{name: \"threads\", cmd: \"ps -eo nlwp | tail -n +2 | awk '{ num_threads += $1 } END { print num_threads }'\"})\n\tmetrics = append(metrics, metric{name: \"processes\", cmd: \"ps axu | wc -l\"})\n\n\tfor range time.Tick(15 * time.Second) {\n\t\tlog.Info(\"Starting metric collection\")\n\t\tfor _, m := range metrics {\n\t\t\terr := collectMetric(m, client, log)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t}\n\t\t}\n\t}\n}", "func CollectRuntimeMetrics(registry *Registry) {\n\tCollectMemStats(registry)\n\tCollectSysStats(registry)\n}", "func (p *Psutil) CollectMetrics(mts []plugin.MetricType) ([]plugin.MetricType, error) {\n\tloadReqs := []core.Namespace{}\n\tcpuReqs := []core.Namespace{}\n\tmemReqs := []core.Namespace{}\n\tnetReqs := []core.Namespace{}\n\tdiskReqs := []core.Namespace{}\n\n\tfor _, m := range mts {\n\t\tns := m.Namespace()\n\t\tswitch ns[2].Value {\n\t\tcase \"load\":\n\t\t\tloadReqs = append(loadReqs, ns)\n\t\tcase \"cpu\":\n\t\t\tcpuReqs = append(cpuReqs, ns)\n\t\tcase \"vm\":\n\t\t\tmemReqs = append(memReqs, ns)\n\t\tcase \"net\":\n\t\t\tnetReqs = append(netReqs, ns)\n\t\tcase \"disk\":\n\t\t\tdiskReqs = append(diskReqs, ns)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"Requested metric %s does not match any known psutil metric\", m.Namespace().String())\n\t\t}\n\t}\n\n\tmetrics := []plugin.MetricType{}\n\n\tloadMts, err := loadAvg(loadReqs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetrics = append(metrics, loadMts...)\n\n\tcpuMts, err := cpuTimes(cpuReqs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetrics = append(metrics, cpuMts...)\n\n\tmemMts, err := virtualMemory(memReqs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetrics = append(metrics, memMts...)\n\n\tnetMts, err := netIOCounters(netReqs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetrics = append(metrics, netMts...)\n\tmounts := getMountpoints(mts[0].Config().Table())\n\tdiskMts, err := getDiskUsageMetrics(diskReqs, mounts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetrics = append(metrics, diskMts...)\n\n\treturn metrics, nil\n}", "func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) {\n\toomCountByHost, ramUsageByHost := c.ReallyExpensiveAssessmentOfTheSystemState()\n\tfor host, 
oomCount := range oomCountByHost {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.OOMCountDesc,\n\t\t\tprometheus.CounterValue,\n\t\t\tfloat64(oomCount),\n\t\t\thost,\n\t\t)\n\t}\n\tfor host, ramUsage := range ramUsageByHost {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.RAMUsageDesc,\n\t\t\tprometheus.GaugeValue,\n\t\t\tramUsage,\n\t\t\thost,\n\t\t)\n\t}\n}", "func (collector *proxmoxZpoolCollector) Collect(ch chan<- prometheus.Metric) {\n\tnodes, err := collector.api.GetNodes()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tfor _, node := range nodes.Data {\n\t\tzpoolList, err := collector.api.GetZpoolList(node.Node)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\t\tfor _, zpool := range zpoolList.Data {\n\t\t\tvar zpoolParsingErrorMetric float64\n\n\t\t\tzpoolInfo, err := collector.api.GetZpool(node.Node, zpool.Name)\n\t\t\tif err != nil {\n\t\t\t\tzpoolParsingErrorMetric = float64(1)\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\n\t\t\tvar zpoolOnlineMetric float64\n\t\t\tvar zpoolErrorMetric float64\n\t\t\tif zpoolInfo.Data.State == \"ONLINE\" {\n\t\t\t\tzpoolOnlineMetric = float64(1)\n\t\t\t\tzpoolErrorMetric = float64(0)\n\t\t\t} else {\n\t\t\t\tzpoolErrorMetric = float64(1)\n\t\t\t}\n\n\t\t\tvar zpoolLastScrubMetric float64\n\t\t\t//Example scrub response: scrub repaired 0B in 0 days 01:56:29 with 0 errors on Sun May 10 02:20:30 2020\n\t\t\tif x := strings.SplitAfter(zpoolInfo.Data.Scan, \"on \"); len(x) == 2 {\n\t\t\t\t//Sun May 10 02:20:30 2020\n\t\t\t\tif len(x[1]) > 5 { //We want to get rid of the day eg: Mon\n\t\t\t\t\t//May 10 02:20:30 2020\n\t\t\t\t\tt, err := time.Parse(dateForm, x[1][4:])\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tzpoolParsingErrorMetric = float64(1)\n\t\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\t}\n\t\t\t\t\tzpoolLastScrubMetric = float64(t.Unix()) //Could this be an issue since time.Unix() returns int64?\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar zpoolLastScrubErrorsMetric float64\n\t\t\t//Example scrub response: scrub repaired 0B in 0 days 01:56:29 with 0 errors on Sun May 10 02:20:30 2020\n\t\t\tsplitLine := strings.Split(zpoolInfo.Data.Scan, \" \")\n\t\t\tfor index, x := range splitLine {\n\t\t\t\tif strings.Contains(x, \"error\") && index >= 1 { //Support for \"error\" or \"errors\"\n\t\t\t\t\ttotalErrors, err := strconv.ParseFloat(splitLine[index-1], 64) //We want to grab the number before error eg: 3 errors\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tzpoolParsingErrorMetric = float64(1)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tzpoolLastScrubErrorsMetric = totalErrors\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t//ch <- prometheus.MustNewConstMetric(zpoolTotalDisks, prometheus.GaugeValue, metricValue, \"test\")\n\t\t\tch <- prometheus.MustNewConstMetric(zpoolError, prometheus.GaugeValue, zpoolErrorMetric, node.Node, zpool.Name)\n\t\t\tch <- prometheus.MustNewConstMetric(zpoolOnline, prometheus.GaugeValue, zpoolOnlineMetric, node.Node, zpool.Name)\n\t\t\tch <- prometheus.MustNewConstMetric(zpoolFree, prometheus.GaugeValue, zpool.Free, node.Node, zpool.Name)\n\t\t\tch <- prometheus.MustNewConstMetric(zpoolAllocated, prometheus.GaugeValue, zpool.Alloc, node.Node, zpool.Name)\n\t\t\tch <- prometheus.MustNewConstMetric(zpoolSize, prometheus.GaugeValue, zpool.Size, node.Node, zpool.Name)\n\t\t\tch <- prometheus.MustNewConstMetric(zpoolDedup, prometheus.GaugeValue, float64(zpool.Dedup), node.Node, zpool.Name)\n\t\t\tch <- prometheus.MustNewConstMetric(zpoolLastScrub, prometheus.GaugeValue, zpoolLastScrubMetric, node.Node, 
zpool.Name)\n\t\t\tch <- prometheus.MustNewConstMetric(zpoolLastScrubErrors, prometheus.GaugeValue, zpoolLastScrubErrorsMetric, node.Node, zpool.Name)\n\t\t\tch <- prometheus.MustNewConstMetric(zpoolParsingError, prometheus.GaugeValue, zpoolParsingErrorMetric, node.Node, zpool.Name)\n\t\t}\n\t}\n}", "func (ps *linuxHarvester) populateGauges(sample *types.ProcessSample, process Snapshot) error {\n\tvar err error\n\n\tcpuTimes, err := process.CPUTimes()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsample.CPUPercent = cpuTimes.Percent\n\n\ttotalCPU := cpuTimes.User + cpuTimes.System\n\n\tif totalCPU > 0 {\n\t\tsample.CPUUserPercent = (cpuTimes.User / totalCPU) * sample.CPUPercent\n\t\tsample.CPUSystemPercent = (cpuTimes.System / totalCPU) * sample.CPUPercent\n\t} else {\n\t\tsample.CPUUserPercent = 0\n\t\tsample.CPUSystemPercent = 0\n\t}\n\n\tif ps.privileged {\n\t\tfds, err := process.NumFDs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif fds >= 0 {\n\t\t\tsample.FdCount = &fds\n\t\t}\n\t}\n\n\t// Extra status data\n\tsample.Status = process.Status()\n\tsample.ThreadCount = process.NumThreads()\n\tsample.MemoryVMSBytes = process.VmSize()\n\tsample.MemoryRSSBytes = process.VmRSS()\n\n\treturn nil\n}", "func CollectSysStats(registry *Registry) {\n\tvar s sysStatsCollector\n\ts.registry = registry\n\ts.maxOpen = registry.Gauge(\"fh.max\", nil)\n\ts.curOpen = registry.Gauge(\"fh.allocated\", nil)\n\ts.numGoroutines = registry.Gauge(\"go.numGoroutines\", nil)\n\n\tticker := time.NewTicker(30 * time.Second)\n\tgo func() {\n\t\tlog := registry.log\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tlog.Debugf(\"Collecting system stats\")\n\t\t\t\tfdStats(&s)\n\t\t\t\tgoRuntimeStats(&s)\n\t\t\t}\n\t\t}\n\t}()\n}", "func (c *VMCollector) Collect(ch chan<- prometheus.Metric) {\n\tfor _, m := range c.getMetrics() {\n\t\tch <- m\n\t}\n}", "func (c collector) Collect(ch chan<- prometheus.Metric) {\n\tvar wg sync.WaitGroup\n\n\t// We don't bail out on errors because those can happen if there is a race condition between\n\t// the destruction of a container and us getting to read the cgroup data. We just don't report
We just don't report\n\t// the values we don't get.\n\n\tcollectors := []func(string, *regexp.Regexp){\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tnuma, err := cgroups.GetNumaStats(cgroupPath(\"memory\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateNumaStatMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], numa)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect NUMA stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tmemory, err := cgroups.GetMemoryUsage(cgroupPath(\"memory\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateMemoryUsageMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], memory)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect memory usage stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tmigrate, err := cgroups.GetCPUSetMemoryMigrate(cgroupPath(\"cpuset\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateMemoryMigrateMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], migrate)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect memory migration stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tcpuAcctUsage, err := cgroups.GetCPUAcctStats(cgroupPath(\"cpuacct\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateCPUAcctUsageMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], cpuAcctUsage)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect CPU accounting stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\thugeTlbUsage, err := cgroups.GetHugetlbUsage(cgroupPath(\"hugetlb\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateHugeTlbUsageMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], hugeTlbUsage)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect hugetlb stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tblkioDeviceUsage, err := cgroups.GetBlkioThrottleBytes(cgroupPath(\"blkio\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateBlkioDeviceUsageMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], blkioDeviceUsage)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect blkio stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t}\n\n\tcontainerIDRegexp := regexp.MustCompile(`[a-z0-9]{64}`)\n\n\tfor _, path := range walkCgroups() {\n\t\twg.Add(len(collectors))\n\t\tfor _, fn := range collectors {\n\t\t\tgo fn(path, containerIDRegexp)\n\t\t}\n\t}\n\n\t// We need to wait so that the response channel doesn't get closed.\n\twg.Wait()\n}", "func pvCollect(ch chan<- prometheus.Metric, pvs []map[string]string, vgName string) {\n for _, pv := range pvs {\n pvSizeF, err := strconv.ParseFloat(strings.Trim(pv[\"pv_size\"], \"B\"), 64)\n if err != nil {\n log.Print(err)\n return\n }\n ch <- prometheus.MustNewConstMetric(pvSizeMetric, prometheus.GaugeValue, pvSizeF, pv[\"pv_name\"], pv[\"pv_uuid\"], vgName)\n\n pvFreeF, err := strconv.ParseFloat(strings.Trim(pv[\"pv_free\"], \"B\"), 64)\n if err != nil {\n log.Print(err)\n return\n }\n ch <- prometheus.MustNewConstMetric(pvFreeMetric, prometheus.GaugeValue, pvFreeF, pv[\"pv_name\"], pv[\"pv_uuid\"], vgName)\n\n pvUsedF, err := strconv.ParseFloat(strings.Trim(pv[\"pv_used\"], \"B\"), 64)\n if err != nil {\n log.Print(err)\n return\n }\n ch <- prometheus.MustNewConstMetric(pvUsedMetric, prometheus.GaugeValue, pvUsedF, 
pv[\"pv_name\"], pv[\"pv_uuid\"], vgName)\n }\n}", "func (m VarnishPlugin) FetchMetrics() (map[string]interface{}, error) {\n\tvar out []byte\n\tvar err error\n\n\tif m.VarnishName == \"\" {\n\t\tout, err = exec.Command(m.VarnishStatPath, \"-1\").CombinedOutput()\n\t} else {\n\t\tout, err = exec.Command(m.VarnishStatPath, \"-1\", \"-n\", m.VarnishName).CombinedOutput()\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%s: %s\", err, out)\n\t}\n\n\tlineexp := regexp.MustCompile(`^([^ ]+) +(\\d+)`)\n\tsmaexp := regexp.MustCompile(`^SMA\\.([^\\.]+)\\.(.+)$`)\n\n\tstat := map[string]interface{}{\n\t\t\"requests\": float64(0),\n\t}\n\n\tvar tmpv float64\n\tfor _, line := range strings.Split(string(out), \"\\n\") {\n\t\tmatch := lineexp.FindStringSubmatch(line)\n\t\tif match == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\ttmpv, err = strconv.ParseFloat(match[2], 64)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch match[1] {\n\t\tcase \"cache_hit\", \"MAIN.cache_hit\":\n\t\t\tstat[\"cache_hits\"] = tmpv\n\t\t\tstat[\"requests\"] = stat[\"requests\"].(float64) + tmpv\n\t\tcase \"cache_miss\", \"MAIN.cache_miss\":\n\t\t\tstat[\"requests\"] = stat[\"requests\"].(float64) + tmpv\n\t\tcase \"cache_hitpass\", \"MAIN.cache_hitpass\":\n\t\t\tstat[\"requests\"] = stat[\"requests\"].(float64) + tmpv\n\t\tcase \"MAIN.backend_req\":\n\t\t\tstat[\"backend_req\"] = tmpv\n\t\tcase \"MAIN.backend_conn\":\n\t\t\tstat[\"backend_conn\"] = tmpv\n\t\tcase \"MAIN.backend_fail\":\n\t\t\tstat[\"backend_fail\"] = tmpv\n\t\tcase \"MAIN.backend_reuse\":\n\t\t\tstat[\"backend_reuse\"] = tmpv\n\t\tcase \"MAIN.backend_recycle\":\n\t\t\tstat[\"backend_recycle\"] = tmpv\n\t\tcase \"MAIN.n_object\":\n\t\t\tstat[\"n_object\"] = tmpv\n\t\tcase \"MAIN.n_objectcore\":\n\t\t\tstat[\"n_objectcore\"] = tmpv\n\t\tcase \"MAIN.n_expired\":\n\t\t\tstat[\"n_expired\"] = tmpv\n\t\tcase \"MAIN.n_objecthead\":\n\t\t\tstat[\"n_objecthead\"] = tmpv\n\t\tcase \"MAIN.busy_sleep\":\n\t\t\tstat[\"busy_sleep\"] = tmpv\n\t\tcase \"MAIN.busy_wakeup\":\n\t\t\tstat[\"busy_wakeup\"] = tmpv\n\t\tdefault:\n\t\t\tsmamatch := smaexp.FindStringSubmatch(match[1])\n\t\t\tif smamatch == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif smamatch[2] == \"g_alloc\" {\n\t\t\t\tstat[\"varnish.sma.g_alloc.\"+smamatch[1]+\".g_alloc\"] = tmpv\n\t\t\t} else if smamatch[2] == \"g_bytes\" {\n\t\t\t\tstat[\"varnish.sma.memory.\"+smamatch[1]+\".allocated\"] = tmpv\n\t\t\t} else if smamatch[2] == \"g_space\" {\n\t\t\t\tstat[\"varnish.sma.memory.\"+smamatch[1]+\".available\"] = tmpv\n\t\t\t}\n\t\t}\n\t}\n\n\treturn stat, err\n}", "func (throttler *Throttler) aggregateMySQLMetrics(ctx context.Context) error {\n\tfor clusterName, probes := range throttler.mysqlInventory.ClustersProbes {\n\t\tmetricName := fmt.Sprintf(\"mysql/%s\", clusterName)\n\t\tignoreHostsCount := throttler.mysqlInventory.IgnoreHostsCount[clusterName]\n\t\tignoreHostsThreshold := throttler.mysqlInventory.IgnoreHostsThreshold[clusterName]\n\t\taggregatedMetric := aggregateMySQLProbes(ctx, probes, clusterName, throttler.mysqlInventory.InstanceKeyMetrics, ignoreHostsCount, config.Settings().Stores.MySQL.IgnoreDialTCPErrors, ignoreHostsThreshold)\n\t\tthrottler.aggregatedMetrics.Set(metricName, aggregatedMetric, cache.DefaultExpiration)\n\t}\n\treturn nil\n}", "func Collectmem(serverName string) (Metric, error) {\n\tvalues := Metric{}\n\tvar err error\n\tvalues.Timestamp = time.Now()\n\tvalues.MetricType = \"mem\"\n\tvar output string\n\n\tvar response cpmserverapi.MetricMEMResponse\n\trequest := 
&cpmserverapi.MetricMEMRequest{}\n\tresponse, err = cpmserverapi.MetricMEMClient(serverName, request)\n\tif err != nil {\n\t\tlogit.Error.Println(\"mem metric error:\" + err.Error())\n\t\treturn values, err\n\t}\n\n\toutput = strings.TrimSpace(response.Output)\n\n\tvalues.Value, err = strconv.ParseFloat(output, 64)\n\tif err != nil {\n\t\tlogit.Error.Println(\"parseFloat error in mem metric \" + err.Error())\n\t}\n\n\treturn values, err\n}", "func appStatsCollect(ctx *zedrouterContext) {\n\tlog.Infof(\"appStatsCollect: containerStats, started\")\n\tappStatsCollectTimer := time.NewTimer(time.Duration(ctx.appStatsInterval) * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-appStatsCollectTimer.C:\n\t\t\titems, stopped := checkAppStopStatsCollect(ctx)\n\t\t\tif stopped {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tcollectTime := time.Now() // all apps collection assign the same timestamp\n\t\t\tfor _, st := range items {\n\t\t\t\tstatus := st.(types.AppNetworkStatus)\n\t\t\t\tif status.GetStatsIPAddr != nil {\n\t\t\t\t\tacMetrics, err := appContainerGetStats(status.GetStatsIPAddr)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Errorf(\"appStatsCollect: can't get App %s Container Metrics on %s, %v\",\n\t\t\t\t\t\t\tstatus.UUIDandVersion.UUID.String(), status.GetStatsIPAddr.String(), err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tacMetrics.UUIDandVersion = status.UUIDandVersion\n\t\t\t\t\tacMetrics.CollectTime = collectTime\n\t\t\t\t\tctx.pubAppContainerMetrics.Publish(acMetrics.Key(), acMetrics)\n\t\t\t\t}\n\t\t\t}\n\t\t\tappStatsCollectTimer = time.NewTimer(time.Duration(ctx.appStatsInterval) * time.Second)\n\t\t}\n\t}\n}", "func stats(stats elastic.BulkProcessorStats) {\n\t//构建Workers的json文本\n\tvar workersStr string\n\tvar workers Workers\n\tif err := workers.InitWorkers(stats.Workers); err == nil {\n\t\tworkersStr = workers.String()\n\t}\n\n\t//打印stats信息\n\tlog.Logger.WithFields(logrus.Fields{\n\t\t\"Flushed\": stats.Flushed,\n\t\t\"Committed\": stats.Committed,\n\t\t\"Indexed\": stats.Indexed,\n\t\t\"Created\": stats.Created,\n\t\t\"Updated\": stats.Updated,\n\t\t\"Deleted\": stats.Deleted,\n\t\t\"Succeeded\": stats.Succeeded,\n\t\t\"Failed\": stats.Failed,\n\t\t\"Workers\": workersStr,\n\t}).Info(\"stats info detail\")\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\tup := e.scrape(ch)\n\n\tch <- prometheus.MustNewConstMetric(artifactoryUp, prometheus.GaugeValue, up)\n\tch <- e.totalScrapes\n\tch <- e.jsonParseFailures\n}", "func dailyTaskStatsPipeline(projectId string, requester string, start time.Time, end time.Time, tasks []string, lastUpdate time.Time) []bson.M {\n\treturn getDailyTaskStatsPipeline(projectId, requester, start, end, tasks, lastUpdate, false)\n}", "func (n *Vspheretpgy) Gather(acc telegraf.Accumulator) error {\n\t// setPrecision function is the same as `acc.SetPrecision(time.Nanosecond, 0)`\n\tsetPrecisionForVsphere(&acc)\n\n\tfor i, urls := range n.Urls {\n\t\tif len(urls) == 0 {\n\t\t\tlog.Printf(\"Need to put vCenter information!\\n\")\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif len(urls) != 4 {\n\t\t\tacc.AddError(fmt.Errorf(\"the %d_th vsphere configuration is incorrect! 
\", i+1))\n\n\t\t\tcontinue\n\t\t}\n\t\t// for a give set of vcsas\n\t\tvc, err := vcsa.NewVcsaConnector(urls[0], urls[1], urls[2], urls[3], true)\n\t\tif err != nil {\n\t\t\tacc.AddError(fmt.Errorf(\"failed to connect '%v\", err))\n\n\t\t\tcontinue\n\t\t}\n\n\t\tdcs, err := dcai.FetchVsphereTopology(vc)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttimeStamp := fmt.Sprintf(\"%d\", time.Now().UnixNano())\n\n\t\terr = startAccNeo4j(vc, dcs, timeStamp, acc)\n\t\treturn err\n\n\t}\n\n\treturn nil\n}", "func (exp *Expvar) Collect() (map[string]interface{}, error) {\n\treq, err := http.NewRequest(http.MethodGet, exp.host, nil)\n\tlog.Println(exp.host)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn nil, err\n\t}\n\n\tresp, err := exp.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := make(map[string]interface{})\n\tif err := json.NewDecoder(resp.Body).Decode(&data); err != nil {\n\t\treturn nil, err\n\t}\n\n\tmemStats, ok := (data[\"memstats\"]).(map[string]interface{})\n\tif ok {\n\t\tdata[\"heap\"] = memStats[\"Alloc\"]\n\t}\n\n\tu, err := url.Parse(exp.host)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdata[\"host\"] = u.Hostname()\n\n\tdelete(data, \"memStats\")\n\tdelete(data, \"cmdline\")\n\n\treturn data, nil\n}", "func ProcStat(c *gin.Context) {\n\tres := CmdExec(\"cat /proc/stat | head -n 1 | awk '{$1=\\\"\\\";print}'\")\n\tresArray := strings.Split(res[0], \" \")\n\tvar cpu []int64\n\tvar totalcpu, idlecpu int64\n\tfor _, v := range resArray {\n\t\ttemp, err := strconv.ParseInt(v, 10, 64)\n\t\tif err == nil {\n\t\t\tcpu = append(cpu, temp)\n\t\t\ttotalcpu = totalcpu + temp\n\t\t}\n\t}\n\tidlecpu = cpu[3]\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"totalcpu\": totalcpu,\n\t\t\"idlecpu\": idlecpu,\n\t})\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\t// Protect metrics from concurrent collects.\n\te.mutex.Lock()\n\tdefer e.mutex.Unlock()\n\n\t// Scrape metrics from Tankerkoenig API.\n\tif err := e.scrape(ch); err != nil {\n\t\te.logger.Printf(\"error: cannot scrape tankerkoenig api: %v\", err)\n\t}\n\n\t// Collect metrics.\n\te.up.Collect(ch)\n\te.scrapeDuration.Collect(ch)\n\te.failedScrapes.Collect(ch)\n\te.totalScrapes.Collect(ch)\n}", "func dailyTaskStatsForOldTasksPipeline(projectId string, requester string, start time.Time, end time.Time, tasks []string, lastUpdate time.Time) []bson.M {\n\t// Using the same pipeline as for the tasks collection as the base.\n\tbasePipeline := getDailyTaskStatsPipeline(projectId, requester, start, end, tasks, lastUpdate, true)\n\t// And the merge the documents with the existing ones.\n\tmergePipeline := []bson.M{\n\t\t{\"$lookup\": bson.M{\n\t\t\t\"from\": dailyTaskStatsCollection,\n\t\t\t\"localField\": \"_id\",\n\t\t\t\"foreignField\": \"_id\",\n\t\t\t\"as\": \"existing\",\n\t\t}},\n\t\t{\"$unwind\": bson.M{\n\t\t\t\"path\": \"$existing\",\n\t\t\t\"preserveNullAndEmptyArrays\": true,\n\t\t}},\n\t\t{\"$project\": bson.M{\n\t\t\t\"_id\": 1,\n\t\t\t\"num_success\": bson.M{\"$add\": array{\"$num_success\", \"$existing.num_success\"}},\n\t\t\t\"num_failed\": bson.M{\"$add\": array{\"$num_failed\", \"$existing.num_failed\"}},\n\t\t\t\"num_test_failed\": bson.M{\"$add\": array{\"$num_test_failed\", \"$existing.num_test_failed\"}},\n\t\t\t\"num_setup_failed\": bson.M{\"$add\": array{\"$num_setup_failed\", \"$existing.num_setup_failed\"}},\n\t\t\t\"num_system_failed\": bson.M{\"$add\": array{\"$num_system_failed\", \"$existing.num_system_failed\"}},\n\t\t\t\"num_timeout\": bson.M{\"$add\": 
array{\"$num_timeout\", \"$existing.num_timeout\"}},\n\t\t\t\"total_duration_success\": bson.M{\"$add\": array{\n\t\t\t\tbson.M{\"$ifNull\": array{bson.M{\"$multiply\": array{\"$num_success\", \"$avg_duration_success\"}}, 0}},\n\t\t\t\tbson.M{\"$ifNull\": array{bson.M{\"$multiply\": array{\"$existing.num_success\", \"$existing.avg_duration_success\"}}, 0}},\n\t\t\t}},\n\t\t\t\"last_update\": 1,\n\t\t}},\n\t\t{\"$project\": bson.M{\n\t\t\t\"_id\": 1,\n\t\t\t\"num_success\": 1,\n\t\t\t\"num_failed\": 1,\n\t\t\t\"num_test_failed\": 1,\n\t\t\t\"num_setup_failed\": 1,\n\t\t\t\"num_system_failed\": 1,\n\t\t\t\"num_timeout\": 1,\n\t\t\t\"avg_duration_success\": bson.M{\"$cond\": bson.M{\"if\": bson.M{\"$ne\": array{\"$num_success\", 0}},\n\t\t\t\t\"then\": bson.M{\"$divide\": array{\"$total_duration_success\", \"$num_success\"}},\n\t\t\t\t\"else\": nil}},\n\t\t\t\"last_update\": 1,\n\t\t}},\n\t}\n\treturn append(basePipeline, mergePipeline...)\n\n}", "func (pc *NginxProcessesMetricsCollector) Collect(ch chan<- prometheus.Metric) {\n\tpc.updateWorkerProcessCount()\n\tpc.workerProcessTotal.Collect(ch)\n}", "func (c *VM) Collect(ctx context.Context) error {\n\tmetrics := cgm.Metrics{}\n\n\tc.Lock()\n\n\tif c.runTTL > time.Duration(0) {\n\t\tif time.Since(c.lastEnd) < c.runTTL {\n\t\t\tc.logger.Warn().Msg(collector.ErrTTLNotExpired.Error())\n\t\t\tc.Unlock()\n\t\t\treturn collector.ErrTTLNotExpired\n\t\t}\n\t}\n\tif c.running {\n\t\tc.logger.Warn().Msg(collector.ErrAlreadyRunning.Error())\n\t\tc.Unlock()\n\t\treturn collector.ErrAlreadyRunning\n\t}\n\n\tc.running = true\n\tc.lastStart = time.Now()\n\tc.Unlock()\n\n\tif err := c.parseMemstats(ctx, &metrics); err != nil {\n\t\tc.setStatus(metrics, err)\n\t\treturn fmt.Errorf(\"%s parseMemstats: %w\", c.pkgID, err)\n\t}\n\n\tif err := c.parseVMstats(ctx, &metrics); err != nil {\n\t\tc.setStatus(metrics, err)\n\t\treturn fmt.Errorf(\"%s parseVMstats: %w\", c.pkgID, err)\n\t}\n\n\tc.setStatus(metrics, nil)\n\treturn nil\n}", "func processHealthMonitor(duration time.Duration) {\n\tfor {\n\t\t<-time.After(duration)\n\t\tvar numOfGoroutines = runtime.NumGoroutine()\n\t\t//var memStats runtime.MemStats\n\t\t//runtime.ReadMemStats(&memStats)\n\t\t//core.Info(\"Number of goroutines: %d\",numOfGoroutines)\n\t\t//core.Info(\"Mem stats: %v\",memStats)\n\t\tcore.CloudWatchClient.PutMetric(\"num_of_goroutines\", \"Count\", float64(numOfGoroutines), \"httshark_health_monitor\")\n\t}\n}", "func ProcMeminfo(c *gin.Context) {\n\tres := CmdExec(\"cat /proc/meminfo | head -n 2| awk '{print $2}'\")\n\ttotalMem, _ := strconv.Atoi(res[0])\n\tfreeMem, _ := strconv.Atoi(res[1])\n\tusedMem := totalMem - freeMem\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"totalMem\": totalMem,\n\t\t\"usedMem\": usedMem,\n\t})\n}", "func vgCollect(ch chan<- prometheus.Metric, vgs []map[string]string) {\n for _, vg := range vgs {\n vgSizeF, err := strconv.ParseFloat(strings.Trim(vg[\"vg_size\"], \"B\"), 64)\n if err != nil {\n log.Print(err)\n return\n }\n ch <- prometheus.MustNewConstMetric(vgSizeMetric, prometheus.GaugeValue, vgSizeF, vg[\"vg_name\"], vg[\"vg_uuid\"])\n\n vgFreeF, err := strconv.ParseFloat(strings.Trim(vg[\"vg_free\"], \"B\"), 64)\n if err != nil {\n log.Print(err)\n return\n }\n ch <- prometheus.MustNewConstMetric(vgFreeMetric, prometheus.GaugeValue, vgFreeF, vg[\"vg_name\"], vg[\"vg_uuid\"])\n }\n}", "func (k *KACollector) Collect(ch chan<- prometheus.Metric) {\n\tk.mutex.Lock()\n\tdefer k.mutex.Unlock()\n\n\tvar err error\n\tvar kaStats []KAStats\n\n\tif k.useJSON {\n\t\tkaStats, 
err = k.json()\n\t\tif err != nil {\n\t\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_up\"], prometheus.GaugeValue, 0)\n\t\t\tlog.Printf(\"keepalived_exporter: %v\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tkaStats, err = k.text()\n\t\tif err != nil {\n\t\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_up\"], prometheus.GaugeValue, 0)\n\t\t\tlog.Printf(\"keepalived_exporter: %v\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_up\"], prometheus.GaugeValue, 1)\n\n\tfor _, st := range kaStats {\n\t\tstate := \"\"\n\t\tif _, ok := state2string[st.Data.State]; ok {\n\t\t\tstate = state2string[st.Data.State]\n\t\t}\n\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_advert_rcvd\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.AdvertRcvd), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_advert_sent\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.AdvertSent), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_become_master\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.BecomeMaster), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_release_master\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.ReleaseMaster), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_packet_len_err\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.PacketLenErr), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_advert_interval_err\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.AdvertIntervalErr), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_ip_ttl_err\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.IPTTLErr), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_invalid_type_rcvd\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.InvalidTypeRcvd), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_addr_list_err\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.AddrListErr), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_invalid_authtype\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.InvalidAuthtype), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_authtype_mismatch\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.AuthtypeMismatch), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_auth_failure\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.AuthFailure), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_pri_zero_rcvd\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.PriZeroRcvd), st.Data.Iname, st.Data.IfpIfname, 
strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_pri_zero_sent\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.PriZeroSent), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t}\n\n\tif k.handle == nil {\n\t\treturn\n\t}\n\n\tsvcs, err := k.handle.GetServices()\n\tif err != nil {\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_up\"], prometheus.GaugeValue, 0)\n\t\tlog.Printf(\"keepalived_exporter: services: %v\", err)\n\t\treturn\n\t}\n\n\tfor _, s := range svcs {\n\t\tdsts, err := k.handle.GetDestinations(s)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"keepalived_exporter: destinations: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\taddr := s.Address.String() + \":\" + strconv.Itoa(int(s.Port))\n\t\tproto := strconv.Itoa(int(s.Protocol))\n\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_vip_in_packets\"], prometheus.CounterValue,\n\t\t\tfloat64(s.Stats.PacketsIn), addr, proto)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_vip_out_packets\"], prometheus.CounterValue,\n\t\t\tfloat64(s.Stats.PacketsOut), addr, proto)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_vip_in_bytes\"], prometheus.CounterValue,\n\t\t\tfloat64(s.Stats.BytesIn), addr, proto)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_vip_out_bytes\"], prometheus.CounterValue,\n\t\t\tfloat64(s.Stats.BytesOut), addr, proto)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_vip_conn\"], prometheus.CounterValue,\n\t\t\tfloat64(s.Stats.Connections), addr, proto)\n\n\t\tfor _, d := range dsts {\n\t\t\taddr := d.Address.String() + \":\" + strconv.Itoa(int(d.Port))\n\n\t\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_rs_in_packets\"], prometheus.CounterValue,\n\t\t\t\tfloat64(d.Stats.PacketsIn), addr, proto)\n\t\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_rs_out_packets\"], prometheus.CounterValue,\n\t\t\t\tfloat64(d.Stats.PacketsOut), addr, proto)\n\t\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_rs_in_bytes\"], prometheus.CounterValue,\n\t\t\t\tfloat64(d.Stats.BytesIn), addr, proto)\n\t\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_rs_out_bytes\"], prometheus.CounterValue,\n\t\t\t\tfloat64(d.Stats.BytesOut), addr, proto)\n\t\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_rs_conn\"], prometheus.CounterValue,\n\t\t\t\tfloat64(d.Stats.Connections), addr, proto)\n\t\t}\n\t}\n}", "func Collectcpu(serverName string) (Metric, error) {\n\tvalues := Metric{}\n\tvar err error\n\tvalues.Timestamp = time.Now()\n\tvalues.MetricType = \"cpu\"\n\n\tvar response cpmserverapi.MetricCPUResponse\n\trequest := &cpmserverapi.MetricCPURequest{}\n\tresponse, err = cpmserverapi.MetricCPUClient(serverName, request)\n\tif err != nil {\n\t\tlogit.Error.Println(\"cpu metric error:\" + err.Error())\n\t\treturn values, err\n\t}\n\n\tvar output = strings.TrimSpace(response.Output)\n\n\tvalues.Value, err = strconv.ParseFloat(output, 64)\n\tif err != nil {\n\t\tlogit.Error.Println(\"parseFloat error in cpu metric \" + err.Error())\n\t}\n\n\treturn values, err\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock()\n\tdefer e.mutex.Unlock()\n\n\tfor _, vec := range e.gauges {\n\t\tvec.Reset()\n\t}\n\n\tdefer func() { ch <- e.up }()\n\n\t// If we fail at any point in retrieving GPU status, we fail 0\n\te.up.Set(1)\n\n\te.GetTelemetryFromNVML()\n\n\tfor _, vec 
:= range e.gauges {\n\t\tvec.Collect(ch)\n\t}\n}", "func (n *RouterNode) GatherMetrics() {\n\tn.Lock()\n\tdefer n.Unlock()\n\n\tlevel.Debug(n.logger).Log(\n\t\t\"msg\", \"GatherMetrics() locked\",\n\t)\n\n\tif time.Now().Unix() < n.nextCollectionTicker {\n\t\treturn\n\t}\n\tstart := time.Now()\n\tif len(n.metrics) > 0 {\n\t\tn.metrics = n.metrics[:0]\n\t\tlevel.Debug(n.logger).Log(\n\t\t\t\"msg\", \"GatherMetrics() cleared metrics\",\n\t\t)\n\t}\n\tupValue := 1\n\n\t// What is RouterID and AS number of this GoBGP server?\n\tserver, err := n.client.GetBgp(context.Background(), &gobgpapi.GetBgpRequest{})\n\tif err != nil {\n\t\tn.IncrementErrorCounter()\n\t\tlevel.Error(n.logger).Log(\n\t\t\t\"msg\", \"failed query gobgp server\",\n\t\t\t\"error\", err.Error(),\n\t\t)\n\t\tif IsConnectionError(err) {\n\t\t\tn.connected = false\n\t\t\tupValue = 0\n\t\t}\n\t} else {\n\t\tn.routerID = server.Global.RouterId\n\t\tn.localAS = server.Global.Asn\n\t\tlevel.Debug(n.logger).Log(\n\t\t\t\"msg\", \"router info\",\n\t\t\t\"router_id\", n.routerID,\n\t\t\t\"local_asn\", n.localAS,\n\t\t)\n\t\tn.connected = true\n\t}\n\n\tif n.connected {\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(2)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tn.GetRibCounters()\n\t\t}()\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tn.GetPeers()\n\t\t}()\n\t\twg.Wait()\n\n\t}\n\n\t// Generic Metrics\n\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\trouterUp,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(upValue),\n\t))\n\n\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\trouterErrors,\n\t\tprometheus.CounterValue,\n\t\tfloat64(n.errors),\n\t))\n\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\trouterNextScrape,\n\t\tprometheus.CounterValue,\n\t\tfloat64(n.nextCollectionTicker),\n\t))\n\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\trouterScrapeTime,\n\t\tprometheus.GaugeValue,\n\t\ttime.Since(start).Seconds(),\n\t))\n\n\t// Router ID and ASN\n\tif n.routerID != \"\" {\n\t\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\t\trouterID,\n\t\t\tprometheus.GaugeValue,\n\t\t\t1,\n\t\t\tn.routerID,\n\t\t))\n\t}\n\tif n.localAS > 0 {\n\t\tn.metrics = append(n.metrics, prometheus.MustNewConstMetric(\n\t\t\trouterLocalAS,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(n.localAS),\n\t\t))\n\t}\n\n\tn.nextCollectionTicker = time.Now().Add(time.Duration(n.pollInterval) * time.Second).Unix()\n\n\tif upValue > 0 {\n\t\tn.result = \"success\"\n\t} else {\n\t\tn.result = \"failure\"\n\t}\n\tn.timestamp = time.Now().Format(time.RFC3339)\n\n\tlevel.Debug(n.logger).Log(\n\t\t\"msg\", \"GatherMetrics() returns\",\n\t)\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\tup, result := e.scrape(ch)\n\n\tch <- e.totalScrapes\n\tch <- e.jsonParseFailures\n\tch <- prometheus.MustNewConstMetric(iqAirUp, prometheus.GaugeValue, up)\n\tch <- prometheus.MustNewConstMetric(iqAirCO2, prometheus.GaugeValue, float64(result.CO2))\n\tch <- prometheus.MustNewConstMetric(iqAirP25, prometheus.GaugeValue, float64(result.P25))\n\tch <- prometheus.MustNewConstMetric(iqAirP10, prometheus.GaugeValue, float64(result.P10))\n\tch <- prometheus.MustNewConstMetric(iqAirTemp, prometheus.GaugeValue, float64(result.Temperature))\n\tch <- prometheus.MustNewConstMetric(iqAirHumidity, prometheus.GaugeValue, float64(result.Humidity))\n}", "func getDailyTaskStatsPipeline(projectId string, requester string, 
start time.Time, end time.Time, tasks []string, lastUpdate time.Time, oldTasks bool) []bson.M {\n\tvar taskIdExpr string\n\tvar displayTaskLookupCollection string\n\tif oldTasks {\n\t\ttaskIdExpr = \"$old_task_id\"\n\t\tdisplayTaskLookupCollection = task.OldCollection\n\t} else {\n\t\ttaskIdExpr = \"$_id\"\n\t\tdisplayTaskLookupCollection = task.Collection\n\t}\n\tpipeline := []bson.M{\n\t\t{\"$match\": bson.M{\n\t\t\ttask.ProjectKey: projectId,\n\t\t\ttask.RequesterKey: requester,\n\t\t\ttask.CreateTimeKey: bson.M{\"$gte\": start, \"$lt\": end},\n\t\t\ttask.DisplayNameKey: bson.M{\"$in\": tasks},\n\t\t}},\n\t\t{\"$project\": bson.M{\n\t\t\ttask.IdKey: 0,\n\t\t\t\"task_id\": taskIdExpr,\n\t\t\t\"execution\": taskExecutionRef,\n\t\t\t\"project\": taskProjectKeyRef,\n\t\t\t\"task_name\": taskDisplayNameKeyRef,\n\t\t\t\"variant\": taskBuildVariantKeyRef,\n\t\t\t\"distro\": taskDistroIdKeyRef,\n\t\t\t\"requester\": taskRequesterKeyRef,\n\t\t\t\"status\": taskStatusKeyRef,\n\t\t\t\"details\": taskDetailsKeyRef,\n\t\t\t\"time_taken\": bson.M{\"$divide\": array{taskTimeTakenKeyRef, nsInASecond}},\n\t\t}},\n\t\t{\"$lookup\": bson.M{\n\t\t\t\"from\": displayTaskLookupCollection,\n\t\t\t\"localField\": \"task_id\",\n\t\t\t\"foreignField\": task.ExecutionTasksKey,\n\t\t\t\"as\": \"display_task\",\n\t\t}},\n\t\t{\"$match\": bson.M{\"display_task\": array{}}}, // Excluding the execution tasks\n\t\t{\"$group\": bson.M{\n\t\t\t\"_id\": bson.D{\n\t\t\t\t{Name: \"task_name\", Value: \"$task_name\"},\n\t\t\t\t{Name: \"variant\", Value: \"$variant\"},\n\t\t\t\t{Name: \"distro\", Value: \"$distro\"},\n\t\t\t\t{Name: \"project\", Value: \"$project\"},\n\t\t\t\t{Name: \"requester\", Value: \"$requester\"}},\n\t\t\t\"num_success\": makeSum(bson.M{\"$eq\": array{\"$status\", \"success\"}}),\n\t\t\t\"num_failed\": makeSum(bson.M{\"$eq\": array{\"$status\", \"failed\"}}),\n\t\t\t\"num_timeout\": makeSum(bson.M{\"$and\": array{\n\t\t\t\tbson.M{\"$eq\": array{\"$status\", \"failed\"}},\n\t\t\t\tbson.M{\"$eq\": array{\"$details.timed_out\", true}}}}),\n\t\t\t\"num_test_failed\": makeSum(bson.M{\"$and\": array{\n\t\t\t\tbson.M{\"$eq\": array{\"$status\", \"failed\"}},\n\t\t\t\tbson.M{\"$eq\": array{\"$details.type\", \"test\"}},\n\t\t\t\tbson.M{\"$ne\": array{\"$details.timed_out\", true}}}}),\n\t\t\t\"num_system_failed\": makeSum(bson.M{\"$and\": array{\n\t\t\t\tbson.M{\"$eq\": array{\"$status\", \"failed\"}},\n\t\t\t\tbson.M{\"$eq\": array{\"$details.type\", \"system\"}},\n\t\t\t\tbson.M{\"$ne\": array{\"$details.timed_out\", true}}}}),\n\t\t\t\"num_setup_failed\": makeSum(bson.M{\"$and\": array{\n\t\t\t\tbson.M{\"$eq\": array{\"$status\", \"failed\"}},\n\t\t\t\tbson.M{\"$eq\": array{\"$details.type\", \"setup\"}},\n\t\t\t\tbson.M{\"$ne\": array{\"$details.timed_out\", true}}}}),\n\t\t\t\"avg_duration_success\": bson.M{\"$avg\": bson.M{\"$cond\": bson.M{\"if\": bson.M{\"$eq\": array{\"$status\", \"success\"}},\n\t\t\t\t\"then\": \"$time_taken\", \"else\": \"IGNORE\"}}}}},\n\t\t{\"$addFields\": bson.M{\n\t\t\t\"_id.date\": start,\n\t\t\t\"last_update\": lastUpdate,\n\t\t}},\n\t}\n\treturn pipeline\n}", "func (c *VM) Collect() error {\n\tmetrics := cgm.Metrics{}\n\n\tc.Lock()\n\n\tif c.runTTL > time.Duration(0) {\n\t\tif time.Since(c.lastEnd) < c.runTTL {\n\t\t\tc.logger.Warn().Msg(collector.ErrTTLNotExpired.Error())\n\t\t\tc.Unlock()\n\t\t\treturn collector.ErrTTLNotExpired\n\t\t}\n\t}\n\tif c.running {\n\t\tc.logger.Warn().Msg(collector.ErrAlreadyRunning.Error())\n\t\tc.Unlock()\n\t\treturn 
collector.ErrAlreadyRunning\n\t}\n\n\tc.running = true\n\tc.lastStart = time.Now()\n\tc.Unlock()\n\n\tif err := c.parseMemstats(&metrics); err != nil {\n\t\tc.setStatus(metrics, err)\n\t\treturn errors.Wrap(err, c.pkgID)\n\t}\n\n\tif err := c.parseVMstats(&metrics); err != nil {\n\t\tc.setStatus(metrics, err)\n\t\treturn errors.Wrap(err, c.pkgID)\n\t}\n\n\tc.setStatus(metrics, nil)\n\treturn nil\n}", "func (collector *atlassianUPMCollector) Collect(ch chan<- prometheus.Metric) {\n\tstartTime := time.Now()\n\tlog.Debug(\"Collect start\")\n\n\tlog.Debug(\"create request object\")\n\treq, err := http.NewRequest(\"GET\", baseURL, nil)\n\tif err != nil {\n\t\tlog.Error(\"http.NewRequest returned an error:\", err)\n\t}\n\n\tlog.Debug(\"create Basic auth string from argument passed\")\n\tbearer = \"Basic \" + *token\n\n\tlog.Debug(\"add authorization header to the request\")\n\treq.Header.Add(\"Authorization\", bearer)\n\n\tlog.Debug(\"add content type to the request\")\n\treq.Header.Add(\"content-type\", \"application/json\")\n\n\tlog.Debug(\"make request... get back a response\")\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tlog.Debug(\"set metric atlassian_upm_rest_url_up\")\n\t\tch <- prometheus.MustNewConstMetric(collector.atlassianUPMUpMetric, prometheus.GaugeValue, 0, *fqdn)\n\t\tlog.Warn(\"http.DefaultClient.Do returned an error:\", err, \" return from Collect\")\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\tlog.Debug(\"response status code: \", resp.StatusCode)\n\t}\n\n\tlog.Debug(\"set metric atlassian_upm_rest_url_up\")\n\tch <- prometheus.MustNewConstMetric(collector.atlassianUPMUpMetric, prometheus.GaugeValue, 1, *fqdn)\n\n\tvar allPlugins restPlugins\n\tif resp.StatusCode == 200 {\n\t\tlog.Debug(\"get all plugins\")\n\t\tallPlugins = plugins(resp)\n\n\t\t// return user-installed plugins if argument passed\n\t\tif *userInstalled {\n\t\t\tlog.Debug(\"-user-installed found\")\n\t\t\tallPlugins = userInstalledPlugins(allPlugins)\n\t\t}\n\n\t\t// plugins have the ability to be installed, but disabled, this will remove them if disabled\n\t\tif *dropDisabled {\n\t\t\tlog.Debug(\"-drop-disabled found\")\n\t\t\tallPlugins = dropDisabledPlugins(allPlugins)\n\t\t}\n\n\t\t// Jira specific\n\t\t// some plugins maintained by Jira have an additional element, this gives the option to drop those plugins\n\t\tif *dropJiraSoftware {\n\t\t\tlog.Debug(\"-drop-jira-software found\")\n\t\t\tallPlugins = dropJiraSoftwarePlugins(allPlugins)\n\t\t}\n\n\t\tlog.Debug(\"range over values in response, add each as metric with labels\")\n\t\tfor _, plugin := range allPlugins.Plugins {\n\n\t\t\tlog.Debug(\"creating plugin metric for: \" + plugin.Name)\n\t\t\tch <- prometheus.MustNewConstMetric(\n\t\t\t\tcollector.atlassianUPMPlugins,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\t0,\n\t\t\t\tstrconv.FormatBool(plugin.Enabled), // convert bool to string for the 'enabled' value in the labels\n\t\t\t\tstring(plugin.Name),\n\t\t\t\tstring(plugin.Key),\n\t\t\t\tstring(plugin.Version),\n\t\t\t\tstrconv.FormatBool(plugin.UserInstalled),\n\t\t\t\t*fqdn,\n\t\t\t)\n\t\t}\n\t}\n\n\tif resp.StatusCode == 200 && *checkUpdates {\n\t\tlog.Debug(\"get remaining plugins available info\")\n\t\tavailablePluginsMap := getAvailablePluginInfo(allPlugins)\n\n\t\tlog.Debug(\"range over values in response, add each as metric with labels\")\n\t\tfor _, plugin := range availablePluginsMap {\n\t\t\tavailableUpdate := false\n\n\t\t\tverInstalled, err := 
version.NewVersion(plugin.InstalledVersion)\n\t\t\tif err != nil {\n\t\t\t\tlog.Debug(\"error turning plugin installed into version object\")\n\t\t\t}\n\n\t\t\tverAvailable, err := version.NewVersion(plugin.Version)\n\t\t\tif err != nil {\n\t\t\t\tlog.Debug(\"error turning available plugin into version object\")\n\t\t\t}\n\n\t\t\tif verInstalled.LessThan(verAvailable) {\n\t\t\t\tlog.Debug(\"plugin: \", plugin.Name, \", is currently running: \", plugin.InstalledVersion, \", and can be upgraded to: \", plugin.Version)\n\t\t\t\tavailableUpdate = true\n\t\t\t}\n\n\t\t\tlog.Debug(\"creating plugin version metric for: \", plugin.Name, \", with Key: \", plugin.Key)\n\t\t\tch <- prometheus.MustNewConstMetric(\n\t\t\t\tcollector.atlassianUPMVersionsMetric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tboolToFloat(availableUpdate),\n\t\t\t\tstring(plugin.Name),\n\t\t\t\tstring(plugin.Key),\n\t\t\t\tstring(plugin.Version),\n\t\t\t\tstring(plugin.InstalledVersion),\n\t\t\t\tstrconv.FormatBool(plugin.Enabled), // convert bool to string for the 'enabled' value in the labels\n\t\t\t\tstrconv.FormatBool(plugin.UserInstalled),\n\t\t\t\t*fqdn,\n\t\t\t)\n\t\t}\n\t}\n\n\tfinishTime := time.Now()\n\telapsedTime := finishTime.Sub(startTime)\n\tlog.Debug(\"set the duration metric\")\n\tch <- prometheus.MustNewConstMetric(collector.atlassianUPMTimeMetric, prometheus.GaugeValue, elapsedTime.Seconds(), *fqdn)\n\n\tlog.Debug(\"Collect finished\")\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\tif err := e.scrape(); err != nil {\n\t\tlog.Error(err)\n\t\tnomad_up.Set(0)\n\t\tch <- nomad_up\n\t\treturn\n\t}\n\n\tch <- nomad_up\n\tch <- metric_uptime\n\tch <- metric_request_response_time_total\n\tch <- metric_request_response_time_avg\n\n\tfor _, metric := range metric_request_status_count_current {\n\t\tch <- metric\n\t}\n\tfor _, metric := range metric_request_status_count_total {\n\t\tch <- metric\n\t}\n}", "func (c *solarCollector) collect(ch chan<- prometheus.Metric) error {\n\t// fetch the status of the controller\n\ttracer, err := gotracer.Status(\"/dev/ttyUSB0\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t/*\n\t * report the collected data\n\t */\n\n\t// store boolean values as a float (1 == true, 0 == false)\n\tvar loadIsActive float64\n\t// Panel array\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.panelVoltage,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.ArrayVoltage),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.panelCurrent,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.ArrayCurrent),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.panelPower,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.ArrayPower),\n\t)\n\n\t// Batteries\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.batteryCurrent,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.BatteryCurrent),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.batteryVoltage,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.BatteryVoltage),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.batterySOC,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.BatterySOC),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.batteryTemp,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.BatteryTemp),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.batteryMinVoltage,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.BatteryMinVoltage),\n\t)\n\tch <- 
prometheus.MustNewConstMetric(\n\t\tc.batteryMaxVoltage,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.BatteryMaxVoltage),\n\t)\n\n\t// Load output\n\tif tracer.Load {\n\t\tloadIsActive = 1\n\t}\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.loadActive,\n\t\tprometheus.GaugeValue,\n\t\tloadIsActive,\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.loadVoltage,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.LoadVoltage),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.loadCurrent,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.LoadCurrent),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.loadPower,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.LoadPower),\n\t)\n\n\t// controller infos\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.deviceTemp,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.DeviceTemp),\n\t)\n\n\t// energy consumed\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.energyConsumedDaily,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.EnergyConsumedDaily),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.energyConsumedMonthly,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.EnergyConsumedMonthly),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.energyConsumedAnnual,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.EnergyConsumedAnnual),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.energyConsumedTotal,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.EnergyConsumedTotal),\n\t)\n\t// energy generated\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.energyGeneratedDaily,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.EnergyGeneratedDaily),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.energyGeneratedMonthly,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.EnergyGeneratedMonthly),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.energyGeneratedAnnual,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.EnergyGeneratedAnnual),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.energyGeneratedTotal,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.EnergyGeneratedTotal),\n\t)\n\n\treturn nil\n}", "func (g gatherer) GatherMetrics(ctx context.Context, m *elasticapm.Metrics) error {\n\tg.r.Each(func(name string, v interface{}) {\n\t\tswitch v := v.(type) {\n\t\tcase metrics.Counter:\n\t\t\tm.Add(name, nil, float64(v.Count()))\n\t\tcase metrics.Gauge:\n\t\t\tm.Add(name, nil, float64(v.Value()))\n\t\tcase metrics.GaugeFloat64:\n\t\t\tm.Add(name, nil, v.Value())\n\t\tcase metrics.Histogram:\n\t\t\tm.Add(name+\".count\", nil, float64(v.Count()))\n\t\t\tm.Add(name+\".total\", nil, float64(v.Sum()))\n\t\t\tm.Add(name+\".min\", nil, float64(v.Min()))\n\t\t\tm.Add(name+\".max\", nil, float64(v.Max()))\n\t\t\tm.Add(name+\".stddev\", nil, v.StdDev())\n\t\t\tm.Add(name+\".percentile.50\", nil, v.Percentile(0.5))\n\t\t\tm.Add(name+\".percentile.95\", nil, v.Percentile(0.95))\n\t\t\tm.Add(name+\".percentile.99\", nil, v.Percentile(0.99))\n\t\tdefault:\n\t\t\t// TODO(axw) Meter, Timer, EWMA\n\t\t}\n\t})\n\treturn nil\n}", "func hourlyTestStatsForOldTasksPipeline(projectId string, requester string, start time.Time, end time.Time, tasks []string, lastUpdate time.Time) []bson.M {\n\t// Using the same pipeline as for the tasks collection as the base.\n\tbasePipeline := getHourlyTestStatsPipeline(projectId, requester, start, end, tasks, lastUpdate, true)\n\t// And then merge the documents with the existing ones.\n\tmergePipeline := []bson.M{\n\t\t{\"$lookup\": bson.M{\n\t\t\t\"from\": hourlyTestStatsCollection,\n\t\t\t\"localField\": \"_id\",\n\t\t\t\"foreignField\": 
\"_id\",\n\t\t\t\"as\": \"existing\",\n\t\t}},\n\t\t{\"$unwind\": bson.M{\n\t\t\t\"path\": \"$existing\",\n\t\t\t\"preserveNullAndEmptyArrays\": true,\n\t\t}},\n\t\t{\"$project\": bson.M{\n\t\t\t\"_id\": 1,\n\t\t\t\"num_pass\": bson.M{\"$add\": array{\"$num_pass\", \"$existing.num_pass\"}},\n\t\t\t\"num_fail\": bson.M{\"$add\": array{\"$num_fail\", \"$existing.num_fail\"}},\n\t\t\t\"total_duration_pass\": bson.M{\"$add\": array{\n\t\t\t\tbson.M{\"$ifNull\": array{bson.M{\"$multiply\": array{\"$num_pass\", \"$avg_duration_pass\"}}, 0}},\n\t\t\t\tbson.M{\"$ifNull\": array{bson.M{\"$multiply\": array{\"$existing.num_pass\", \"$existing.avg_duration_pass\"}}, 0}},\n\t\t\t}},\n\t\t\t\"last_update\": 1,\n\t\t}},\n\t\t{\"$project\": bson.M{\n\t\t\t\"_id\": 1,\n\t\t\t\"num_pass\": 1,\n\t\t\t\"num_fail\": 1,\n\t\t\t\"avg_duration_pass\": bson.M{\"$cond\": bson.M{\"if\": bson.M{\"$ne\": array{\"$num_pass\", 0}},\n\t\t\t\t\"then\": bson.M{\"$divide\": array{\"$total_duration_pass\", \"$num_pass\"}},\n\t\t\t\t\"else\": nil}},\n\t\t\t\"last_update\": 1,\n\t\t}},\n\t}\n\treturn append(basePipeline, mergePipeline...)\n}", "func ProcLoadavg(c *gin.Context) {\n\tres := CmdExec(\"cat /proc/loadavg\")\n\tresArray := strings.Split(res[0], \" \")\n\tload5, _ := strconv.ParseFloat(resArray[0], 32)\n\tload10, _ := strconv.ParseFloat(resArray[1], 32)\n\tload15, _ := strconv.ParseFloat(resArray[2], 32)\n\trunningString := strings.Split(resArray[3], \"/\")\n\trunningprocess, _ := strconv.Atoi(runningString[0])\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"load5\": load5,\n\t\t\"load10\": load10,\n\t\t\"load15\": load15,\n\t\t\"runningprocess\": runningprocess,\n\t})\n}", "func measureSpammerMetrics() {\n\tif spammerStartTime.IsZero() {\n\t\t// Spammer not started yet\n\t\treturn\n\t}\n\n\tsentSpamMsgsCnt := deps.ServerMetrics.SentSpamMessages.Load()\n\tnew := utils.GetUint32Diff(sentSpamMsgsCnt, lastSentSpamMsgsCnt)\n\tlastSentSpamMsgsCnt = sentSpamMsgsCnt\n\n\tspammerAvgHeap.Add(uint64(new))\n\n\ttimeDiff := time.Since(spammerStartTime)\n\tif timeDiff > 60*time.Second {\n\t\t// Only filter over one minute maximum\n\t\ttimeDiff = 60 * time.Second\n\t}\n\n\t// trigger events for outside listeners\n\tEvents.AvgSpamMetricsUpdated.Trigger(&spammer.AvgSpamMetrics{\n\t\tNewMessages: new,\n\t\tAverageMessagesPerSecond: spammerAvgHeap.GetAveragePerSecond(timeDiff),\n\t})\n}", "func (c *Canary) GatherMetrics(config schemas.Config) error {\n\tif !c.StepStatus[constants.StepCleanChecking] {\n\t\treturn nil\n\t}\n\tif config.DisableMetrics {\n\t\treturn nil\n\t}\n\n\tif len(config.Region) > 0 {\n\t\tif !CheckRegionExist(config.Region, c.Stack.Regions) {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tif !config.CompleteCanary {\n\t\tc.Logger.Debug(\"Skip gathering metrics because canary is now applied\")\n\t\treturn nil\n\t}\n\n\tif err := c.Deployer.StartGatheringMetrics(config); err != nil {\n\t\treturn err\n\t}\n\n\tc.StepStatus[constants.StepGatherMetrics] = true\n\treturn nil\n}", "func (w *windowsResourceUsageGatherer) Gather(executor QueryExecutor, startTime time.Time, config *measurement.MeasurementConfig) ([]measurement.Summary, error) {\n\tcpuSummary, err := getSummary(cpuUsageQueryTop10, convertToCPUPerfData, cpuUsageMetricsName, executor, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmemorySummary, err := getSummary(memoryUsageQueryTop10, convertToMemoryPerfData, memoryUsageMetricsName, executor, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn []measurement.Summary{cpuSummary, memorySummary}, nil\n}", "func (m 
*arangodb) CollectAgentLogs(w io.Writer) error {\n\tif m.HasAgent() {\n\t\tif err := m.updateServerInfo(); err != nil {\n\t\t\treturn maskAny(err)\n\t\t}\n\t\tif err := m.collectServerLogs(w, \"agent\"); err != nil && errors.Cause(err) != io.EOF {\n\t\t\treturn maskAny(err)\n\t\t}\n\t\treturn nil\n\t}\n\treturn nil\n}", "func FetchAppServerMemStats(r Result) []float32 {\n\treturn r.AppServerStats().Mem\n}", "func (mtbulk *MTbulk) ResponseCollector(ctx context.Context) {\n\thostsErrors := make(map[entities.Host][]error)\n\ncollectorLoop:\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tbreak collectorLoop\n\t\tcase result, ok := <-mtbulk.Results:\n\t\t\tif !ok {\n\t\t\t\tbreak collectorLoop\n\t\t\t}\n\t\t\tif result.Errors != nil {\n\t\t\t\thostsErrors[result.Job.Host] = append(hostsErrors[result.Job.Host], result.Errors...)\n\t\t\t}\n\n\t\t\tif (mtbulk.Verbose && result.Job.Host != entities.Host{}) {\n\t\t\t\tfmt.Printf(\"%s > /// job: \\\"%s\\\"\\n\", result.Job.Host, result.Job.Kind)\n\t\t\t\tfor _, commandResult := range result.Results {\n\t\t\t\t\tfor _, response := range commandResult.Responses {\n\t\t\t\t\t\tfor _, line := range strings.Split(response, \"\\n\") {\n\t\t\t\t\t\t\tif line != \"\" {\n\t\t\t\t\t\t\t\tfmt.Printf(\"%s > %s\\n\", result.Job.Host, line)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif len(hostsErrors) == 0 {\n\t\treturn\n\t}\n\n\tmtbulk.Status.SetCode(1)\n\n\tif mtbulk.SkipSummary {\n\t\treturn\n\t}\n\n\tvulnerabilitiesDetected := false\n\n\tfmt.Println()\n\tfmt.Println(\"Errors list:\")\n\tfor host, errors := range hostsErrors {\n\t\tif host.IP != \"\" {\n\t\t\tfmt.Printf(\"Device: %s:%s\\n\", host.IP, host.Port)\n\t\t} else {\n\t\t\tfmt.Println(\"Generic:\")\n\t\t}\n\n\t\tfor _, err := range errors {\n\t\t\tif err == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfmt.Printf(\"\\t%s\\n\", err)\n\t\t\tif vul, ok := err.(vulnerabilities.VulnerabilityError); ok && vul.Vulnerabilities != nil {\n\t\t\t\tvulnerabilitiesDetected = true\n\t\t\t}\n\t\t}\n\t}\n\n\tif vulnerabilitiesDetected {\n\t\tcves := ExtractCVEs(hostsErrors)\n\n\t\tfmt.Println()\n\t\tfmt.Println(\"Detected CVE:\")\n\t\tfor _, cve := range cves {\n\t\t\tfmt.Printf(\"%s\\n\", cve)\n\t\t}\n\t}\n}", "func (collector *KubeletCmdCollector) Collect() error {\n\toutput, err := utils.RunCommandOnHost(\"ps\", \"-o\", \"cmd=\", \"-C\", \"kubelet\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcollector.KubeletCommand = output\n\n\treturn nil\n}", "func (c *metricCollector) collectMemory(service *rrhttp.Service, tick time.Duration) {\n\tstarted := false\n\tfor {\n\t\tserver := service.Server()\n\t\tif server == nil && started {\n\t\t\t// stopped\n\t\t\treturn\n\t\t}\n\n\t\tstarted = true\n\n\t\tif workers, err := util.ServerState(server); err == nil {\n\t\t\tsum := 0.0\n\t\t\tfor _, w := range workers {\n\t\t\t\tsum = sum + float64(w.MemoryUsage)\n\t\t\t}\n\n\t\t\tc.workersMemory.Set(sum)\n\t\t}\n\n\t\ttime.Sleep(tick)\n\t}\n}", "func (p *perfStoreManager) collect() {\n\tallCgroups, err := p.cgroupSt.ListAllCgroups(sets.NewString(appclass.AppClassOnline))\n\tif err != nil {\n\t\treturn\n\t}\n\n\tp.cpuLock.Lock()\n\tdefer p.cpuLock.Unlock()\n\n\twg := sync.WaitGroup{}\n\tfor k, v := range allCgroups {\n\t\t// skip cgroups matching the ignore list\n\t\tignoredCgroup := false\n\t\tfor _, ignored := range p.IgnoredCgroups {\n\t\t\tif checkSubCgroup(ignored, k) {\n\t\t\t\tklog.V(4).Infof(\"cgroup(%s) has been ignored\", k)\n\t\t\t\tignoredCgroup = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif ignoredCgroup {\n\t\t\tcontinue\n\t\t}\n\n\t\twg.Add(1)\n\t\tgo func(cg string, ref *cgstore.CgroupRef) {\n\t\t\tdefer 
wg.Done()\n\n\t\t\tcgPath, err := cgroup.GetPerfEventCgroupPath(cg)\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"get perf_event cgroup path err: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// check pids\n\t\t\tpids, err := cgroup.GetPids(cgPath)\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"cgroup(%s) get pid err: %v\", cg, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif len(pids) == 0 {\n\t\t\t\tklog.V(4).Infof(\"cgroup(%s) has no pid\", cg)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// read cpus\n\t\t\tcpus, err := cgroup.GetCpuSet(cg, true)\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"cgroup(%s) get cpu sets err: %v\", cg, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif len(cpus) == 0 {\n\t\t\t\tklog.Errorf(\"cgroup(%s) get cpu sets is nil\", cg)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tstart := time.Now()\n\t\t\tcpuStartTotal, err := cgroup.GetCPUTotalUsage(cg)\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"cgroup(%s) collect cpu usage failed: %v\", cg, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpmuData, err := pmu.GetPMUValue(int(p.CollectDuration.Seconds()),\n\t\t\t\tcgPath, strings.Join(cpus, \",\"))\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"cgroup(%s) collect perf data err: %v\", cg, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttimeElapsed := time.Since(start).Nanoseconds()\n\t\t\tcpuEndTotal, err := cgroup.GetCPUTotalUsage(cg)\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"cgroup(%s) collect cpu usage failed: %v\", cg, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpmuData.CPUUsage = float64(cpuEndTotal-cpuStartTotal) / float64(timeElapsed)\n\n\t\t\tmetric := &PerfMetrics{\n\t\t\t\tSpec: *ref,\n\t\t\t\tValue: pmuData,\n\t\t\t}\n\t\t\tp.addContainerPerf(cg, pmuData.Timestamp, metric)\n\t\t}(k, v)\n\t}\n\twg.Wait()\n\n\tp.delContainerPerfs()\n\n\treturn\n}", "func (e Exporter) Collect(ch chan<- prometheus.Metric) {\n\tctx := context.Background()\n\n\tcontainerService, err := container.NewService(ctx)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tcloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tprojectsListResponse, err := cloudresourcemanagerService.Projects.List().Filter(\"lifecycleState:ACTIVE\").Context(ctx).Do()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Infof(\"Found %d projects\", len(projectsListResponse.Projects))\n\n\tvar mutex = &sync.Mutex{}\n\tvar wg sync.WaitGroup\n\twg.Add(len(projectsListResponse.Projects))\n\n\tvalidMasterVersions := map[string][]string{}\n\tmasterVersionCount := map[string]float64{}\n\n\tfor _, p := range projectsListResponse.Projects {\n\t\tgo func(p *cloudresourcemanager.Project) {\n\t\t\tdefer wg.Done()\n\t\t\tresp, err := containerService.Projects.Locations.Clusters.List(\"projects/\" + p.ProjectId + \"/locations/-\").Context(ctx).Do()\n\t\t\tif err != nil {\n\t\t\t\tif ae, ok := err.(*googleapi.Error); ok && ae.Code == http.StatusForbidden {\n\t\t\t\t\tlog.Warnf(\"Missing roles/container.clusterViewer on %s (%s)\", p.Name, p.ProjectId)\n\t\t\t\t\treturn\n\t\t\t\t} else if ae, ok := err.(*googleapi.Error); ok && ae.Code == http.StatusTooManyRequests {\n\t\t\t\t\tlog.Warn(\"Quota exceeded\")\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor _, c := range resp.Clusters {\n\t\t\t\tmutex.Lock()\n\t\t\t\tif _, ok := validMasterVersions[c.Location]; !ok {\n\t\t\t\t\tlog.Infof(\"Pulling server configs for location %s\", c.Location)\n\t\t\t\t\tserverConfig, err := containerService.Projects.Locations.GetServerConfig(\"projects/\" + p.ProjectId + \"/locations/\" + 
c.Location).Do()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tif ae, ok := err.(*googleapi.Error); ok && ae.Code == http.StatusTooManyRequests {\n\t\t\t\t\t\t\tlog.Warn(\"Quota exceeded\")\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tvalidMasterVersions[c.Location] = serverConfig.ValidMasterVersions\n\t\t\t\t}\n\n\t\t\t\tif _, ok := masterVersionCount[c.CurrentMasterVersion]; !ok {\n\t\t\t\t\tmasterVersionCount[c.CurrentMasterVersion] = 1\n\t\t\t\t} else {\n\t\t\t\t\tmasterVersionCount[c.CurrentMasterVersion]++\n\t\t\t\t}\n\t\t\t\tmutex.Unlock()\n\n\t\t\t\tif !contains(c.CurrentMasterVersion, validMasterVersions[c.Location]) {\n\t\t\t\t\tch <- prometheus.MustNewConstMetric(\n\t\t\t\t\t\te.Metrics[\"gkeUnsupportedMasterVersion\"],\n\t\t\t\t\t\tprometheus.CounterValue,\n\t\t\t\t\t\t1,\n\t\t\t\t\t\t[]string{\n\t\t\t\t\t\t\tc.CurrentMasterVersion,\n\t\t\t\t\t\t\tp.ProjectId,\n\t\t\t\t\t\t\tp.Name,\n\t\t\t\t\t\t\tc.Name,\n\t\t\t\t\t\t\tc.Location,\n\t\t\t\t\t\t}...,\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t}\n\t\t}(p)\n\t}\n\n\twg.Wait()\n\n\tfor version, cnt := range masterVersionCount {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\te.Metrics[\"gkeMasterVersion\"],\n\t\t\tprometheus.CounterValue,\n\t\t\tcnt,\n\t\t\t[]string{\n\t\t\t\tversion,\n\t\t\t}...,\n\t\t)\n\t}\n\n\tlog.Info(\"Done\")\n}", "func (ld *loader) CollectMetrics() (writes metrics.RequestsSummary, reads metrics.RequestsSummary, err error) {\n\t// https://pkg.go.dev/github.com/prometheus/client_golang/prometheus?tab=doc#Gatherer\n\tmfs, err := prometheus.DefaultGatherer.Gather()\n\tif err != nil {\n\t\tld.cfg.Logger.Warn(\"failed to gather prometheus metrics\", zap.Error(err))\n\t\treturn metrics.RequestsSummary{}, metrics.RequestsSummary{}, err\n\t}\n\tfor _, mf := range mfs {\n\t\tif mf == nil {\n\t\t\tcontinue\n\t\t}\n\t\tswitch *mf.Name {\n\t\tcase \"secrets_client_write_requests_success_total\":\n\t\t\tgg := mf.Metric[0].GetGauge()\n\t\t\twrites.SuccessTotal = gg.GetValue()\n\t\tcase \"secrets_client_write_requests_failure_total\":\n\t\t\tgg := mf.Metric[0].GetGauge()\n\t\t\twrites.FailureTotal = gg.GetValue()\n\t\tcase \"secrets_client_write_request_latency_milliseconds\":\n\t\t\twrites.LatencyHistogram, err = metrics.ParseHistogram(\"milliseconds\", mf.Metric[0].GetHistogram())\n\t\t\tif err != nil {\n\t\t\t\treturn metrics.RequestsSummary{}, metrics.RequestsSummary{}, err\n\t\t\t}\n\n\t\tcase \"secrets_client_read_requests_success_total\":\n\t\t\tgg := mf.Metric[0].GetGauge()\n\t\t\treads.SuccessTotal = gg.GetValue()\n\t\tcase \"secrets_client_read_requests_failure_total\":\n\t\t\tgg := mf.Metric[0].GetGauge()\n\t\t\treads.FailureTotal = gg.GetValue()\n\t\tcase \"secrets_client_read_request_latency_milliseconds\":\n\t\t\treads.LatencyHistogram, err = metrics.ParseHistogram(\"milliseconds\", mf.Metric[0].GetHistogram())\n\t\t\tif err != nil {\n\t\t\t\treturn metrics.RequestsSummary{}, metrics.RequestsSummary{}, err\n\t\t\t}\n\t\t}\n\t}\n\n\tld.cfg.Logger.Info(\"sorting write latency results\", zap.Int(\"total-data-points\", ld.writeLatencies.Len()))\n\tnow := time.Now()\n\tsort.Sort(ld.writeLatencies)\n\tld.cfg.Logger.Info(\"sorted write latency results\", zap.Int(\"total-data-points\", ld.writeLatencies.Len()), zap.String(\"took\", time.Since(now).String()))\n\twrites.LantencyP50 = ld.writeLatencies.PickLantencyP50()\n\twrites.LantencyP90 = ld.writeLatencies.PickLantencyP90()\n\twrites.LantencyP99 = ld.writeLatencies.PickLantencyP99()\n\twrites.LantencyP999 = 
ld.writeLatencies.PickLantencyP999()\n\twrites.LantencyP9999 = ld.writeLatencies.PickLantencyP9999()\n\n\tld.cfg.Logger.Info(\"writing latency results in JSON to disk\", zap.String(\"path\", ld.cfg.WritesJSONPath))\n\twb, err := json.Marshal(ld.writeLatencies)\n\tif err != nil {\n\t\tld.cfg.Logger.Warn(\"failed to encode latency results in JSON\", zap.Error(err))\n\t\treturn metrics.RequestsSummary{}, metrics.RequestsSummary{}, err\n\t}\n\tif err = ioutil.WriteFile(ld.cfg.WritesJSONPath, wb, 0600); err != nil {\n\t\tld.cfg.Logger.Warn(\"failed to write latency results in JSON to disk\", zap.String(\"path\", ld.cfg.WritesJSONPath), zap.Error(err))\n\t\treturn metrics.RequestsSummary{}, metrics.RequestsSummary{}, err\n\t}\n\tld.cfg.Logger.Info(\"wrote latency results in JSON to disk\", zap.String(\"path\", ld.cfg.WritesJSONPath))\n\n\tld.cfg.Logger.Info(\"sorting read latency results\", zap.Int(\"total-data-points\", ld.readLatencies.Len()))\n\tnow = time.Now()\n\tsort.Sort(ld.readLatencies)\n\tld.cfg.Logger.Info(\"sorted read latency results\", zap.Int(\"total-data-points\", ld.readLatencies.Len()), zap.String(\"took\", time.Since(now).String()))\n\treads.LantencyP50 = ld.readLatencies.PickLantencyP50()\n\treads.LantencyP90 = ld.readLatencies.PickLantencyP90()\n\treads.LantencyP99 = ld.readLatencies.PickLantencyP99()\n\treads.LantencyP999 = ld.readLatencies.PickLantencyP999()\n\treads.LantencyP9999 = ld.readLatencies.PickLantencyP9999()\n\n\tld.cfg.Logger.Info(\"writing latency results in JSON to disk\", zap.String(\"path\", ld.cfg.ReadsJSONPath))\n\twb, err = json.Marshal(ld.readLatencies)\n\tif err != nil {\n\t\tld.cfg.Logger.Warn(\"failed to encode latency results in JSON\", zap.Error(err))\n\t\treturn metrics.RequestsSummary{}, metrics.RequestsSummary{}, err\n\t}\n\tif err = ioutil.WriteFile(ld.cfg.ReadsJSONPath, wb, 0600); err != nil {\n\t\tld.cfg.Logger.Warn(\"failed to write latency results in JSON to disk\", zap.String(\"path\", ld.cfg.ReadsJSONPath), zap.Error(err))\n\t\treturn metrics.RequestsSummary{}, metrics.RequestsSummary{}, err\n\t}\n\tld.cfg.Logger.Info(\"wrote latency results in JSON to disk\", zap.String(\"path\", ld.cfg.ReadsJSONPath))\n\n\treturn writes, reads, nil\n}", "func (m *metricFlinkJvmThreadsCount) emit(metrics pmetric.MetricSlice) {\n\tif m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {\n\t\tm.updateCapacity()\n\t\tm.data.MoveTo(metrics.AppendEmpty())\n\t\tm.init()\n\t}\n}", "func (m *InstallManager) gatherLogs(cd *hivev1.ClusterDeployment) {\n\tif !m.isBootstrapComplete() {\n\t\tif err := m.gatherBootstrapNodeLogs(cd); err != nil {\n\t\t\tm.log.WithError(err).Warn(\"error fetching logs from bootstrap node\")\n\t\t\treturn\n\t\t}\n\t\tm.log.Info(\"successfully gathered logs from bootstrap node\")\n\t} else {\n\t\tif err := m.gatherClusterLogs(cd); err != nil {\n\t\t\tm.log.WithError(err).Warn(\"error fetching logs with oc adm must-gather\")\n\t\t\treturn\n\t\t}\n\t\tm.log.Info(\"successfully ran oc adm must-gather\")\n\t}\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tresp, err := e.Pihole.GetMetrics()\n\tif err != nil {\n\t\tlog.Errorf(\"Pihole error: %s\", err.Error())\n\t\treturn\n\t}\n\tlog.Debugf(\"PiHole metrics: %#v\", resp)\n\tch <- prometheus.MustNewConstMetric(\n\t\tdomainsBeingBlocked, prometheus.CounterValue, float64(resp.DomainsBeingBlocked))\n\n\tch <- prometheus.MustNewConstMetric(\n\t\tdnsQueries, prometheus.CounterValue, float64(resp.DNSQueriesToday))\n\n\tch <- prometheus.MustNewConstMetric(\n\t\tadsBlocked, 
prometheus.CounterValue, float64(resp.AdsBlockedToday))\n\n\tch <- prometheus.MustNewConstMetric(\n\t\tadsPercentage, prometheus.CounterValue, float64(resp.AdsPercentageToday))\n\n\tfor k, v := range resp.Querytypes {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tqueryTypes, prometheus.CounterValue, v, k)\n\t}\n\tfor k, v := range resp.TopQueries {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\ttopQueries, prometheus.CounterValue, float64(v), k)\n\t}\n\tfor k, v := range resp.TopAds {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\ttopAds, prometheus.CounterValue, float64(v), k)\n\n\t}\n\tfor k, v := range resp.TopSources {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\ttopSources, prometheus.CounterValue, float64(v), k)\n\t}\n}", "func (s *CPUStat) Collect() {\n\tfile, err := os.Open(root + \"proc/stat\")\n\tdefer file.Close()\n\n\tif err != nil {\n\t\treturn\n\t}\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tf := regexp.MustCompile(\"\\\\s+\").Split(scanner.Text(), -1)\n\n\t\tisCPU, err := regexp.MatchString(\"^cpu\\\\d*\", f[0])\n\t\tif err == nil && isCPU {\n\t\t\tif f[0] == \"cpu\" {\n\t\t\t\tparseCPUline(s.All, f)\n\t\t\t\tpopulateComputedStats(s.All, float64(len(s.cpus)))\n\t\t\t\ts.All.TotalCount.Set(float64(len(s.cpus)))\n\t\t\t} else {\n\t\t\t\tperCPU, ok := s.cpus[f[0]]\n\t\t\t\tif !ok {\n\t\t\t\t\tperCPU = NewPerCPU(s.m, f[0])\n\t\t\t\t\ts.cpus[f[0]] = perCPU\n\t\t\t\t}\n\t\t\t\tparseCPUline(perCPU, f)\n\t\t\t\tpopulateComputedStats(perCPU, 1.0)\n\t\t\t\tperCPU.TotalCount.Set(1)\n\t\t\t}\n\t\t}\n\t}\n}", "func runCPUUsageStats(){\n\tnbCPU := float64(runtime.NumCPU())\n\tparams := fmt.Sprintf(\"(Get-process -Id %d).CPU\",os.Getpid())\n\tfor {\n\t\tcmd := exec.Command(\"powershell\", params)\n\t\tdata, _ := cmd.Output()\n\t\tcurrent,_ := strconv.ParseFloat(strings.Replace(string(data),\"\\r\\n\",\"\",-1),32)\n\t\tif previous == 0 {\n\t\t\tprevious = current\n\t\t}\n\t\tcurrentUsage = int(((current - previous)*float64(100))/(waitTime*nbCPU) )\n\t\tprevious = current\n\t\ttime.Sleep(time.Duration(waitTime )*time.Second)\n\t}\n}", "func CaptureRuntimeMemStats(registry RootRegistry, collectionFreq time.Duration) {\n\truntimeMemStats.Do(func() {\n\t\tif reg, ok := registry.(*rootRegistry); ok {\n\t\t\tgoRegistry := metrics.NewPrefixedChildRegistry(reg.registry, \"go.\")\n\t\t\tmetrics.RegisterRuntimeMemStats(goRegistry)\n\t\t\tgo metrics.CaptureRuntimeMemStats(goRegistry, collectionFreq)\n\t\t}\n\t})\n}", "func (m *KubeletMonitor) parseNodeStats(nodeStats stats.NodeStats) {\n\t// cpu\n\tcpuUsageCore := float64(*nodeStats.CPU.UsageNanoCores) / util.NanoToUnit\n\tglog.V(4).Infof(\"Cpu usage of node %s is %f core\", nodeStats.NodeName, cpuUsageCore)\n\tnodeCpuUsageCoreMetrics := metrics.NewEntityResourceMetric(task.NodeType, util.NodeStatsKeyFunc(nodeStats),\n\t\tmetrics.CPU, metrics.Used, cpuUsageCore)\n\n\t// memory\n\tmemoryUsageKiloBytes := float64(*nodeStats.Memory.UsageBytes) / util.KilobytesToBytes\n\tglog.V(4).Infof(\"Memory usage of node %s is %f Kb\", nodeStats.NodeName, memoryUsageKiloBytes)\n\tnodeMemoryUsageKiloBytesMetrics := metrics.NewEntityResourceMetric(task.NodeType,\n\t\tutil.NodeStatsKeyFunc(nodeStats), metrics.Memory, metrics.Used, memoryUsageKiloBytes)\n\n\tm.metricSink.AddNewMetricEntries(nodeCpuUsageCoreMetrics, nodeMemoryUsageKiloBytesMetrics)\n\n}", "func (g *Gatherer) Gather(ctx context.Context, gatherList []string, rec recorder.Interface) error {\n\tg.ctx = ctx\n\tvar errors []string\n\tvar gatherReport gatherMetadata\n\n\tif 
len(gatherList) == 0 {\n\t\terrors = append(errors, \"no gather functions are specified to run\")\n\t}\n\n\tif utils.StringInSlice(gatherAll, gatherList) {\n\t\tgatherList = fullGatherList()\n\t}\n\n\t// Starts the gathers in Go routines\n\tcases, starts, err := g.startGathering(gatherList, &errors)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Gets the info from the Go routines\n\tfor range gatherList {\n\t\tchosen, value, _ := reflect.Select(cases)\n\t\t// The chosen channel has been closed, so zero out the channel to disable the case\n\t\tcases[chosen].Chan = reflect.ValueOf(nil)\n\t\tgather := gatherList[chosen]\n\n\t\tgi := NewGatherInfo(gather, value)\n\t\tstatusReport, errorsReport := createStatusReport(gi, rec, starts[chosen])\n\n\t\tif len(errorsReport) > 0 {\n\t\t\terrors = append(errors, errorsReport...)\n\t\t}\n\t\tgatherReport.StatusReports = append(gatherReport.StatusReports, statusReport)\n\t}\n\n\t// if obfuscation is enabled, we want to know it from the archive\n\tgatherReport.IsGlobalObfuscationEnabled = g.anonymizer != nil\n\n\t// fill in performance related data to the report\n\tvar m runtime.MemStats\n\truntime.ReadMemStats(&m)\n\tgatherReport.MemoryAlloc = m.HeapAlloc\n\tgatherReport.Uptime = time.Since(g.startTime).Truncate(time.Millisecond).Seconds()\n\n\t// records the report\n\tif err := recordGatherReport(rec, gatherReport); err != nil {\n\t\terrors = append(errors, fmt.Sprintf(\"unable to record io status reports: %v\", err))\n\t}\n\n\tif len(errors) > 0 {\n\t\treturn sumErrors(errors)\n\t}\n\n\treturn nil\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tjunosTotalScrapeCount++\n\tch <- prometheus.MustNewConstMetric(junosDesc[\"ScrapesTotal\"], prometheus.CounterValue, junosTotalScrapeCount)\n\n\twg := &sync.WaitGroup{}\n\tfor _, collector := range e.Collectors {\n\t\twg.Add(1)\n\t\tgo e.runCollector(ch, collector, wg)\n\t}\n\twg.Wait()\n}", "func (m httpPostMetrics) Collect(metrics chan<- prometheus.Metric) {\n\tm.totalTime.Collect(metrics)\n\tm.firstProgressPacket.Collect(metrics)\n\tm.firstPackPacket.Collect(metrics)\n\tm.packBytes.Collect(metrics)\n}", "func getHourlyTestStatsPipeline(projectId string, requester string, start time.Time, end time.Time, tasks []string, lastUpdate time.Time, oldTasks bool) []bson.M {\n\tvar taskIdExpr string\n\tvar displayTaskLookupCollection string\n\tif oldTasks {\n\t\ttaskIdExpr = \"$old_task_id\"\n\t\tdisplayTaskLookupCollection = task.OldCollection\n\t} else {\n\t\ttaskIdExpr = \"$_id\"\n\t\tdisplayTaskLookupCollection = task.Collection\n\t}\n\tpipeline := []bson.M{\n\t\t{\"$match\": bson.M{\n\t\t\ttask.ProjectKey: projectId,\n\t\t\ttask.RequesterKey: requester,\n\t\t\ttask.CreateTimeKey: bson.M{\"$gte\": start, \"$lt\": end},\n\t\t\ttask.DisplayNameKey: bson.M{\"$in\": tasks},\n\t\t}},\n\t\t{\"$project\": bson.M{\n\t\t\ttask.IdKey: 0,\n\t\t\t\"task_id\": taskIdExpr,\n\t\t\t\"execution\": taskExecutionRef,\n\t\t\t\"project\": taskProjectKeyRef,\n\t\t\t\"task_name\": taskDisplayNameKeyRef,\n\t\t\t\"variant\": taskBuildVariantKeyRef,\n\t\t\t\"distro\": taskDistroIdKeyRef,\n\t\t\t\"requester\": taskRequesterKeyRef}},\n\t\t{\"$lookup\": bson.M{\n\t\t\t\"from\": displayTaskLookupCollection,\n\t\t\t\"localField\": \"task_id\",\n\t\t\t\"foreignField\": task.ExecutionTasksKey,\n\t\t\t\"as\": \"display_task\"}},\n\t\t{\"$unwind\": bson.M{\n\t\t\t\"path\": \"$display_task\",\n\t\t\t\"preserveNullAndEmptyArrays\": true}},\n\t\t{\"$lookup\": bson.M{\n\t\t\t\"from\": testresult.Collection,\n\t\t\t\"let\": 
bson.M{\"task_id\": \"$task_id\", \"execution\": \"$execution\"},\n\t\t\t\"pipeline\": []bson.M{\n\t\t\t\t{\"$match\": bson.M{\"$expr\": bson.M{\"$and\": []bson.M{\n\t\t\t\t\t{\"$eq\": array{testResultTaskIdKeyRef, \"$$task_id\"}},\n\t\t\t\t\t{\"$eq\": array{testResultExecutionRef, \"$$execution\"}}}}}},\n\t\t\t\t{\"$project\": bson.M{\n\t\t\t\t\ttestresult.IDKey: 0,\n\t\t\t\t\ttestresult.TestFileKey: 1,\n\t\t\t\t\ttestresult.StatusKey: 1,\n\t\t\t\t\ttestresult.StartTimeKey: 1,\n\t\t\t\t\ttestresult.EndTimeKey: 1}}},\n\t\t\t\"as\": \"testresults\"}},\n\t\t{\"$unwind\": \"$testresults\"},\n\t\t{\"$project\": bson.M{\n\t\t\t\"test_file\": \"$testresults.\" + testresult.TestFileKey,\n\t\t\t// We use the name of the display task if there is one.\n\t\t\t\"task_name\": bson.M{\"$ifNull\": array{\"$display_task.\" + task.DisplayNameKey, \"$task_name\"}},\n\t\t\t\"variant\": 1,\n\t\t\t\"distro\": 1,\n\t\t\t\"project\": 1,\n\t\t\t\"requester\": 1,\n\t\t\t\"status\": \"$testresults.\" + task.StatusKey,\n\t\t\t\"duration\": bson.M{\"$subtract\": array{\"$testresults.\" + testresult.EndTimeKey, \"$testresults.\" + testresult.StartTimeKey}}}},\n\t\t{\"$group\": bson.M{\n\t\t\t\"_id\": bson.D{\n\t\t\t\t{Name: \"test_file\", Value: \"$test_file\"},\n\t\t\t\t{Name: \"task_name\", Value: \"$task_name\"},\n\t\t\t\t{Name: \"variant\", Value: \"$variant\"},\n\t\t\t\t{Name: \"distro\", Value: \"$distro\"},\n\t\t\t\t{Name: \"project\", Value: \"$project\"},\n\t\t\t\t{Name: \"requester\", Value: \"$requester\"},\n\t\t\t},\n\t\t\t\"num_pass\": makeSum(bson.M{\"$eq\": array{\"$status\", \"pass\"}}),\n\t\t\t\"num_fail\": makeSum(bson.M{\"$ne\": array{\"$status\", \"pass\"}}),\n\t\t\t// \"IGNORE\" is not a special value, setting the value to something that is not a number will cause $avg to ignore it\n\t\t\t\"avg_duration_pass\": bson.M{\"$avg\": bson.M{\"$cond\": bson.M{\"if\": bson.M{\"$eq\": array{\"$status\", \"pass\"}}, \"then\": \"$duration\", \"else\": \"IGNORE\"}}}}},\n\t\t{\"$addFields\": bson.M{\n\t\t\t\"_id.date\": start,\n\t\t\t\"last_update\": lastUpdate,\n\t\t}},\n\t}\n\treturn pipeline\n}", "func logMemstatsSample() {\n\tl := log.WithField(\"process\", \"memstats\")\n\n\truntime.GC() // get up-to-date statistics\n\n\tmemStats := new(runtime.MemStats)\n\truntime.ReadMemStats(memStats)\n\n\tvar gcStats debug.GCStats\n\tdebug.ReadGCStats(&gcStats)\n\n\ts := memStats\n\n\tl.Infof(\"# runtime.MemStats\")\n\tl.Infof(\"# Alloc = %d\", s.Alloc)\n\tl.Infof(\"# TotalAlloc = %d\", s.TotalAlloc)\n\tl.Infof(\"# Sys = %d\", s.Sys)\n\tl.Infof(\"# Lookups = %d\", s.Lookups)\n\tl.Infof(\"# Mallocs = %d\", s.Mallocs)\n\tl.Infof(\"# Frees = %d\", s.Frees)\n\tl.Infof(\"# HeapAlloc = %d\", s.HeapAlloc)\n\tl.Infof(\"# HeapSys = %d\", s.HeapSys)\n\tl.Infof(\"# HeapIdle = %d\", s.HeapIdle)\n\tl.Infof(\"# HeapInuse = %d\", s.HeapInuse)\n\tl.Infof(\"# HeapReleased = %d\", s.HeapReleased)\n\tl.Infof(\"# HeapObjects = %d\", s.HeapObjects)\n\tl.Infof(\"# Stack = %d / %d\", s.StackInuse, s.StackSys)\n\tl.Infof(\"# NumGoroutine = %d\", runtime.NumGoroutine())\n\n\t// Record GC pause history, most recent 5 entries\n\tl.Infof(\"# Stop-the-world Pause time\")\n\n\tfor i, v := range gcStats.Pause {\n\t\tl.Infof(\"# gcStats.Pause[%d] = %d ns\", i, v)\n\n\t\tif i == 5 {\n\t\t\tbreak\n\t\t}\n\t}\n}", "func (dh *darwinHarvester) populateGauges(sample *types.ProcessSample, process Snapshot) error {\n\tvar err error\n\n\tcpuTimes, err := process.CPUTimes()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsample.CPUPercent = 
cpuTimes.Percent\n\n\ttotalCPU := cpuTimes.User + cpuTimes.System\n\n\tif totalCPU > 0 {\n\t\tsample.CPUUserPercent = (cpuTimes.User / totalCPU) * sample.CPUPercent\n\t\tsample.CPUSystemPercent = (cpuTimes.System / totalCPU) * sample.CPUPercent\n\t} else {\n\t\tsample.CPUUserPercent = 0\n\t\tsample.CPUSystemPercent = 0\n\t}\n\n\t// Extra status data\n\tsample.Status = process.Status()\n\tsample.ThreadCount = process.NumThreads()\n\tsample.MemoryVMSBytes = process.VmSize()\n\tsample.MemoryRSSBytes = process.VmRSS()\n\n\treturn nil\n}", "func (u *Use) CollectMetrics(mts []plugin.Metric) ([]plugin.Metric, error) {\n\tcfg := mts[0].Config\n\tif !u.initialized {\n\t\tu.init(cfg)\n\t}\n\n\tmetrics := make([]plugin.Metric, len(mts))\n\tfor i, p := range mts {\n\t\tns := p.Namespace.String()\n\t\tswitch {\n\t\tcase cpure.MatchString(ns):\n\t\t\tmetric, err := u.computeStat(p.Namespace)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.New(\"Unable to get compute stat: \" + err.Error())\n\t\t\t}\n\t\t\tmetrics[i] = *metric\n\n\t\tcase storre.MatchString(ns):\n\t\t\tmetric, err := u.diskStat(p.Namespace)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.New(\"Unable to get disk stat: \" + err.Error())\n\t\t\t}\n\t\t\tmetrics[i] = *metric\n\t\tcase memre.MatchString(ns):\n\t\t\tmetric, err := memStat(p.Namespace, u.VmStatPath, u.MemInfoPath)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.New(\"Unable to get mem stat: \" + err.Error())\n\t\t\t}\n\t\t\tmetrics[i] = *metric\n\t\t}\n\t\ttags, err := hostTags()\n\n\t\tif err == nil {\n\t\t\tmetrics[i].Tags = tags\n\t\t}\n\t\tmetrics[i].Timestamp = time.Now()\n\n\t}\n\treturn metrics, nil\n}", "func (p *Kafka) CollectMetrics(mts []plugin.MetricType) ([]plugin.MetricType, error) {\n\tmetrics := []plugin.MetricType{}\n\n\terr := p.loadMetricAPI(mts[0].Config())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, m := range mts {\n\t\tresults := []nodeData{}\n\t\tsearch := strings.Split(replaceUnderscoreToDot(strings.TrimLeft(m.Namespace().String(), \"/\")), \"/\")\n\t\tif len(search) > 3 {\n\t\t\tp.client.Root.Get(p.client.client.GetUrl(), search[4:], 0, &results)\n\t\t}\n\n\t\tfor _, result := range results {\n\t\t\tns := append([]string{\"hyperpilot\", \"kafka\", \"node\", p.client.host}, strings.Split(result.Path, Slash)...)\n\t\t\tmetrics = append(metrics, plugin.MetricType{\n\t\t\t\tNamespace_: core.NewNamespace(ns...),\n\t\t\t\tTimestamp_: time.Now(),\n\t\t\t\tData_: result.Data,\n\t\t\t\tUnit_: reflect.TypeOf(result.Data).String(),\n\t\t\t})\n\t\t}\n\n\t}\n\n\treturn metrics, nil\n}", "func (k *KACollector) parseStats() ([]Stats, error) {\n\tdata := make([]Stats, 0)\n\n\tf, err := os.Open(\"/tmp/keepalived.stats\")\n\tif err != nil {\n\t\treturn data, err\n\t}\n\n\tdefer f.Close()\n\n\tsep := \"VRRP Instance\"\n\tprop := \":\"\n\n\tdt := Stats{}\n\tscanner := bufio.NewScanner(bufio.NewReader(f))\n\n\tsection := \"\"\n\tinstance := \"\"\n\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tif strings.HasPrefix(line, sep) && strings.Contains(line, prop) {\n\t\t\tif instance != \"\" {\n\t\t\t\tdata = append(data, dt)\n\t\t\t\tdt = Stats{}\n\t\t\t}\n\n\t\t\tsp := strings.Split(strings.TrimSpace(line), prop)\n\t\t\tinstance = strings.TrimSpace(sp[1])\n\t\t} else if strings.HasPrefix(line, \" \") && strings.HasSuffix(line, prop) {\n\t\t\tsp := strings.Split(strings.TrimSpace(line), prop)\n\t\t\tsection = strings.TrimSpace(sp[0])\n\t\t} else if strings.HasPrefix(line, \" \") && section != \"\" {\n\t\t\tsp := 
strings.Split(strings.TrimSpace(line), prop)\n\t\t\tkey := strings.TrimSpace(sp[0])\n\t\t\tval := strings.TrimSpace(sp[1])\n\n\t\t\tvalue, err := strconv.Atoi(val)\n\t\t\tif err != nil {\n\t\t\t\treturn data, err\n\t\t\t}\n\n\t\t\tswitch section {\n\t\t\tcase \"Advertisements\":\n\t\t\t\tswitch key {\n\t\t\t\tcase \"Received\":\n\t\t\t\t\tdt.AdvertRcvd = value\n\t\t\t\tcase \"Sent\":\n\t\t\t\t\tdt.AdvertSent = value\n\t\t\t\t}\n\t\t\tcase \"Packet Errors\":\n\t\t\t\tswitch key {\n\t\t\t\tcase \"Length\":\n\t\t\t\t\tdt.PacketLenErr = value\n\t\t\t\tcase \"TTL\":\n\t\t\t\t\tdt.IPTTLErr = value\n\t\t\t\tcase \"Invalid Type\":\n\t\t\t\t\tdt.InvalidTypeRcvd = value\n\t\t\t\tcase \"Advertisement Interval\":\n\t\t\t\t\tdt.AdvertIntervalErr = value\n\t\t\t\tcase \"Address List\":\n\t\t\t\t\tdt.AddrListErr = value\n\t\t\t\t}\n\t\t\tcase \"Authentication Errors\":\n\t\t\t\tswitch key {\n\t\t\t\tcase \"Invalid Type\":\n\t\t\t\t\tdt.InvalidAuthtype = value\n\t\t\t\tcase \"Type Mismatch\":\n\t\t\t\t\tdt.AuthtypeMismatch = value\n\t\t\t\tcase \"Failure\":\n\t\t\t\t\tdt.AuthFailure = value\n\t\t\t\t}\n\t\t\tcase \"Priority Zero\":\n\t\t\t\tswitch key {\n\t\t\t\tcase \"Received\":\n\t\t\t\t\tdt.PriZeroRcvd = value\n\t\t\t\tcase \"Sent\":\n\t\t\t\t\tdt.PriZeroSent = value\n\t\t\t\t}\n\t\t\t}\n\t\t} else if strings.HasPrefix(line, \" \") && !strings.HasSuffix(line, prop) && !strings.HasPrefix(line, \" \") {\n\t\t\tsp := strings.Split(strings.TrimSpace(line), prop)\n\t\t\tkey := strings.TrimSpace(sp[0])\n\t\t\tval := strings.TrimSpace(sp[1])\n\t\t\tsection = \"\"\n\n\t\t\tvalue, err := strconv.Atoi(val)\n\t\t\tif err != nil {\n\t\t\t\treturn data, err\n\t\t\t}\n\n\t\t\tswitch key {\n\t\t\tcase \"Became master\":\n\t\t\t\tdt.BecomeMaster = value\n\t\t\tcase \"Released master\":\n\t\t\t\tdt.ReleaseMaster = value\n\t\t\t}\n\t\t}\n\t}\n\n\tif instance != \"\" {\n\t\tdata = append(data, dt)\n\t}\n\n\treturn data, nil\n}", "func lvCollect(ch chan<- prometheus.Metric, lvs []map[string]string, vgName string) {\n for _, lv := range lvs {\n lvSizeF, err := strconv.ParseFloat(strings.Trim(lv[\"lv_size\"], \"B\"), 64)\n if err != nil {\n log.Print(err)\n return\n }\n ch <- prometheus.MustNewConstMetric(lvSizeMetric, prometheus.GaugeValue, lvSizeF, lv[\"lv_name\"], lv[\"lv_uuid\"], vgName)\n }\n}", "func (systemd_units *SystemdUnits) Gather(acc telegraf.Accumulator) error {\n\tout, err := systemd_units.systemctl(systemd_units.Timeout, systemd_units.UnitType)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tscanner := bufio.NewScanner(out)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\n\t\tdata := strings.Fields(line)\n\t\tif len(data) < 4 {\n\t\t\tacc.AddError(fmt.Errorf(\"Error parsing line (expected at least 4 fields): %s\", line))\n\t\t\tcontinue\n\t\t}\n\t\ttags := map[string]string{\n\t\t\t\"name\": data[0],\n\t\t}\n\t\tload := load_map[data[1]] - 1\n\t\tactive := active_map[data[2]] - 1\n\t\tsub := sub_map[data[3]] - 1\n\n\t\tfields := map[string]interface{}{\n\t\t\t\"load\": load,\n\t\t\t\"active\": active,\n\t\t\t\"sub\": sub,\n\t\t}\n\t\tacc.AddCounter(measurement, fields, tags)\n\t}\n\n\treturn nil\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tfor _, db := range e.dbs {\n\t\t// logger.Log(\"Scraping\", db.String())\n\t\tgo e.scrapeDatabase(db)\n\t}\n\te.mutex.Lock()\n\tdefer 
e.mutex.Unlock()\n\te.cpuPercent.Collect(ch)\n\te.dataIO.Collect(ch)\n\te.logIO.Collect(ch)\n\te.memoryPercent.Collect(ch)\n\te.workPercent.Collect(ch)\n\te.sessionPercent.Collect(ch)\n\te.storagePercent.Collect(ch)\n\te.dbUp.Collect(ch)\n\te.up.Set(1)\n}", "func CollectGoStatsTotals() Composer {\n\ts := &GoRuntimeInfo{}\n\ts.build()\n\n\treturn s\n}", "func (e *UwsgiExporter) Collect(ch chan<- prometheus.Metric) {\n\tstartTime := time.Now()\n\terr := e.execute(ch)\n\td := time.Since(startTime).Seconds()\n\n\tif err != nil {\n\t\tlog.Errorf(\"ERROR: scrape failed after %fs: %s\", d, err)\n\t\te.uwsgiUp.Set(0)\n\t\te.scrapeDurations.WithLabelValues(\"error\").Observe(d)\n\t} else {\n\t\tlog.Debugf(\"OK: scrape successful after %fs.\", d)\n\t\te.uwsgiUp.Set(1)\n\t\te.scrapeDurations.WithLabelValues(\"success\").Observe(d)\n\t}\n\n\te.uwsgiUp.Collect(ch)\n\te.scrapeDurations.Collect(ch)\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock()\n\tdefer e.mutex.Unlock()\n\n\te.scrape()\n\n\te.up.Collect(ch)\n\te.totalScrapes.Collect(ch)\n\te.exchangeStatus.Collect(ch)\n\te.ltp.Collect(ch)\n\te.bestBid.Collect(ch)\n\te.bestAsk.Collect(ch)\n\te.bestBidSize.Collect(ch)\n\te.bestAskSize.Collect(ch)\n\te.totalBidDepth.Collect(ch)\n\te.totalAskDepth.Collect(ch)\n\te.volume.Collect(ch)\n\te.volumeByProduct.Collect(ch)\n}", "func (h *GrayLog) gatherServer(\n\tacc telegraf.Accumulator,\n\tserverURL string,\n) error {\n\tresp, _, err := h.sendRequest(serverURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\trequestURL, err := url.Parse(serverURL)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to parse address %q: %w\", serverURL, err)\n\t}\n\n\thost, port, _ := net.SplitHostPort(requestURL.Host)\n\tvar dat ResponseMetrics\n\tif err := json.Unmarshal([]byte(resp), &dat); err != nil {\n\t\treturn err\n\t}\n\tfor _, mItem := range dat.Metrics {\n\t\tfields := make(map[string]interface{})\n\t\ttags := map[string]string{\n\t\t\t\"server\": host,\n\t\t\t\"port\": port,\n\t\t\t\"name\": mItem.Name,\n\t\t\t\"type\": mItem.Type,\n\t\t}\n\t\th.flatten(mItem.Fields, fields, \"\")\n\t\tacc.AddFields(mItem.FullName, fields, tags)\n\t}\n\treturn nil\n}" ]
[ "0.7737421", "0.67302734", "0.6526199", "0.6446691", "0.60606474", "0.5799007", "0.5660591", "0.55717057", "0.5517786", "0.5505716", "0.54413205", "0.53862643", "0.52611774", "0.5251062", "0.5178094", "0.5103062", "0.5089243", "0.50813794", "0.50379", "0.503636", "0.50349003", "0.497448", "0.49565867", "0.49424112", "0.49400556", "0.49281254", "0.4921599", "0.49047005", "0.49037635", "0.48759884", "0.48723745", "0.4870466", "0.4869588", "0.4866553", "0.4865768", "0.48599404", "0.48375243", "0.483276", "0.48258936", "0.4823713", "0.48152503", "0.48016068", "0.48015094", "0.48010975", "0.47970852", "0.47839382", "0.47825092", "0.4764711", "0.4762312", "0.4762074", "0.47358203", "0.4734364", "0.4731777", "0.4719743", "0.47151652", "0.47007212", "0.46985084", "0.46711993", "0.46707064", "0.46694243", "0.46602473", "0.46553785", "0.46511364", "0.46506512", "0.46400842", "0.46383986", "0.4635281", "0.46277568", "0.46245304", "0.46205395", "0.46129858", "0.4607527", "0.46073633", "0.46040985", "0.45893955", "0.45629045", "0.454604", "0.45415497", "0.4536309", "0.45294166", "0.45259154", "0.45252827", "0.45153546", "0.4510135", "0.45083782", "0.45061198", "0.45040008", "0.45017236", "0.45016098", "0.4496427", "0.4495939", "0.44951707", "0.44946134", "0.44905388", "0.4488603", "0.4477775", "0.44759244", "0.44602206", "0.44594702", "0.44516194" ]
0.67204577
2
Gather ask this plugin to start gathering metrics
func (logstash *Logstash) Gather(accumulator telegraf.Accumulator) error {\n\tif logstash.client == nil {\n\t\tclient, err := logstash.createHTTPClient()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlogstash.client = client\n\t}\n\n\tif choice.Contains(\"jvm\", logstash.Collect) {\n\t\tjvmURL, err := url.Parse(logstash.URL + jvmStats)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := logstash.gatherJVMStats(jvmURL.String(), accumulator); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif choice.Contains(\"process\", logstash.Collect) {\n\t\tprocessURL, err := url.Parse(logstash.URL + processStats)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := logstash.gatherProcessStats(processURL.String(), accumulator); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif choice.Contains(\"pipelines\", logstash.Collect) {\n\t\tif logstash.SinglePipeline {\n\t\t\tpipelineURL, err := url.Parse(logstash.URL + pipelineStats)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := logstash.gatherPipelineStats(pipelineURL.String(), accumulator); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tpipelinesURL, err := url.Parse(logstash.URL + pipelinesStats)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := logstash.gatherPipelinesStats(pipelinesURL.String(), accumulator); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (d *GatherJob) Gather(ctx context.Context, kubeConfig, protoKubeConfig *rest.Config) error {\n\tklog.Infof(\"Starting insights-operator %s\", version.Get().String())\n\t// these are operator clients\n\tkubeClient, err := kubernetes.NewForConfig(protoKubeConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconfigClient, err := configv1client.NewForConfig(kubeConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgatherProtoKubeConfig, gatherKubeConfig, metricsGatherKubeConfig, alertsGatherKubeConfig := prepareGatherConfigs(\n\t\tprotoKubeConfig, kubeConfig, d.Impersonate,\n\t)\n\n\ttpEnabled, err := isTechPreviewEnabled(ctx, configClient)\n\tif err != nil {\n\t\tklog.Error(\"can't read cluster feature gates: %v\", err)\n\t}\n\tvar gatherConfig v1alpha1.GatherConfig\n\tif tpEnabled {\n\t\tinsightsDataGather, err := configClient.ConfigV1alpha1().InsightsDataGathers().Get(ctx, \"cluster\", metav1.GetOptions{}) //nolint: govet\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tgatherConfig = insightsDataGather.Spec.GatherConfig\n\t}\n\n\t// ensure the insight snapshot directory exists\n\tif _, err = os.Stat(d.StoragePath); err != nil && os.IsNotExist(err) {\n\t\tif err = os.MkdirAll(d.StoragePath, 0777); err != nil {\n\t\t\treturn fmt.Errorf(\"can't create --path: %v\", err)\n\t\t}\n\t}\n\n\t// configobserver synthesizes all config into the status reporter controller\n\tconfigObserver := configobserver.New(d.Controller, kubeClient)\n\n\t// anonymizer is responsible for anonymizing sensitive data, it can be configured to disable specific anonymization\n\tanonymizer, err := anonymization.NewAnonymizerFromConfig(\n\t\tctx, gatherKubeConfig, gatherProtoKubeConfig, protoKubeConfig, configObserver, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// the recorder stores the collected data and we flush at the end.\n\trecdriver := diskrecorder.New(d.StoragePath)\n\trec := recorder.New(recdriver, d.Interval, anonymizer)\n\tdefer func() {\n\t\tif err = rec.Flush(); err != nil {\n\t\t\tklog.Error(err)\n\t\t}\n\t}()\n\n\tauthorizer := clusterauthorizer.New(configObserver)\n\n\t// gatherConfigClient is configClient created from gatherKubeConfig, this name was used because configClient was already taken\n\t// this client is only used in insightsClient, it is created here\n\t// because pkg/insights/insightsclient/request_test.go unit test won't work otherwise\n\tgatherConfigClient, err := configv1client.NewForConfig(gatherKubeConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinsightsClient := insightsclient.New(nil, 0, \"default\", authorizer, gatherConfigClient)\n\tgatherers := gather.CreateAllGatherers(\n\t\tgatherKubeConfig, gatherProtoKubeConfig, metricsGatherKubeConfig, alertsGatherKubeConfig, anonymizer,\n\t\tconfigObserver, insightsClient,\n\t)\n\n\tallFunctionReports := make(map[string]gather.GathererFunctionReport)\n\tfor _, gatherer := range gatherers {\n\t\tfunctionReports, err := gather.CollectAndRecordGatherer(ctx, gatherer, rec, &gatherConfig)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"unable to process gatherer %v, error: %v\", gatherer.GetName(), err)\n\t\t}\n\n\t\tfor i := range functionReports {\n\t\t\tallFunctionReports[functionReports[i].FuncName] = functionReports[i]\n\t\t}\n\t}\n\n\treturn gather.RecordArchiveMetadata(mapToArray(allFunctionReports), rec, anonymizer)\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tlog.Infof(\"Syno exporter starting\")\n\tif e.Client == nil {\n\t\tlog.Errorf(\"Syno client not configured.\")\n\t\treturn\n\t}\n\terr := 
e.Client.Connect()\n\tif err != nil {\n\t\tlog.Errorln(\"Can't connect to Synology for SNMP: %s\", err)\n\t\treturn\n\t}\n\tdefer e.Client.SNMP.Conn.Close()\n\n\te.collectSystemMetrics(ch)\n\te.collectCPUMetrics(ch)\n\te.collectLoadMetrics(ch)\n\te.collectMemoryMetrics(ch)\n\te.collectNetworkMetrics(ch)\n\te.collectDiskMetrics(ch)\n\n\tlog.Infof(\"Syno exporter finished\")\n}", "func (g *Gatherer) Gather(ctx context.Context, gatherList []string, rec recorder.Interface) error {\n\tg.ctx = ctx\n\tvar errors []string\n\tvar gatherReport gatherMetadata\n\n\tif len(gatherList) == 0 {\n\t\terrors = append(errors, \"no gather functions are specified to run\")\n\t}\n\n\tif utils.StringInSlice(gatherAll, gatherList) {\n\t\tgatherList = fullGatherList()\n\t}\n\n\t// Starts the gathers in Go routines\n\tcases, starts, err := g.startGathering(gatherList, &errors)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Gets the info from the Go routines\n\tfor range gatherList {\n\t\tchosen, value, _ := reflect.Select(cases)\n\t\t// The chosen channel has been closed, so zero out the channel to disable the case\n\t\tcases[chosen].Chan = reflect.ValueOf(nil)\n\t\tgather := gatherList[chosen]\n\n\t\tgi := NewGatherInfo(gather, value)\n\t\tstatusReport, errorsReport := createStatusReport(gi, rec, starts[chosen])\n\n\t\tif len(errorsReport) > 0 {\n\t\t\terrors = append(errors, errorsReport...)\n\t\t}\n\t\tgatherReport.StatusReports = append(gatherReport.StatusReports, statusReport)\n\t}\n\n\t// if obfuscation is enabled, we want to know it from the archive\n\tgatherReport.IsGlobalObfuscationEnabled = g.anonymizer != nil\n\n\t// fill in performance related data to the report\n\tvar m runtime.MemStats\n\truntime.ReadMemStats(&m)\n\tgatherReport.MemoryAlloc = m.HeapAlloc\n\tgatherReport.Uptime = time.Since(g.startTime).Truncate(time.Millisecond).Seconds()\n\n\t// records the report\n\tif err := recordGatherReport(rec, gatherReport); err != nil {\n\t\terrors = append(errors, fmt.Sprintf(\"unable to record io status reports: %v\", err))\n\t}\n\n\tif len(errors) > 0 {\n\t\treturn sumErrors(errors)\n\t}\n\n\treturn nil\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\tif err := e.scrape(); err != nil {\n\t\tlog.Error(err)\n\t\tnomad_up.Set(0)\n\t\tch <- nomad_up\n\t\treturn\n\t}\n\n\tch <- nomad_up\n\tch <- metric_uptime\n\tch <- metric_request_response_time_total\n\tch <- metric_request_response_time_avg\n\n\tfor _, metric := range metric_request_status_count_current {\n\t\tch <- metric\n\t}\n\tfor _, metric := range metric_request_status_count_total {\n\t\tch <- metric\n\t}\n}", "func (collector *atlassianUPMCollector) Collect(ch chan<- prometheus.Metric) {\n\tstartTime := time.Now()\n\tlog.Debug(\"Collect start\")\n\n\tlog.Debug(\"create request object\")\n\treq, err := http.NewRequest(\"GET\", baseURL, nil)\n\tif err != nil {\n\t\tlog.Error(\"http.NewRequest returned an error:\", err)\n\t}\n\n\tlog.Debug(\"create Basic auth string from argument passed\")\n\tbearer = \"Basic \" + *token\n\n\tlog.Debug(\"add authorization header to the request\")\n\treq.Header.Add(\"Authorization\", bearer)\n\n\tlog.Debug(\"add content type to the request\")\n\treq.Header.Add(\"content-type\", \"application/json\")\n\n\tlog.Debug(\"make request... 
get back a response\")\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tlog.Debug(\"set metric atlassian_upm_rest_url_up\")\n\t\tch <- prometheus.MustNewConstMetric(collector.atlassianUPMUpMetric, prometheus.GaugeValue, 0, *fqdn)\n\t\tlog.Warn(\"http.DefaultClient.Do returned an error:\", err, \" return from Collect\")\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\tlog.Debug(\"response status code: \", resp.StatusCode)\n\t}\n\n\tlog.Debug(\"set metric atlassian_upm_rest_url_up\")\n\tch <- prometheus.MustNewConstMetric(collector.atlassianUPMUpMetric, prometheus.GaugeValue, 1, *fqdn)\n\n\tvar allPlugins restPlugins\n\tif resp.StatusCode == 200 {\n\t\tlog.Debug(\"get all plugins\")\n\t\tallPlugins = plugins(resp)\n\n\t\t// return user-installed plugins if argument passed\n\t\tif *userInstalled {\n\t\t\tlog.Debug(\"-user-installed found\")\n\t\t\tallPlugins = userInstalledPlugins(allPlugins)\n\t\t}\n\n\t\t// plugins have the ability to be installed, but disabled, this will remove them if disabled\n\t\tif *dropDisabled {\n\t\t\tlog.Debug(\"-drop-disabled found\")\n\t\t\tallPlugins = dropDisabledPlugins(allPlugins)\n\t\t}\n\n\t\t// Jira specific\n\t\t// some plugins maintained by Jira have an additional element, this gives the option to drop those plugins\n\t\tif *dropJiraSoftware {\n\t\t\tlog.Debug(\"-drop-jira-software found\")\n\t\t\tallPlugins = dropJiraSoftwarePlugins(allPlugins)\n\t\t}\n\n\t\tlog.Debug(\"range over values in response, add each as metric with labels\")\n\t\tfor _, plugin := range allPlugins.Plugins {\n\n\t\t\tlog.Debug(\"creating plugin metric for: \" + plugin.Name)\n\t\t\tch <- prometheus.MustNewConstMetric(\n\t\t\t\tcollector.atlassianUPMPlugins,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\t0,\n\t\t\t\tstrconv.FormatBool(plugin.Enabled), // convert bool to string for the 'enabled' value in the labels\n\t\t\t\tstring(plugin.Name),\n\t\t\t\tstring(plugin.Key),\n\t\t\t\tstring(plugin.Version),\n\t\t\t\tstrconv.FormatBool(plugin.UserInstalled),\n\t\t\t\t*fqdn,\n\t\t\t)\n\t\t}\n\t}\n\n\tif resp.StatusCode == 200 && *checkUpdates {\n\t\tlog.Debug(\"get remaining plugins available info\")\n\t\tavailablePluginsMap := getAvailablePluginInfo(allPlugins)\n\n\t\tlog.Debug(\"range over values in response, add each as metric with labels\")\n\t\tfor _, plugin := range availablePluginsMap {\n\t\t\tavailableUpdate := false\n\n\t\t\tverInstalled, err := version.NewVersion(plugin.InstalledVersion)\n\t\t\tif err != nil {\n\t\t\t\tlog.Debug(\"error turning plugin installed into version object\")\n\t\t\t}\n\n\t\t\tverAvailable, err := version.NewVersion(plugin.Version)\n\t\t\tif err != nil {\n\t\t\t\tlog.Debug(\"error turning available plugin into version object\")\n\t\t\t}\n\n\t\t\tif verInstalled.LessThan(verAvailable) {\n\t\t\t\tlog.Debug(\"plugin: \", plugin.Name, \", is currently running: \", plugin.InstalledVersion, \", and can be upgraded to: \", plugin.Version)\n\t\t\t\tavailableUpdate = true\n\t\t\t}\n\n\t\t\tlog.Debug(\"creating plugin version metric for: \", plugin.Name, \", with Key: \", plugin.Key)\n\t\t\tch <- prometheus.MustNewConstMetric(\n\t\t\t\tcollector.atlassianUPMVersionsMetric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tboolToFloat(availableUpdate),\n\t\t\t\tstring(plugin.Name),\n\t\t\t\tstring(plugin.Key),\n\t\t\t\tstring(plugin.Version),\n\t\t\t\tstring(plugin.InstalledVersion),\n\t\t\t\tstrconv.FormatBool(plugin.Enabled), // convert bool to string for the 'enabled' value in the 
labels\n\t\t\t\tstrconv.FormatBool(plugin.UserInstalled),\n\t\t\t\t*fqdn,\n\t\t\t)\n\t\t}\n\t}\n\n\tfinishTime := time.Now()\n\telapsedTime := finishTime.Sub(startTime)\n\tlog.Debug(\"set the duration metric\")\n\tch <- prometheus.MustNewConstMetric(collector.atlassianUPMTimeMetric, prometheus.GaugeValue, elapsedTime.Seconds(), *fqdn)\n\n\tlog.Debug(\"Collect finished\")\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\t// Protect metrics from concurrent collects.\n\te.mutex.Lock()\n\tdefer e.mutex.Unlock()\n\n\t// Scrape metrics from Tankerkoenig API.\n\tif err := e.scrape(ch); err != nil {\n\t\te.logger.Printf(\"error: cannot scrape tankerkoenig api: %v\", err)\n\t}\n\n\t// Collect metrics.\n\te.up.Collect(ch)\n\te.scrapeDuration.Collect(ch)\n\te.failedScrapes.Collect(ch)\n\te.totalScrapes.Collect(ch)\n}", "func (p *plug) Collect(ch chan<- prometheus.Metric) {\n\tp.doStats(ch, doMetric)\n}", "func (mr *MetricsReporter) Start(d time.Duration) {\n\tticker := time.NewTicker(d)\n\tfor _ = range ticker.C {\n\t\tif err := mr.Report(); err != nil {\n\t\t\tlog.Printf(\"Datadog series error: %s\", err.Error())\n\t\t}\n\t}\n}", "func (c *Canary) GatherMetrics(config schemas.Config) error {\n\tif !c.StepStatus[constants.StepCleanChecking] {\n\t\treturn nil\n\t}\n\tif config.DisableMetrics {\n\t\treturn nil\n\t}\n\n\tif len(config.Region) > 0 {\n\t\tif !CheckRegionExist(config.Region, c.Stack.Regions) {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tif !config.CompleteCanary {\n\t\tc.Logger.Debug(\"Skip gathering metrics because canary is now applied\")\n\t\treturn nil\n\t}\n\n\tif err := c.Deployer.StartGatheringMetrics(config); err != nil {\n\t\treturn err\n\t}\n\n\tc.StepStatus[constants.StepGatherMetrics] = true\n\treturn nil\n}", "func (h *Metrics) Collect(in chan<- prometheus.Metric) {\n\th.duration.Collect(in)\n\th.totalRequests.Collect(in)\n\th.requestSize.Collect(in)\n\th.responseSize.Collect(in)\n\th.handlerStatuses.Collect(in)\n\th.responseTime.Collect(in)\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\t// Reset metrics.\n\tfor _, vec := range e.gauges {\n\t\tvec.Reset()\n\t}\n\n\tfor _, vec := range e.counters {\n\t\tvec.Reset()\n\t}\n\n\tresp, err := e.client.Get(e.URI)\n\tif err != nil {\n\t\te.up.Set(0)\n\t\tlog.Printf(\"Error while querying Elasticsearch: %v\", err)\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\tlog.Printf(\"Failed to read ES response body: %v\", err)\n\t\te.up.Set(0)\n\t\treturn\n\t}\n\n\te.up.Set(1)\n\n\tvar all_stats NodeStatsResponse\n\terr = json.Unmarshal(body, &all_stats)\n\n\tif err != nil {\n\t\tlog.Printf(\"Failed to unmarshal JSON into struct: %v\", err)\n\t\treturn\n\t}\n\n\t// Regardless of whether we're querying the local host or the whole\n\t// cluster, here we can just iterate through all nodes found.\n\n\tfor node, stats := range all_stats.Nodes {\n\t\tlog.Printf(\"Processing node %v\", node)\n\t\t// GC Stats\n\t\tfor collector, gcstats := range stats.JVM.GC.Collectors {\n\t\t\te.counters[\"jvm_gc_collection_count\"].WithLabelValues(all_stats.ClusterName, stats.Name, collector).Set(float64(gcstats.CollectionCount))\n\t\t\te.counters[\"jvm_gc_collection_time_in_millis\"].WithLabelValues(all_stats.ClusterName, stats.Name, collector).Set(float64(gcstats.CollectionTime))\n\t\t}\n\n\t\t// Breaker stats\n\t\tfor breaker, bstats := range stats.Breakers 
{\n\t\t\te.gauges[\"breakers_estimated_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name, breaker).Set(float64(bstats.EstimatedSize))\n\t\t\te.gauges[\"breakers_limit_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name, breaker).Set(float64(bstats.LimitSize))\n\t\t}\n\n\t\t// JVM Memory Stats\n\t\te.gauges[\"jvm_mem_heap_committed_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.HeapCommitted))\n\t\te.gauges[\"jvm_mem_heap_used_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.HeapUsed))\n\t\te.gauges[\"jvm_mem_heap_max_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.HeapMax))\n\t\te.gauges[\"jvm_mem_non_heap_committed_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.NonHeapCommitted))\n\t\te.gauges[\"jvm_mem_non_heap_used_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.NonHeapUsed))\n\n\t\t// Indices Stats\n\t\te.gauges[\"indices_fielddata_evictions\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FieldData.Evictions))\n\t\te.gauges[\"indices_fielddata_memory_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FieldData.MemorySize))\n\t\te.gauges[\"indices_filter_cache_evictions\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FilterCache.Evictions))\n\t\te.gauges[\"indices_filter_cache_memory_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FilterCache.MemorySize))\n\n\t\te.gauges[\"indices_docs_count\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Docs.Count))\n\t\te.gauges[\"indices_docs_deleted\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Docs.Deleted))\n\n\t\te.gauges[\"indices_segments_memory_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Segments.Memory))\n\n\t\te.gauges[\"indices_store_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Store.Size))\n\t\te.counters[\"indices_store_throttle_time_in_millis\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Store.ThrottleTime))\n\n\t\te.counters[\"indices_flush_total\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Flush.Total))\n\t\te.counters[\"indices_flush_time_in_millis\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Flush.Time))\n\n\t\t// Transport Stats\n\t\te.counters[\"transport_rx_count\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.RxCount))\n\t\te.counters[\"transport_rx_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.RxSize))\n\t\te.counters[\"transport_tx_count\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.TxCount))\n\t\te.counters[\"transport_tx_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.TxSize))\n\t}\n\n\t// Report metrics.\n\tch <- e.up\n\n\tfor _, vec := range e.counters {\n\t\tvec.Collect(ch)\n\t}\n\n\tfor _, vec := range e.gauges {\n\t\tvec.Collect(ch)\n\t}\n}", "func (*filterMetricProcessor) Start(ctx context.Context, host component.Host) error {\n\treturn nil\n}", "func (o *requestMetrics) Collect(ch chan<- prometheus.Metric) 
{\n\tmetricFamilies, err := o.stStore.GetPromDirectMetrics()\n\tif err != nil {\n\t\tklog.Errorf(\"fetch prometheus metrics failed: %v\", err)\n\t\treturn\n\t}\n\to.handleMetrics(metricFamilies, ch)\n}", "func (a *Agent) gatherer(\n\tshutdown chan struct{},\n\tsource *models.RunningSource,\n\tinterval time.Duration,\n) {\n\tdefer panicRecover(source)\n\n\tGatherTime := selfmetric.GetOrRegisterHistogram(\n\t\t\"gather\",\n\t\t\"gather_time_nanoseconds\",\n\t\tmap[string]string{\"source\": source.Config.Name},\n\t)\n\n\teventCh := source.EventsCh()\n\n\tticker := time.NewTicker(interval)\n\tdefer ticker.Stop()\n\n\tvar wg sync.WaitGroup\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase event := <-eventCh:\n\t\t\t\tevents := []optic.Event{event}\n\t\t\t\tfor _, processor := range source.Config.Processors {\n\t\t\t\t\tevents = processor.Processor.Apply(event)\n\t\t\t\t\tif len(events) == 0 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor _, e := range events {\n\t\t\t\t\tgo func(event optic.Event) {\n\t\t\t\t\t\tsource.ForwardEvent(event)\n\t\t\t\t\t}(e)\n\t\t\t\t}\n\t\t\tcase <-shutdown:\n\t\t\t\tif len(eventCh) > 0 {\n\t\t\t\t\t// keep going until eventCh is flushed\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}()\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tif err := a.flusher(shutdown, eventCh); err != nil {\n\t\t\tlog.Printf(\"ERROR Flusher routine failed, exiting: %s\\n\", err.Error())\n\t\t}\n\t}()\n\n\tacc := NewAccumulator(source, eventCh)\n\n\tfor {\n\t\tinternal.RandomSleep(a.Config.Agent.CollectionJitter, shutdown)\n\n\t\tstart := time.Now()\n\t\tgatherWithTimeout(shutdown, source, acc, interval)\n\t\telapsed := time.Since(start)\n\n\t\tGatherTime.Update(elapsed.Nanoseconds())\n\n\t\tselect {\n\t\tcase <-shutdown:\n\t\t\tlog.Println(\"INFO Flushing any cached events before shutdown\")\n\t\t\t// wait for eventCh to get flushed before flushing sinks\n\t\t\twg.Wait()\n\t\t\ta.flush()\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func (m *MeterImpl) collect(ctx context.Context, labels []attribute.KeyValue, measurements []Measurement) {\n\tm.provider.addMeasurement(Batch{\n\t\tCtx: ctx,\n\t\tLabels: labels,\n\t\tMeasurements: measurements,\n\t\tLibrary: m.library,\n\t})\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n if err := e.scrape(ch); err != nil {\n\t\tlog.Infof(\"Error scraping tinystats: %s\", err)\n\t}\n e.ipv4QueryA.Collect(ch)\n e.ipv4QueryNS.Collect(ch)\n e.ipv4QueryCNAME.Collect(ch)\n e.ipv4QuerySOA.Collect(ch)\n e.ipv4QueryPTR.Collect(ch)\n e.ipv4QueryHINFO.Collect(ch)\n e.ipv4QueryMX.Collect(ch)\n e.ipv4QueryTXT.Collect(ch)\n e.ipv4QueryRP.Collect(ch)\n e.ipv4QuerySIG.Collect(ch)\n e.ipv4QueryKEY.Collect(ch)\n e.ipv4QueryAAAA.Collect(ch)\n e.ipv4QueryAXFR.Collect(ch)\n e.ipv4QueryANY.Collect(ch)\n e.ipv4QueryTOTAL.Collect(ch)\n e.ipv4QueryOTHER.Collect(ch)\n e.ipv4QueryNOTAUTH.Collect(ch)\n e.ipv4QueryNOTIMPL.Collect(ch)\n e.ipv4QueryBADCLASS.Collect(ch)\n e.ipv4QueryNOQUERY.Collect(ch)\n\n e.ipv6QueryA.Collect(ch)\n e.ipv6QueryNS.Collect(ch)\n e.ipv6QueryCNAME.Collect(ch)\n e.ipv6QuerySOA.Collect(ch)\n e.ipv6QueryPTR.Collect(ch)\n e.ipv6QueryHINFO.Collect(ch)\n e.ipv6QueryMX.Collect(ch)\n e.ipv6QueryTXT.Collect(ch)\n e.ipv6QueryRP.Collect(ch)\n e.ipv6QuerySIG.Collect(ch)\n e.ipv6QueryKEY.Collect(ch)\n 
e.ipv6QueryAAAA.Collect(ch)\n e.ipv6QueryAXFR.Collect(ch)\n e.ipv6QueryANY.Collect(ch)\n e.ipv6QueryTOTAL.Collect(ch)\n e.ipv6QueryOTHER.Collect(ch)\n e.ipv6QueryNOTAUTH.Collect(ch)\n e.ipv6QueryNOTIMPL.Collect(ch)\n e.ipv6QueryBADCLASS.Collect(ch)\n e.ipv6QueryNOQUERY.Collect(ch)\n\n\treturn\n}", "func (g *Gatherer) startGathering(gatherList []string, errors *[]string) ([]reflect.SelectCase, []time.Time, error) {\n\tvar cases []reflect.SelectCase\n\tvar starts []time.Time\n\n\t// Starts the gathers in Go routines\n\tfor _, gatherID := range gatherList {\n\t\tgather, ok := gatherFunctions[gatherID]\n\t\tgFn := gather.function\n\t\tif !ok {\n\t\t\t*errors = append(*errors, fmt.Sprintf(\"unknown gatherId in config: %s\", gatherID))\n\t\t\tcontinue\n\t\t}\n\t\tchannel := make(chan gatherResult)\n\t\tcases = append(cases, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(channel)})\n\t\tgatherName := runtime.FuncForPC(reflect.ValueOf(gFn).Pointer()).Name()\n\n\t\tklog.V(5).Infof(\"Gathering %s\", gatherName)\n\t\tstarts = append(starts, time.Now())\n\t\tgo gFn(g, channel)\n\n\t\tif err := g.ctx.Err(); err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\n\treturn cases, starts, nil\n}", "func (agent *Agent) Run() error {\n\tif agent.NewrelicLicense == \"\" {\n\t\treturn errors.New(\"please, pass a valid newrelic license key\")\n\t}\n\n\tvar component newrelic_platform_go.IComponent\n\tcomponent = newrelic_platform_go.NewPluginComponent(agent.NewrelicName, agent.AgentGUID, agent.Verbose)\n\n\t// Add default metrics and tracer.\n\taddRuntimeMericsToComponent(component)\n\tagent.Tracer = newTracer(component)\n\n\t// Check agent flags and add relevant metrics.\n\tif agent.CollectGcStat {\n\t\taddGCMetricsToComponent(component, agent.GCPollInterval)\n\t\tagent.debug(fmt.Sprintf(\"Init GC metrics collection. Poll interval %d seconds.\", agent.GCPollInterval))\n\t}\n\n\tif agent.CollectMemoryStat {\n\t\taddMemoryMericsToComponent(component, agent.MemoryAllocatorPollInterval)\n\t\tagent.debug(fmt.Sprintf(\"Init memory allocator metrics collection. 
Poll interval %d seconds.\", agent.MemoryAllocatorPollInterval))\n\t}\n\n\tif agent.CollectHTTPStat {\n\t\tagent.initTimer()\n\t\taddHTTPMericsToComponent(component, agent.HTTPTimer)\n\t\tagent.debug(fmt.Sprintf(\"Init HTTP metrics collection.\"))\n\t}\n\n\tfor _, metric := range agent.CustomMetrics {\n\t\tcomponent.AddMetrica(metric)\n\t\tagent.debug(fmt.Sprintf(\"Init %s metric collection.\", metric.GetName()))\n\t}\n\n\tif agent.CollectHTTPStatuses {\n\t\tagent.initStatusCounters()\n\t\tcomponent = &resettableComponent{component, agent.HTTPStatusCounters}\n\t\taddHTTPStatusMetricsToComponent(component, agent.HTTPStatusCounters)\n\t\tagent.debug(fmt.Sprintf(\"Init HTTP status metrics collection.\"))\n\t}\n\n\t// Init newrelic reporting plugin.\n\tagent.plugin = newrelic_platform_go.NewNewrelicPlugin(agent.AgentVersion, agent.NewrelicLicense, agent.NewrelicPollInterval)\n\tagent.plugin.Client = agent.Client\n\tagent.plugin.Verbose = agent.Verbose\n\n\t// Add our metrics component to the plugin.\n\tagent.plugin.AddComponent(component)\n\n\t// Start reporting!\n\tgo agent.plugin.Run()\n\treturn nil\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\tup, result := e.scrape(ch)\n\n\tch <- e.totalScrapes\n\tch <- e.jsonParseFailures\n\tch <- prometheus.MustNewConstMetric(iqAirUp, prometheus.GaugeValue, up)\n\tch <- prometheus.MustNewConstMetric(iqAirCO2, prometheus.GaugeValue, float64(result.CO2))\n\tch <- prometheus.MustNewConstMetric(iqAirP25, prometheus.GaugeValue, float64(result.P25))\n\tch <- prometheus.MustNewConstMetric(iqAirP10, prometheus.GaugeValue, float64(result.P10))\n\tch <- prometheus.MustNewConstMetric(iqAirTemp, prometheus.GaugeValue, float64(result.Temperature))\n\tch <- prometheus.MustNewConstMetric(iqAirHumidity, prometheus.GaugeValue, float64(result.Humidity))\n}", "func (s *SystemdTimings) Gather(acc telegraf.Accumulator) error {\n\tif !bootIsFinished() {\n\t\t// We are not ready to collect yet, telegraf will call us later to\n\t\t// try again.\n\t\treturn nil\n\t}\n\n\tif s.Periodic == false {\n\t\t// We only want to run once.\n\t\tif collectionDone == true {\n\t\t\t// By default we only collect once since these are generally boot\n\t\t\t// time metrics.\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// Connect to the systemd dbus.\n\tdbusConn, err := dbus.NewSystemConnection()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer dbusConn.Close()\n\n\terr = postAllManagerProps(dbusConn, acc)\n\tif err != nil {\n\t\tacc.AddError(err)\n\t\treturn err\n\t}\n\n\t// Read all unit timing data.\n\terr = postAllUnitTimingData(dbusConn, acc, s)\n\tif err != nil {\n\t\tacc.AddError(err)\n\t\treturn err\n\t}\n\n\tif err == nil {\n\t\tcollectionDone = true\n\t}\n\n\treturn err\n}", "func (e *UwsgiExporter) Collect(ch chan<- prometheus.Metric) {\n\tstartTime := time.Now()\n\terr := e.execute(ch)\n\td := time.Since(startTime).Seconds()\n\n\tif err != nil {\n\t\tlog.Errorf(\"ERROR: scrape failed after %fs: %s\", d, err)\n\t\te.uwsgiUp.Set(0)\n\t\te.scrapeDurations.WithLabelValues(\"error\").Observe(d)\n\t} else {\n\t\tlog.Debugf(\"OK: scrape successful after %fs.\", d)\n\t\te.uwsgiUp.Set(1)\n\t\te.scrapeDurations.WithLabelValues(\"success\").Observe(d)\n\t}\n\n\te.uwsgiUp.Collect(ch)\n\te.scrapeDurations.Collect(ch)\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\tup := 
e.scrape(ch)\n\n\tch <- prometheus.MustNewConstMetric(artifactoryUp, prometheus.GaugeValue, up)\n\tch <- e.totalScrapes\n\tch <- e.jsonParseFailures\n}", "func CollectAllMetrics(client *statsd.Client, log *li.StandardLogger) {\n\n\tvar metrics []metric\n\tmetrics = append(metrics, metric{name: \"gpu.temperature\", cmd: \"vcgencmd measure_temp | egrep -o '[0-9]*\\\\.[0-9]*'\"})\n\tmetrics = append(metrics, metric{name: \"cpu.temperature\", cmd: \"cat /sys/class/thermal/thermal_zone0/temp | awk 'END {print $1/1000}'\"})\n\tmetrics = append(metrics, metric{name: \"threads\", cmd: \"ps -eo nlwp | tail -n +2 | awk '{ num_threads += $1 } END { print num_threads }'\"})\n\tmetrics = append(metrics, metric{name: \"processes\", cmd: \"ps axu | wc -l\"})\n\n\tfor range time.Tick(15 * time.Second) {\n\t\tlog.Info(\"Starting metric collection\")\n\t\tfor _, m := range metrics {\n\t\t\terr := collectMetric(m, client, log)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t}\n\t\t}\n\t}\n}", "func (m *Metrics) start() error {\n\tm.once.Do(func() {\n\t\tm.define(\"\")\n\n\t\tprometheus.MustRegister(requestCount)\n\t\tprometheus.MustRegister(requestDuration)\n\t\tprometheus.MustRegister(responseLatency)\n\t\tprometheus.MustRegister(responseSize)\n\t\tprometheus.MustRegister(responseStatus)\n\n\t\tif !m.UseCaddyAddr {\n\t\t\thttp.Handle(m.Path, m.handler)\n\t\t\tgo func() {\n\t\t\t\terr := http.ListenAndServe(m.Addr, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\tm.logger.Error(\"start prometheus handler\", zap.Error(err))\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t})\n\treturn nil\n}", "func (collector *Collector) Start() {\n\t// Begin our internal processing first\n\tgo collector.process()\n\n\t// Start the prospector to start collecting data\n\tcollector.prospector.Start()\n}", "func (a *Accumulator)Start(){\n\tgo func() {\n\t\tfor stats := range a.StatsChan {\n\t\t\ta.mu.Lock()\n\t\t\ta.Stats = append(a.Stats, stats)\n\t\t\ta.mu.Unlock()\n\t\t\tif ( len(a.Stats) >= a.MaxResponses) {\n\t\t\t\tLog(\"top\", \"All requests received\")\n\t\t\t\ta.Done <- true\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tfor stats := range a.OverallStatsChan {\n\t\t\ta.mu.Lock()\n\t\t\ta.OverallStats = append(a.OverallStats, stats)\n\t\t\ta.mu.Unlock()\n\t\t}\n\t}()\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tresp, err := e.Pihole.GetMetrics()\n\tif err != nil {\n\t\tlog.Errorf(\"Pihole error: %s\", err.Error())\n\t\treturn\n\t}\n\tlog.Debugf(\"PiHole metrics: %#v\", resp)\n\tch <- prometheus.MustNewConstMetric(\n\t\tdomainsBeingBlocked, prometheus.CounterValue, float64(resp.DomainsBeingBlocked))\n\n\tch <- prometheus.MustNewConstMetric(\n\t\tdnsQueries, prometheus.CounterValue, float64(resp.DNSQueriesToday))\n\n\tch <- prometheus.MustNewConstMetric(\n\t\tadsBlocked, prometheus.CounterValue, float64(resp.AdsBlockedToday))\n\n\tch <- prometheus.MustNewConstMetric(\n\t\tadsPercentage, prometheus.CounterValue, float64(resp.AdsPercentageToday))\n\n\tfor k, v := range resp.Querytypes {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tqueryTypes, prometheus.CounterValue, v, k)\n\t}\n\tfor k, v := range resp.TopQueries {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\ttopQueries, prometheus.CounterValue, float64(v), k)\n\t}\n\tfor k, v := range resp.TopAds {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\ttopAds, prometheus.CounterValue, float64(v), k)\n\n\t}\n\tfor k, v := range resp.TopSources {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\ttopSources, prometheus.CounterValue, float64(v), k)\n\t}\n}", "func (c 
*CloudWatch) gatherMetrics(\n\tparams *cwClient.GetMetricDataInput,\n) ([]types.MetricDataResult, error) {\n\tresults := []types.MetricDataResult{}\n\n\tfor {\n\t\tresp, err := c.client.GetMetricData(context.Background(), params)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to get metric data: %w\", err)\n\t\t}\n\n\t\tresults = append(results, resp.MetricDataResults...)\n\t\tif resp.NextToken == nil {\n\t\t\tbreak\n\t\t}\n\t\tparams.NextToken = resp.NextToken\n\t}\n\n\treturn results, nil\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\tif err := e.scrape(ch); err != nil {\n\t\tlog.Printf(\"Error scraping nightscout url: %s\", err)\n\t}\n\n\te.statusNightscout.Collect(ch)\n\n\treturn\n}", "func (sc *SlurmCollector) Collect(ch chan<- prometheus.Metric) {\n\tsc.mutex.Lock()\n\tdefer sc.mutex.Unlock()\n\n\tlog.Debugf(\"Time since last scrape: %f seconds\", time.Since(sc.lastScrape).Seconds())\n\tif time.Since(sc.lastScrape).Seconds() > float64(sc.scrapeInterval) {\n\t\tsc.updateDynamicJobIds()\n\t\tvar err error\n\t\tsc.sshClient, err = sc.sshConfig.NewClient()\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Creating SSH client: %s\", err.Error())\n\t\t\treturn\n\t\t}\n\t\tdefer sc.sshClient.Close()\n\t\tlog.Infof(\"Collecting metrics from Slurm...\")\n\t\tsc.trackedJobs = make(map[string]bool)\n\t\tif sc.targetJobIds == \"\" {\n\t\t\t// sc.collectQueue()\n\t\t} else {\n\t\t\tsc.collectAcct()\n\t\t}\n\t\tif !sc.skipInfra {\n\t\t\tsc.collectInfo()\n\t\t}\n\t\tsc.lastScrape = time.Now()\n\t\tsc.delJobs()\n\n\t}\n\n\tsc.updateMetrics(ch)\n}", "func (s *Stats) Start(done <-chan bool) {\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\ts.scrape()\n\t\t\t\ttime.Sleep(time.Second)\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tt := time.NewTicker(10 * time.Second)\n\t\tdefer t.Stop()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\tcase <-t.C:\n\t\t\t\tif err := s.scan(); err != nil {\n\t\t\t\t\tlog.Debug(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tvar up float64 = 1\n\n\tglobalMutex.Lock()\n\tdefer globalMutex.Unlock()\n\n\tif e.config.resetStats && !globalResetExecuted {\n\t\t// Its time to try to reset the stats\n\t\tif e.resetStatsSemp1() {\n\t\t\tlevel.Info(e.logger).Log(\"msg\", \"Statistics successfully reset\")\n\t\t\tglobalResetExecuted = true\n\t\t\tup = 1\n\t\t} else {\n\t\t\tup = 0\n\t\t}\n\t}\n\n\tif e.config.details {\n\t\tif up > 0 {\n\t\t\tup = e.getClientSemp1(ch)\n\t\t}\n\t\tif up > 0 {\n\t\t\tup = e.getQueueSemp1(ch)\n\t\t}\n\t\tif up > 0 && e.config.scrapeRates {\n\t\t\tup = e.getQueueRatesSemp1(ch)\n\t\t}\n\t} else { // Basic\n\t\tif up > 0 {\n\t\t\tup = e.getRedundancySemp1(ch)\n\t\t}\n\t\tif up > 0 {\n\t\t\tup = e.getSpoolSemp1(ch)\n\t\t}\n\t\tif up > 0 {\n\t\t\tup = e.getHealthSemp1(ch)\n\t\t}\n\t\tif up > 0 {\n\t\t\tup = e.getVpnSemp1(ch)\n\t\t}\n\t}\n\n\tch <- prometheus.MustNewConstMetric(solaceUp, prometheus.GaugeValue, up)\n}", "func (g gatherer) GatherMetrics(ctx context.Context, out *apm.Metrics) error {\n\tmetricFamilies, err := g.p.Gather()\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\tfor _, mf := range metricFamilies {\n\t\tname := mf.GetName()\n\t\tswitch mf.GetType() {\n\t\tcase dto.MetricType_COUNTER:\n\t\t\tfor _, m := range mf.GetMetric() {\n\t\t\t\tv := 
m.GetCounter().GetValue()\n\t\t\t\tout.Add(name, makeLabels(m.GetLabel()), v)\n\t\t\t}\n\t\tcase dto.MetricType_GAUGE:\n\t\t\tmetrics := mf.GetMetric()\n\t\t\tif name == \"go_info\" && len(metrics) == 1 && metrics[0].GetGauge().GetValue() == 1 {\n\t\t\t\t// Ignore the \"go_info\" metric from the\n\t\t\t\t// built-in GoCollector, as we provide\n\t\t\t\t// the same information in the payload.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, m := range metrics {\n\t\t\t\tv := m.GetGauge().GetValue()\n\t\t\t\tout.Add(name, makeLabels(m.GetLabel()), v)\n\t\t\t}\n\t\tcase dto.MetricType_UNTYPED:\n\t\t\tfor _, m := range mf.GetMetric() {\n\t\t\t\tv := m.GetUntyped().GetValue()\n\t\t\t\tout.Add(name, makeLabels(m.GetLabel()), v)\n\t\t\t}\n\t\tcase dto.MetricType_SUMMARY:\n\t\t\tfor _, m := range mf.GetMetric() {\n\t\t\t\ts := m.GetSummary()\n\t\t\t\tlabels := makeLabels(m.GetLabel())\n\t\t\t\tout.Add(name+\".count\", labels, float64(s.GetSampleCount()))\n\t\t\t\tout.Add(name+\".total\", labels, float64(s.GetSampleSum()))\n\t\t\t\tfor _, q := range s.GetQuantile() {\n\t\t\t\t\tp := int(q.GetQuantile() * 100)\n\t\t\t\t\tout.Add(name+\".percentile.\"+strconv.Itoa(p), labels, q.GetValue())\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\t// TODO(axw) MetricType_HISTOGRAM\n\t\t}\n\t}\n\treturn nil\n}", "func (c *Collector) Run(ctx context.Context) {\n\twg := sync.WaitGroup{}\n\tquit := ctx.Done()\n\taggregationPeriod := time.Duration(c.config.AggregationPeriod.Duration)\n\t// If enabled, start periodically aggregating the collected HTTP trails\n\tif aggregationPeriod > 0 {\n\t\twg.Add(1)\n\t\taggregationTicker := time.NewTicker(aggregationPeriod)\n\t\taggregationWaitPeriod := time.Duration(c.config.AggregationWaitPeriod.Duration)\n\t\tsignalQuit := make(chan struct{})\n\t\tquit = signalQuit\n\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-c.stopSendingMetricsCh:\n\t\t\t\t\treturn\n\t\t\t\tcase <-aggregationTicker.C:\n\t\t\t\t\tc.aggregateHTTPTrails(aggregationWaitPeriod)\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\tc.aggregateHTTPTrails(0)\n\t\t\t\t\tc.flushHTTPTrails()\n\t\t\t\t\tclose(signalQuit)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\tdefer func() {\n\t\twg.Wait()\n\t\tc.testFinished()\n\t}()\n\n\tpushTicker := time.NewTicker(time.Duration(c.config.MetricPushInterval.Duration))\n\tfor {\n\t\tselect {\n\t\tcase <-c.stopSendingMetricsCh:\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\t\tselect {\n\t\tcase <-quit:\n\t\t\tc.pushMetrics()\n\t\t\treturn\n\t\tcase <-pushTicker.C:\n\t\t\tc.pushMetrics()\n\t\t}\n\t}\n}", "func (m *CirconusMetrics) Start() {\n\t// nop\n}", "func (b *Basic) startMetricsUpdater() {\n\tfor {\n\t\tupdateFreq := b.getMetricsUpdateFreqMillis()\n\t\tif updateFreq == 0 {\n\t\t\tupdateFreq = DefMetricsUpdateFreqMillis\n\t\t}\n\t\tif updateFreq < 250 {\n\t\t\tupdateFreq = 250 // don't peg the CPU\n\t\t}\n\n\t\tselect {\n\t\tcase <-b.done:\n\t\t\treturn\n\t\tcase <-time.After(time.Duration(updateFreq) * time.Millisecond):\n\t\t\tb.setQueueSizeGauge(float64(len(b.in)))\n\t\t}\n\t}\n}", "func (a *AttunityCollector) Collect(ch chan<- prometheus.Metric) {\n\n\t// Collect information on what servers are active\n\tservers, err := a.servers()\n\tif err != nil {\n\n\t\t// If the error is because the session_id expired, attempt to get a new one and collect info again\n\t\t// else, just fail with an invalid metric containing the error\n\t\tif strings.Contains(err.Error(), \"INVALID_SESSION_ID\") {\n\t\t\ta.SessionID = getSessionID(a.httpClient, a.APIURL, 
a.auth)\n\n\t\t\tservers, err = a.servers()\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Error(err)\n\t\t\t\tch <- prometheus.NewInvalidMetric(prometheus.NewDesc(\"attunity_error\", \"Error scraping target\", nil, nil), err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t} else {\n\t\t\tlogrus.Error(err)\n\t\t\tch <- prometheus.NewInvalidMetric(prometheus.NewDesc(\"attunity_error\", \"Error scraping target\", nil, nil), err)\n\t\t\treturn\n\t\t}\n\n\t} // end error handling for a.servers()\n\n\tfor _, s := range servers {\n\t\tch <- prometheus.MustNewConstMetric(serverDesc, prometheus.GaugeValue, 1.0, s.Name, s.State, s.Platform, s.Host)\n\t}\n\n\t// For each server, concurrently collect detailed information on\n\t// the tasks that are running on them.\n\twg := sync.WaitGroup{}\n\twg.Add(len(servers))\n\tfor _, s := range servers {\n\t\t// If the Server is not monitored, then it will not have any tasks so we can skip this bit.\n\t\tif s.State != \"MONITORED\" {\n\t\t\twg.Done()\n\t\t\tcontinue\n\t\t}\n\t\tgo func(s server) {\n\t\t\ttaskStates, err := a.taskStates(s.Name)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Error(err)\n\t\t\t} else {\n\t\t\t\tfor _, t := range taskStates {\n\t\t\t\t\t// inspired by: https://github.com/prometheus/node_exporter/blob/v0.18.1/collector/systemd_linux.go#L222\n\t\t\t\t\tfor _, tsn := range taskStateNames {\n\t\t\t\t\t\tvalue := 0.0\n\t\t\t\t\t\tif t.State == tsn {\n\t\t\t\t\t\t\tvalue = 1.0\n\t\t\t\t\t\t}\n\t\t\t\t\t\tch <- prometheus.MustNewConstMetric(taskStateDesc, prometheus.GaugeValue, value, s.Name, t.Name, tsn)\n\t\t\t\t\t}\n\n\t\t\t\t\t// Get details on each of the tasks and send them to the channel, too\n\t\t\t\t\tt.details(s.Name, a, ch)\n\t\t\t\t}\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(s)\n\t}\n\n\t// For each server, collect high level details such as\n\t// how many tasks are in each state on them and\n\t// how many days until license expiration\n\twg.Add(len(servers))\n\tfor _, s := range servers {\n\t\tgo func(s server) {\n\t\t\tserverDeets, err := a.serverDetails(s.Name)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Error(err)\n\t\t\t} else {\n\t\t\t\t// Create metrics for task totals by state\n\t\t\t\t// These counts will not be affected by included/excluded task in the config file\n\t\t\t\tch <- prometheus.MustNewConstMetric(serverTasksDesc, prometheus.GaugeValue, float64(serverDeets.TaskSummary.Running), s.Name, \"RUNNING\")\n\t\t\t\tch <- prometheus.MustNewConstMetric(serverTasksDesc, prometheus.GaugeValue, float64(serverDeets.TaskSummary.Stopped), s.Name, \"STOPPED\")\n\t\t\t\tch <- prometheus.MustNewConstMetric(serverTasksDesc, prometheus.GaugeValue, float64(serverDeets.TaskSummary.Error), s.Name, \"ERROR\")\n\t\t\t\tch <- prometheus.MustNewConstMetric(serverTasksDesc, prometheus.GaugeValue, float64(serverDeets.TaskSummary.Recovering), s.Name, \"RECOVERING\")\n\n\t\t\t\t// Create metric for license expiration\n\t\t\t\tch <- prometheus.MustNewConstMetric(serverLicenseExpirationDesc, prometheus.GaugeValue, float64(serverDeets.License.DaysToExpiration), s.Name)\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(s)\n\t}\n\twg.Wait()\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\tif err := e.collect(ch); err != nil {\n\t\tlog.Errorf(\"Error scraping ingestor: %s\", err)\n\t}\n\treturn\n}", "func (g gatherer) GatherMetrics(ctx context.Context, m *elasticapm.Metrics) error {\n\tg.r.Each(func(name string, v interface{}) {\n\t\tswitch v := v.(type) {\n\t\tcase 
metrics.Counter:\n\t\t\tm.Add(name, nil, float64(v.Count()))\n\t\tcase metrics.Gauge:\n\t\t\tm.Add(name, nil, float64(v.Value()))\n\t\tcase metrics.GaugeFloat64:\n\t\t\tm.Add(name, nil, v.Value())\n\t\tcase metrics.Histogram:\n\t\t\tm.Add(name+\".count\", nil, float64(v.Count()))\n\t\t\tm.Add(name+\".total\", nil, float64(v.Sum()))\n\t\t\tm.Add(name+\".min\", nil, float64(v.Min()))\n\t\t\tm.Add(name+\".max\", nil, float64(v.Max()))\n\t\t\tm.Add(name+\".stddev\", nil, v.StdDev())\n\t\t\tm.Add(name+\".percentile.50\", nil, v.Percentile(0.5))\n\t\t\tm.Add(name+\".percentile.95\", nil, v.Percentile(0.95))\n\t\t\tm.Add(name+\".percentile.99\", nil, v.Percentile(0.99))\n\t\tdefault:\n\t\t\t// TODO(axw) Meter, Timer, EWMA\n\t\t}\n\t})\n\treturn nil\n}", "func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {\n\tfilteredMetrics, err := getFilteredMetrics(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.updateWindow(time.Now())\n\n\t// Get all of the possible queries so we can send groups of 100.\n\tqueries := c.getDataQueries(filteredMetrics)\n\tif len(queries) == 0 {\n\t\treturn nil\n\t}\n\n\t// Limit concurrency or we can easily exhaust user connection limit.\n\t// See cloudwatch API request limits:\n\t// http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html\n\tlmtr := limiter.NewRateLimiter(c.RateLimit, time.Second)\n\tdefer lmtr.Stop()\n\twg := sync.WaitGroup{}\n\trLock := sync.Mutex{}\n\n\tresults := map[string][]types.MetricDataResult{}\n\n\tfor namespace, namespacedQueries := range queries {\n\t\tvar batches [][]types.MetricDataQuery\n\n\t\tfor c.BatchSize < len(namespacedQueries) {\n\t\t\tnamespacedQueries, batches = namespacedQueries[c.BatchSize:], append(batches, namespacedQueries[0:c.BatchSize:c.BatchSize])\n\t\t}\n\t\tbatches = append(batches, namespacedQueries)\n\n\t\tfor i := range batches {\n\t\t\twg.Add(1)\n\t\t\t<-lmtr.C\n\t\t\tgo func(n string, inm []types.MetricDataQuery) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tresult, err := c.gatherMetrics(c.getDataInputs(inm))\n\t\t\t\tif err != nil {\n\t\t\t\t\tacc.AddError(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\trLock.Lock()\n\t\t\t\tresults[n] = append(results[n], result...)\n\t\t\t\trLock.Unlock()\n\t\t\t}(namespace, batches[i])\n\t\t}\n\t}\n\n\twg.Wait()\n\treturn c.aggregateMetrics(acc, results)\n}", "func (c *Client) Collect(ch chan<- prometheus.Metric) {\n\tc.metrics.functionInvocation.Collect(ch)\n\tc.metrics.functionsHistogram.Collect(ch)\n\tc.metrics.queueHistogram.Collect(ch)\n\tc.metrics.functionInvocationStarted.Collect(ch)\n\tc.metrics.serviceReplicasGauge.Reset()\n\tfor _, service := range c.services {\n\t\tvar serviceName string\n\t\tif len(service.Namespace) > 0 {\n\t\t\tserviceName = fmt.Sprintf(\"%s.%s\", service.Name, service.Namespace)\n\t\t} else {\n\t\t\tserviceName = service.Name\n\t\t}\n\t\tc.metrics.serviceReplicasGauge.\n\t\t\tWithLabelValues(serviceName).\n\t\t\tSet(float64(service.Replicas))\n\t}\n\tc.metrics.serviceReplicasGauge.Collect(ch)\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tignore := strings.Split(*flagStatsIgnore, \",\")\n\tif len(ignore) == 1 && ignore[0] == \"\" {\n\t\tignore = []string{}\n\t}\n\tnicks := strings.Split(*flagStatsNicks, \",\")\n\tif len(nicks) == 1 && nicks[0] == \"\" {\n\t\tnicks = []string{}\n\t}\n\tres := e.client.Stats(irc.StatsRequest{\n\t\tLocal: *flagStatsLocal,\n\t\tTimeout: *flagStatsTimeout,\n\t\tIgnoreServers: ignore,\n\t\tNicks: nicks,\n\t})\n\n\tch <- prometheus.MustNewConstMetric(\n\t\tconnected, 
prometheus.GaugeValue, boolToFloat[e.client.Server != \"\"])\n\n\t_, ok := res.Servers[e.client.Server]\n\tif res.Timeout && !ok {\n\t\t// Timeout, no data at all\n\t\tif e.client.Server != \"\" {\n\t\t\tch <- prometheus.MustNewConstMetric(\n\t\t\t\tup, prometheus.GaugeValue, 0.0, e.client.Server)\n\t\t}\n\t} else {\n\t\t// Global state\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tchannels, prometheus.GaugeValue, float64(res.Channels))\n\n\t\tfor nick, nickIson := range res.Nicks {\n\t\t\tch <- prometheus.MustNewConstMetric(\n\t\t\t\tison, prometheus.GaugeValue, boolToFloat[nickIson], nick)\n\t\t}\n\n\t\t// Per server state\n\t\tfor server, stats := range res.Servers {\n\t\t\tch <- prometheus.MustNewConstMetric(\n\t\t\t\tdistance, prometheus.GaugeValue, float64(stats.Distance), server)\n\n\t\t\tif *flagStatsLocal && e.client.Server != server {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tch <- prometheus.MustNewConstMetric(\n\t\t\t\tup, prometheus.GaugeValue, boolToFloat[stats.Up], server)\n\n\t\t\tif stats.Up {\n\t\t\t\tch <- prometheus.MustNewConstMetric(\n\t\t\t\t\tusers, prometheus.GaugeValue, float64(stats.Users), server)\n\n\t\t\t\tch <- prometheus.MustNewConstMetric(\n\t\t\t\t\tlatency, prometheus.GaugeValue, float64(stats.ResponseTime.Sub(stats.RequestTime))/float64(time.Second), server)\n\t\t\t}\n\t\t}\n\t}\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock()\n\tdefer e.mutex.Unlock()\n\n\te.scrape()\n\n\te.up.Collect(ch)\n\te.totalScrapes.Collect(ch)\n\te.exchangeStatus.Collect(ch)\n\te.ltp.Collect(ch)\n\te.bestBid.Collect(ch)\n\te.bestAsk.Collect(ch)\n\te.bestBidSize.Collect(ch)\n\te.bestAskSize.Collect(ch)\n\te.totalBidDepth.Collect(ch)\n\te.totalAskDepth.Collect(ch)\n\te.volume.Collect(ch)\n\te.volumeByProduct.Collect(ch)\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tvar (\n\t\tdata *Data\n\t\terr error\n\t)\n\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\te.resetGaugeVecs() // Clean starting point\n\n\tvar endpointOfAPI []string\n\tif strings.HasSuffix(rancherURL, \"v3\") || strings.HasSuffix(rancherURL, \"v3/\") {\n\t\tendpointOfAPI = endpointsV3\n\t} else {\n\t\tendpointOfAPI = endpoints\n\t}\n\n\tcacheExpired := e.IsCacheExpired()\n\n\t// Range over the pre-configured endpoints array\n\tfor _, p := range endpointOfAPI {\n\t\tif cacheExpired {\n\t\t\tdata, err = e.gatherData(e.rancherURL, e.resourceLimit, e.accessKey, e.secretKey, p, ch)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Error getting JSON from URL %s\", p)\n\t\t\t\treturn\n\t\t\t}\n\t\t\te.cache[p] = data\n\t\t} else {\n\t\t\td, ok := e.cache[p]\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdata = d\n\t\t}\n\n\t\tif err := e.processMetrics(data, p, e.hideSys, ch); err != nil {\n\t\t\tlog.Errorf(\"Error scraping rancher url: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tlog.Infof(\"Metrics successfully processed for %s\", p)\n\t}\n\n\tif cacheExpired {\n\t\te.RenewCache()\n\t}\n\n\tfor _, m := range e.gaugeVecs {\n\t\tm.Collect(ch)\n\t}\n}", "func (*resourceMetricProcessor) Start(ctx context.Context, host component.Host) error {\n\treturn nil\n}", "func (r *Reporter) Start(ctx context.Context) error {\n\tif r.Errors != nil {\n\t\tr.D.Debug(\"starting errors reporter\")\n\t\tif err := r.Errors.Start(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tr.D.Debug(\"errors reporter started\")\n\t}\n\n\tif r.Logging != nil {\n\t\tr.D.Debug(\"starting logging reporter\")\n\t\tif err := r.Logging.Start(ctx); err != nil {\n\t\t\treturn 
err\n\t\t}\n\t\tr.D.Debug(\"logging reporter started\")\n\t}\n\n\tif r.Metrics != nil {\n\t\tr.D.Debug(\"starting metrics reporter\")\n\t\tif err := r.Metrics.Start(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tr.D.Debug(\"metrics reporter started\")\n\t}\n\n\treturn nil\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tok := e.collectPeersMetric(ch)\n\tok = e.collectLeaderMetric(ch) && ok\n\tok = e.collectNodesMetric(ch) && ok\n\tok = e.collectMembersMetric(ch) && ok\n\tok = e.collectMembersWanMetric(ch) && ok\n\tok = e.collectServicesMetric(ch) && ok\n\tok = e.collectHealthStateMetric(ch) && ok\n\tok = e.collectKeyValues(ch) && ok\n\n\tif ok {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tup, prometheus.GaugeValue, 1.0,\n\t\t)\n\t} else {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tup, prometheus.GaugeValue, 0.0,\n\t\t)\n\t}\n}", "func (t *Twemproxy) Gather(acc telegraf.Accumulator) error {\n\tconn, err := net.DialTimeout(\"tcp\", t.Addr, 1*time.Second)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbody, err := io.ReadAll(conn)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar stats map[string]interface{}\n\tif err = json.Unmarshal(body, &stats); err != nil {\n\t\treturn errors.New(\"error decoding JSON response\")\n\t}\n\n\ttags := make(map[string]string)\n\ttags[\"twemproxy\"] = t.Addr\n\tt.processStat(acc, tags, stats)\n\n\treturn nil\n}", "func (acir *awsContainerInsightReceiver) collectData(ctx context.Context) error {\n\tvar mds []pmetric.Metrics\n\tif acir.cadvisor == nil && acir.k8sapiserver == nil {\n\t\terr := errors.New(\"both cadvisor and k8sapiserver failed to start\")\n\t\tacir.settings.Logger.Error(\"Failed to collect stats\", zap.Error(err))\n\t\treturn err\n\t}\n\n\tif acir.cadvisor != nil {\n\t\tmds = append(mds, acir.cadvisor.GetMetrics()...)\n\t}\n\n\tif acir.k8sapiserver != nil {\n\t\tmds = append(mds, acir.k8sapiserver.GetMetrics()...)\n\t}\n\n\tfor _, md := range mds {\n\t\terr := acir.nextConsumer.ConsumeMetrics(ctx, md)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (r *MetricRegistry) Start() {\n\tr.mu.Lock()\n\tif !r.started {\n\t\tr.wg.Add(1)\n\t\tgo func() {\n\t\t\tdefer r.wg.Done()\n\t\t\tr.run()\n\t\t}()\n\t}\n\tr.mu.Unlock()\n}", "func (p *Plugin) CollectMetrics(metrics []plugin.Metric) ([]plugin.Metric, error) {\n\tvar mtxMetrics sync.Mutex\n\tvar wgCollectedMetrics sync.WaitGroup\n\n\t//initialization of plugin structure (only once)\n\tif !p.initialized {\n\t\tconfigs, err := getMetricsConfig(metrics[0].Config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, cfg := range configs {\n\t\t\tnamespace := core.NewNamespace(Vendor, PluginName)\n\t\t\tfor _, ns := range cfg.Namespace {\n\t\t\t\tif ns.Source == configReader.NsSourceString {\n\t\t\t\t\tnamespace = namespace.AddStaticElement(ns.String)\n\t\t\t\t} else {\n\t\t\t\t\tnamespace = namespace.AddDynamicElement(ns.Name, ns.Description)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif _, metricExist := p.metricsConfigs[namespace.String()]; metricExist {\n\t\t\t\tlogFields := map[string]interface{}{\n\t\t\t\t\t\"namespace\": namespace.String(),\n\t\t\t\t\t\"previous_metric_configuration\": p.metricsConfigs[namespace.String()],\n\t\t\t\t\t\"current_metric_configuration\": cfg,\n\t\t\t\t}\n\t\t\t\tlog.WithFields(logFields).Warn(fmt.Errorf(\"Plugin configuration file (`setfile`) contains metrics definitions which expose the same namespace, only one of them is in use. 
Correction of plugin configuration file (`setfile`) is recommended.\"))\n\t\t\t} else {\n\t\t\t\t//add metric configuration to plugin metric map\n\t\t\t\tp.metricsConfigs[namespace.String()] = cfg\n\t\t\t}\n\t\t}\n\t\tp.initialized = true\n\t}\n\n\tagentConfig, err := configReader.GetSnmpAgentConfig(metrics[0].Config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//lock using of connections in watchConnections\n\tmtxSnmpConnections.Lock()\n\tdefer mtxSnmpConnections.Unlock()\n\n\tconn, err := getConnection(agentConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmts := []plugin.Metric{}\n\n\tfor _, metric := range metrics {\n\n\t\t//get metrics to collect\n\t\tmetricsConfigs, err := getMetricsToCollect(metric.Namespace.String(), p.metricsConfigs)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\twgCollectedMetrics.Add(len(metricsConfigs))\n\n\t\tfor _, cfg := range metricsConfigs {\n\n\t\t\tgo func(cfg configReader.Metric) {\n\n\t\t\t\tdefer wgCollectedMetrics.Done()\n\n\t\t\t\tconn.mtx.Lock()\n\n\t\t\t\t//get value of metric/metrics\n\t\t\t\tresults, err := snmp_.readElements(conn.handler, cfg.Oid, cfg.Mode)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Warn(err)\n\t\t\t\t\tconn.mtx.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t//get dynamic elements of namespace parts\n\t\t\t\terr = getDynamicNamespaceElements(conn.handler, results, &cfg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tconn.mtx.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tconn.lastUsed = time.Now()\n\t\t\t\tconn.mtx.Unlock()\n\n\t\t\t\tfor i, result := range results {\n\n\t\t\t\t\t//build namespace for metric\n\t\t\t\t\tnamespace := plugin.NewNamespace(Vendor, PluginName)\n\t\t\t\t\toffset := len(namespace)\n\t\t\t\t\tfor j, ns := range cfg.Namespace {\n\t\t\t\t\t\tif ns.Source == configReader.NsSourceString {\n\t\t\t\t\t\t\tnamespace = namespace.AddStaticElements(ns.String)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tnamespace = namespace.AddDynamicElement(ns.Name, ns.Description)\n\t\t\t\t\t\t\tnamespace[j+offset].Value = ns.Values[i]\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t//convert metric types\n\t\t\t\t\tval, err := convertSnmpDataToMetric(result.Variable.String(), result.Variable.Type())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\t//modify numeric metric - use scale and shift parameters\n\t\t\t\t\tdata := modifyNumericMetric(val, cfg.Scale, cfg.Shift)\n\n\t\t\t\t\t//creating metric\n\t\t\t\t\tmt := plugin.Metric{\n\t\t\t\t\t\tNamespace: namespace,\n\t\t\t\t\t\tData: data,\n\t\t\t\t\t\tTimestamp: time.Now(),\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\ttagSnmpAgentName: agentConfig.Name,\n\t\t\t\t\t\t\ttagSnmpAgentAddress: agentConfig.Address,\n\t\t\t\t\t\t\ttagOid: result.Oid.String()},\n\t\t\t\t\t\tUnit: metric.Unit,\n\t\t\t\t\t\tDescription: metric.Description,\n\t\t\t\t\t}\n\n\t\t\t\t\t//adding metric to list of metrics\n\t\t\t\t\tmtxMetrics.Lock()\n\n\t\t\t\t\t//filter specific instance\n\t\t\t\t\tnsPattern := strings.Replace(metric.Namespace.String(), \"*\", \".*\", -1)\n\t\t\t\t\tmatched, err := regexp.MatchString(nsPattern, mt.Namespace.String())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlogFields := map[string]interface{}{\"namespace\": mt.Namespace.String(), \"pattern\": nsPattern, \"match_error\": err}\n\t\t\t\t\t\terr := fmt.Errorf(\"Cannot parse namespace element for matching\")\n\t\t\t\t\t\tlog.WithFields(logFields).Warn(err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tif matched {\n\t\t\t\t\t\tmts = append(mts, 
mt)\n\t\t\t\t\t}\n\n\t\t\t\t\tmtxMetrics.Unlock()\n\t\t\t\t}\n\t\t\t}(cfg)\n\t\t}\n\t\twgCollectedMetrics.Wait()\n\t}\n\treturn mts, nil\n}", "func (sr *ServicedStatsReporter) gatherStats(t time.Time) []Sample {\n\tstats := []Sample{}\n\t// Handle the host metrics.\n\treg, _ := sr.hostRegistry.(*metrics.StandardRegistry)\n\treg.Each(func(name string, i interface{}) {\n\t\ttagmap := map[string]string{\n\t\t\t\"controlplane_host_id\": sr.hostID,\n\t\t}\n\t\tswitch metric := i.(type) {\n\t\tcase metrics.Gauge:\n\t\t\tstats = append(stats, Sample{name, strconv.FormatInt(metric.Value(), 10), t.Unix(), tagmap})\n\t\tcase metrics.GaugeFloat64:\n\t\t\tstats = append(stats, Sample{name, strconv.FormatFloat(metric.Value(), 'f', -1, 32), t.Unix(), tagmap})\n\t\t}\n\t})\n\t// Handle each container's metrics.\n\tfor key, registry := range sr.containerRegistries {\n\t\treg, _ := registry.(*metrics.StandardRegistry)\n\t\treg.Each(func(name string, i interface{}) {\n\t\t\ttagmap := map[string]string{\n\t\t\t\t\"controlplane_host_id\": sr.hostID,\n\t\t\t\t\"controlplane_service_id\": key.serviceID,\n\t\t\t\t\"controlplane_instance_id\": strconv.FormatInt(int64(key.instanceID), 10),\n\t\t\t}\n\t\t\tswitch metric := i.(type) {\n\t\t\tcase metrics.Gauge:\n\t\t\t\tstats = append(stats, Sample{name, strconv.FormatInt(metric.Value(), 10), t.Unix(), tagmap})\n\t\t\tcase metrics.GaugeFloat64:\n\t\t\t\tstats = append(stats, Sample{name, strconv.FormatFloat(metric.Value(), 'f', -1, 32), t.Unix(), tagmap})\n\t\t\t}\n\t\t})\n\t}\n\treturn stats\n}", "func (c *OrchestratorCollector) Collect(ch chan<- prometheus.Metric) {\n\tc.mutex.Lock() // To protect metrics from concurrent collects\n\tdefer c.mutex.Unlock()\n\n\tstats, err := c.orchestratorClient.GetMetrics()\n\tif err != nil {\n\t\tc.upMetric.Set(serviceDown)\n\t\tch <- c.upMetric\n\t\tlog.Printf(\"Error getting Orchestrator stats: %v\", err)\n\t\treturn\n\t}\n\n\tc.upMetric.Set(serviceUp)\n\tch <- c.upMetric\n\n\tch <- prometheus.MustNewConstMetric(c.metrics[\"cluter_size\"],\n\t\tprometheus.GaugeValue, float64(len(stats.Status.Details.AvailableNodes)))\n\tch <- prometheus.MustNewConstMetric(c.metrics[\"is_active_node\"],\n\t\tprometheus.GaugeValue, boolToFloat64(stats.Status.Details.IsActiveNode))\n\tch <- prometheus.MustNewConstMetric(c.metrics[\"problems\"],\n\t\tprometheus.GaugeValue, float64(len(stats.Problems)))\n\tch <- prometheus.MustNewConstMetric(c.metrics[\"last_failover_id\"],\n\t\tprometheus.CounterValue, float64(stats.LastFailoverID))\n\tch <- prometheus.MustNewConstMetric(c.metrics[\"is_healthy\"],\n\t\tprometheus.GaugeValue, boolToFloat64(stats.Status.Details.Healthy))\n\tch <- prometheus.MustNewConstMetric(c.metrics[\"failed_seeds\"],\n\t\tprometheus.CounterValue, float64(stats.FailedSeeds))\n}", "func CollectProcessMetrics(refresh time.Duration) {\n\t// Short circuit if the metics system is disabled\n\tif !Enabled {\n\t\treturn\n\t}\n\t// Create the various data collectors\n\tmemstates := make([]*runtime.MemStats, 2)\n\tdiskstates := make([]*DiskStats, 2)\n\tfor i := 0; i < len(memstates); i++ {\n\t\tmemstates[i] = new(runtime.MemStats)\n\t\tdiskstates[i] = new(DiskStats)\n\t}\n\t// Define the various metics to collect\n\tmemAllocs := metics.GetOrRegisterMeter(\"system/memory/allocs\", metics.DefaultRegistry)\n\tmemFrees := metics.GetOrRegisterMeter(\"system/memory/frees\", metics.DefaultRegistry)\n\tmemInuse := metics.GetOrRegisterMeter(\"system/memory/inuse\", metics.DefaultRegistry)\n\tmemPauses := 
metics.GetOrRegisterMeter(\"system/memory/pauses\", metics.DefaultRegistry)\n\n\tvar diskReads, diskReadBytes, diskWrites, diskWriteBytes metics.Meter\n\tif err := ReadDiskStats(diskstates[0]); err == nil {\n\t\tdiskReads = metics.GetOrRegisterMeter(\"system/disk/readcount\", metics.DefaultRegistry)\n\t\tdiskReadBytes = metics.GetOrRegisterMeter(\"system/disk/readdata\", metics.DefaultRegistry)\n\t\tdiskWrites = metics.GetOrRegisterMeter(\"system/disk/writecount\", metics.DefaultRegistry)\n\t\tdiskWriteBytes = metics.GetOrRegisterMeter(\"system/disk/writedata\", metics.DefaultRegistry)\n\t} else {\n\t\tbgmlogs.Debug(\"Failed to read disk metics\", \"err\", err)\n\t}\n\t// Iterate loading the different states and updating the meters\n\tfor i := 1; ; i++ {\n\t\truntime.ReadMemStats(memstates[i%2])\n\t\tmemAllocs.Mark(int64(memstates[i%2].Mallocs - memstates[(i-1)%2].Mallocs))\n\t\tmemFrees.Mark(int64(memstates[i%2].Frees - memstates[(i-1)%2].Frees))\n\t\tmemInuse.Mark(int64(memstates[i%2].Alloc - memstates[(i-1)%2].Alloc))\n\t\tmemPauses.Mark(int64(memstates[i%2].PauseTotalNs - memstates[(i-1)%2].PauseTotalNs))\n\n\t\tif ReadDiskStats(diskstates[i%2]) == nil {\n\t\t\tdiskReads.Mark(diskstates[i%2].ReadCount - diskstates[(i-1)%2].ReadCount)\n\t\t\tdiskReadBytes.Mark(diskstates[i%2].ReadBytes - diskstates[(i-1)%2].ReadBytes)\n\t\t\tdiskWrites.Mark(diskstates[i%2].WriteCount - diskstates[(i-1)%2].WriteCount)\n\t\t\tdiskWriteBytes.Mark(diskstates[i%2].WriteBytes - diskstates[(i-1)%2].WriteBytes)\n\t\t}\n\t\ttime.Sleep(refresh)\n\t}\n}", "func Start() {\n\n\trate := config.StatsRate\n\n\tif rate == 0 {\n\t\tlog.Printf(\"Stats collection disabled\")\n\t\treturn\n\t}\n\n\ts := &stats{}\n\ts.collect() // 1st time initialisation\n\n\tgo func() {\n\t\tfor _ = range time.Tick(rate) {\n\t\t\ts.collect()\n\t\t}\n\t}()\n\n\tlog.Printf(\"Stats collection started, frequency: %s\", rate)\n}", "func (c *StatsCollector) Collect(metricChannel chan<- prometheus.Metric) {\n\t// read all stats from Kamailio\n\tif completeStatMap, err := c.fetchStats(); err == nil {\n\t\t// and produce various prometheus.Metric for well-known stats\n\t\tproduceMetrics(completeStatMap, metricChannel)\n\t\t// produce prometheus.Metric objects for scripted stats (if any)\n\t\tconvertScriptedMetrics(completeStatMap, metricChannel)\n\t} else {\n\t\t// something went wrong\n\t\t// TODO: add a error metric\n\t\tlog.Error(\"Could not fetch values from kamailio\", err)\n\t}\n}", "func (metrics Metrics) Start() {\n\tticker := time.NewTicker(metrics.refreshRate)\n\tdefer ticker.Stop()\n\n\tif err := metrics.Hydrate(); err != nil {\n\t\tlog.Warn().Msg(err.Error())\n\t}\n\n\tmetrics.Persist()\n\tmetrics.MarkReady()\n\n\tselect {\n\tcase <-metrics.CanStart:\n\t\tbreak\n\tcase <-metrics.Done():\n\t\tmetrics.MarkDone()\n\t\treturn\n\t}\n\n\tlog.Info().Msgf(\"Start metrics daemon, update each %v into %v\", metrics.refreshRate, metrics.storage.Root)\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-metrics.Done():\n\t\t\t\tmetrics.Persist()\n\t\t\t\tmetrics.MarkDone()\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tmetrics.Persist()\n\t\t\t}\n\t\t}\n\t}()\n\n\tmetrics.WaitStop()\n\tlog.Info().Msg(\"Stop metrics daemon\")\n}", "func (p CognitoIdpPlugin) FetchMetrics() (map[string]interface{}, error) {\n\tstat := make(map[string]interface{})\n\n\tfor _, met := range [...]metrics{\n\t\t{CloudWatchName: \"SignUpSuccesses\", MackerelName: \"SignUpSuccesses\", Type: metricsTypeSum},\n\t\t{CloudWatchName: \"SignUpSuccesses\", MackerelName: 
\"SignUpParcentageOfSuccessful\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"SignUpSuccesses\", MackerelName: \"SignUpSampleCount\", Type: metricsTypeSampleCount},\n\t\t{CloudWatchName: \"SignUpThrottles\", MackerelName: \"SignUpThrottles\", Type: metricsTypeSum},\n\t\t{CloudWatchName: \"SignInSuccesses\", MackerelName: \"SignInSuccesses\", Type: metricsTypeSum},\n\t\t{CloudWatchName: \"SignInSuccesses\", MackerelName: \"SignInParcentageOfSuccessful\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"SignInSuccesses\", MackerelName: \"SignInSampleCount\", Type: metricsTypeSampleCount},\n\t\t{CloudWatchName: \"SignInThrottles\", MackerelName: \"SignInThrottles\", Type: metricsTypeSum},\n\t\t{CloudWatchName: \"TokenRefreshSuccesses\", MackerelName: \"TokenRefreshSuccesses\", Type: metricsTypeSum},\n\t\t{CloudWatchName: \"TokenRefreshSuccesses\", MackerelName: \"TokenRefreshParcentageOfSuccessful\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"TokenRefreshSuccesses\", MackerelName: \"TokenRefreshSampleCount\", Type: metricsTypeSampleCount},\n\t\t{CloudWatchName: \"TokenRefreshThrottles\", MackerelName: \"TokenRefreshThrottles\", Type: metricsTypeSum},\n\t\t{CloudWatchName: \"FederationSuccesses\", MackerelName: \"FederationSuccesses\", Type: metricsTypeSum},\n\t\t{CloudWatchName: \"FederationSuccesses\", MackerelName: \"FederationParcentageOfSuccessful\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"FederationSuccesses\", MackerelName: \"FederationSampleCount\", Type: metricsTypeSampleCount},\n\t\t{CloudWatchName: \"FederationThrottles\", MackerelName: \"FederationThrottles\", Type: metricsTypeSum},\n\t} {\n\t\tv, err := p.getLastPoint(met)\n\t\tif err == nil {\n\t\t\tstat[met.MackerelName] = v\n\t\t} else {\n\t\t\tlog.Printf(\"%s: %s\", met, err)\n\t\t}\n\t}\n\treturn stat, nil\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\tif err := e.collect(ch); err != nil {\n\t\tlog.Errorf(\"Error scraping: %s\", err)\n\t}\n\treturn\n}", "func (c *Collector) Collect(ch chan<- prometheus.Metric) {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\tc.totalScrapes.Inc()\n\terr := c.getDadataBalance()\n\tif err != nil {\n\t\tc.failedBalanceScrapes.Inc()\n\t}\n\terr = c.getDadataStats()\n\tif err != nil {\n\t\tc.failedStatsScrapes.Inc()\n\t}\n\n\tch <- c.totalScrapes\n\tch <- c.failedBalanceScrapes\n\tch <- c.failedStatsScrapes\n\tch <- c.CurrentBalance\n\tch <- c.ServicesClean\n\tch <- c.ServicesMerging\n\tch <- c.ServicesSuggestions\n}", "func (phStats *passwordHasherStats) startAccumulating() {\n\tgo phStats.accumulateStats()\n}", "func (c *SchedulerController) CollectMetrics(ch chan<- prometheus.Metric) {\n\tmetric, err := prometheus.NewConstMetric(scheduler.ControllerWorkerSum, prometheus.GaugeValue, float64(c.RunningWorkers()), \"seed\")\n\tif err != nil {\n\t\tscheduler.ScrapeFailures.With(prometheus.Labels{\"kind\": \"gardener-shoot-scheduler\"}).Inc()\n\t\treturn\n\t}\n\tch <- metric\n}", "func (c *VMCollector) Collect(ch chan<- prometheus.Metric) {\n\tfor _, m := range c.getMetrics() {\n\t\tch <- m\n\t}\n}", "func init() {\n\tmb.Registry.MustAddMetricSet(\"connection\", \"load_stats\", New)\n}", "func (c *MetricsCollector) Collect(ch chan<- prometheus.Metric) {\n\tfor _, s := range c.status {\n\t\ts.RLock()\n\t\tdefer s.RUnlock()\n\t\tch <- 
prometheus.MustNewConstMetric(\n\t\t\tc.verify,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(s.VerifyRestore),\n\t\t\t\"verify_restore\",\n\t\t\ts.BackupService,\n\t\t\ts.StorageService,\n\t\t)\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.verify,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(s.VerifyDiff),\n\t\t\t\"verify_diff\",\n\t\t\ts.BackupService,\n\t\t\ts.StorageService,\n\t\t)\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.verify,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(s.VerifyChecksum),\n\t\t\t\"verify_checksum\",\n\t\t\ts.BackupService,\n\t\t\ts.StorageService,\n\t\t)\n\t}\n\n}", "func (sc *controller) Start(ctx context.Context, host component.Host) error {\n\tfor _, scraper := range sc.resourceMetricScrapers {\n\t\tif err := scraper.Start(ctx, host); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tsc.initialized = true\n\tsc.startScraping()\n\treturn nil\n}", "func (s SolrPlugin) FetchMetrics() (map[string]interface{}, error) {\n\tstat := make(map[string]interface{})\n\tfor core, stats := range s.Stats {\n\t\tfor k, v := range stats {\n\t\t\tstat[core+\"_\"+k] = v\n\t\t}\n\t}\n\treturn stat, nil\n}", "func init() {\n\tprometheus.MustRegister(duration)\n\tprometheus.MustRegister(counter)\n\tprometheus.MustRegister(requestsTotal)\n}", "func (vmc *VMMetricsCollector) StartCollection() {\n\tdetectResource()\n\n\tgo func() {\n\t\tticker := time.NewTicker(vmc.scrapeInterval)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tvmc.scrapeAndExport()\n\n\t\t\tcase <-vmc.done:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func (collector *MetricsCollector) Collect(ch chan<- prometheus.Metric) {\n\tfilterMetricsByKind := func(kind string, orgMetrics []constMetric) (filteredMetrics []constMetric) {\n\t\tfor _, metric := range orgMetrics {\n\t\t\tif metric.kind == kind {\n\t\t\t\tfilteredMetrics = append(filteredMetrics, metric)\n\t\t\t}\n\t\t}\n\t\treturn filteredMetrics\n\t}\n\tcollector.defMetrics.reset()\n\tfor k := range collector.metrics {\n\t\tcounters := filterMetricsByKind(config.KeyMetricTypeCounter, collector.metrics[k])\n\t\tgauges := filterMetricsByKind(config.KeyMetricTypeGauge, collector.metrics[k])\n\t\thistograms := filterMetricsByKind(config.KeyMetricTypeHistogram, collector.metrics[k])\n\t\tcollectCounters(counters, collector.defMetrics, ch)\n\t\tcollectGauges(gauges, collector.defMetrics, ch)\n\t\tcollectHistograms(histograms, collector.defMetrics, ch)\n\t\tcollector.cache.Reset()\n\t}\n\tcollector.defMetrics.collectDefaultMetrics(ch)\n}", "func (m VarnishPlugin) FetchMetrics() (map[string]interface{}, error) {\n\tvar out []byte\n\tvar err error\n\n\tif m.VarnishName == \"\" {\n\t\tout, err = exec.Command(m.VarnishStatPath, \"-1\").CombinedOutput()\n\t} else {\n\t\tout, err = exec.Command(m.VarnishStatPath, \"-1\", \"-n\", m.VarnishName).CombinedOutput()\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%s: %s\", err, out)\n\t}\n\n\tlineexp := regexp.MustCompile(`^([^ ]+) +(\\d+)`)\n\tsmaexp := regexp.MustCompile(`^SMA\\.([^\\.]+)\\.(.+)$`)\n\n\tstat := map[string]interface{}{\n\t\t\"requests\": float64(0),\n\t}\n\n\tvar tmpv float64\n\tfor _, line := range strings.Split(string(out), \"\\n\") {\n\t\tmatch := lineexp.FindStringSubmatch(line)\n\t\tif match == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\ttmpv, err = strconv.ParseFloat(match[2], 64)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch match[1] {\n\t\tcase \"cache_hit\", \"MAIN.cache_hit\":\n\t\t\tstat[\"cache_hits\"] = tmpv\n\t\t\tstat[\"requests\"] = stat[\"requests\"].(float64) + 
tmpv\n\t\tcase \"cache_miss\", \"MAIN.cache_miss\":\n\t\t\tstat[\"requests\"] = stat[\"requests\"].(float64) + tmpv\n\t\tcase \"cache_hitpass\", \"MAIN.cache_hitpass\":\n\t\t\tstat[\"requests\"] = stat[\"requests\"].(float64) + tmpv\n\t\tcase \"MAIN.backend_req\":\n\t\t\tstat[\"backend_req\"] = tmpv\n\t\tcase \"MAIN.backend_conn\":\n\t\t\tstat[\"backend_conn\"] = tmpv\n\t\tcase \"MAIN.backend_fail\":\n\t\t\tstat[\"backend_fail\"] = tmpv\n\t\tcase \"MAIN.backend_reuse\":\n\t\t\tstat[\"backend_reuse\"] = tmpv\n\t\tcase \"MAIN.backend_recycle\":\n\t\t\tstat[\"backend_recycle\"] = tmpv\n\t\tcase \"MAIN.n_object\":\n\t\t\tstat[\"n_object\"] = tmpv\n\t\tcase \"MAIN.n_objectcore\":\n\t\t\tstat[\"n_objectcore\"] = tmpv\n\t\tcase \"MAIN.n_expired\":\n\t\t\tstat[\"n_expired\"] = tmpv\n\t\tcase \"MAIN.n_objecthead\":\n\t\t\tstat[\"n_objecthead\"] = tmpv\n\t\tcase \"MAIN.busy_sleep\":\n\t\t\tstat[\"busy_sleep\"] = tmpv\n\t\tcase \"MAIN.busy_wakeup\":\n\t\t\tstat[\"busy_wakeup\"] = tmpv\n\t\tdefault:\n\t\t\tsmamatch := smaexp.FindStringSubmatch(match[1])\n\t\t\tif smamatch == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif smamatch[2] == \"g_alloc\" {\n\t\t\t\tstat[\"varnish.sma.g_alloc.\"+smamatch[1]+\".g_alloc\"] = tmpv\n\t\t\t} else if smamatch[2] == \"g_bytes\" {\n\t\t\t\tstat[\"varnish.sma.memory.\"+smamatch[1]+\".allocated\"] = tmpv\n\t\t\t} else if smamatch[2] == \"g_space\" {\n\t\t\t\tstat[\"varnish.sma.memory.\"+smamatch[1]+\".available\"] = tmpv\n\t\t\t}\n\t\t}\n\t}\n\n\treturn stat, err\n}", "func (c *Controller) CollectMetrics(ch chan<- prometheus.Metric) {\n\tmetric, err := prometheus.NewConstMetric(controllermanager.ControllerWorkerSum, prometheus.GaugeValue, float64(c.RunningWorkers()), \"controllerregistration\")\n\tif err != nil {\n\t\tcontrollermanager.ScrapeFailures.With(prometheus.Labels{\"kind\": \"controllerregistration-controller\"}).Inc()\n\t\treturn\n\t}\n\tch <- metric\n}", "func (cg *Gatherer) Gather() ([]*dto.MetricFamily, error) {\n\tlogger.V(2).Println(\"Gather() called directly on a check gatherer, this is a bug!\")\n\n\tctx, cancel := context.WithTimeout(context.Background(), defaultGatherTimeout)\n\tdefer cancel()\n\n\treturn cg.GatherWithState(ctx, registry.GatherState{})\n}", "func (vmc *VMMetricsCollector) StartCollection() {\n\tgo func() {\n\t\tticker := time.NewTicker(vmc.scrapeInterval)\n\t\tvar prevProcStat *procfs.ProcStat\n\t\tvar prevStat *procfs.Stat\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tprevProcStat, prevStat = vmc.scrape(prevProcStat, prevStat)\n\t\t\t\tvmc.export()\n\n\t\t\tcase <-vmc.done:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func (e *PrometheusExporter) Start() {\n\tpromoExposer := NewPrometheusHTTPExposer(e)\n\tgo promoExposer.Run(PromoHTTPEndpoint, PromoHTTPPort)\n}", "func setupMetrics(mf monitoring.MetricFactory) {\n\tif mf == nil {\n\t\tmf = monitoring.InertMetricFactory{}\n\t}\n\tknownSourceLogs = mf.NewGauge(\"known_logs\", \"Set to 1 for known source logs\", \"logname\")\n\treadsCounter = mf.NewCounter(\"log_reads\", \"Number of source log read requests\", \"logname\")\n\treadErrorsCounter = mf.NewCounter(\"log_read_errors\", \"Number of source log read errors\", \"logname\")\n\tlastSeenSTHTimestamp = mf.NewGauge(\"last_seen_sth_timestamp\", \"Time of last seen STH in ms since epoch\", \"logname\")\n\tlastSeenSTHTreeSize = mf.NewGauge(\"last_seen_sth_treesize\", \"Size of tree at last seen STH\", \"logname\")\n\tlastRecordedSTHTimestamp = mf.NewGauge(\"last_recorded_sth_timestamp\", \"Time of last recorded STH in ms since 
epoch\", \"logname\")\n\tlastRecordedSTHTreeSize = mf.NewGauge(\"last_recorded_sth_treesize\", \"Size of tree at last recorded STH\", \"logname\")\n\n\tdestPureHub = mf.NewGauge(\"dest_pure_hub\", \"Set to for known destination hubs\", \"hubname\")\n\twritesCounter = mf.NewCounter(\"hub_writes\", \"Number of destination hub submissions\", \"hubname\")\n\twriteErrorsCounter = mf.NewCounter(\"hub_write_errors\", \"Number of destination hub submission errors\", \"hubname\")\n}", "func (i *combinedInformers) Start(stopCh <-chan struct{}) {\n\ti.externalKubeInformers.Start(stopCh)\n\ti.appInformers.Start(stopCh)\n\ti.authorizationInformers.Start(stopCh)\n\ti.buildInformers.Start(stopCh)\n\ti.imageInformers.Start(stopCh)\n\ti.networkInformers.Start(stopCh)\n\ti.oauthInformers.Start(stopCh)\n\ti.quotaInformers.Start(stopCh)\n\ti.routeInformers.Start(stopCh)\n\ti.securityInformers.Start(stopCh)\n\ti.templateInformers.Start(stopCh)\n\ti.userInformers.Start(stopCh)\n}", "func (pc *PrometheusCollector) Collect(ch chan<- prometheus.Metric) {\n\tpc.attempts.Collect(ch)\n\tpc.errors.Collect(ch)\n\tpc.successes.Collect(ch)\n\tpc.failures.Collect(ch)\n\tpc.rejects.Collect(ch)\n\tpc.shortCircuits.Collect(ch)\n\tpc.timeouts.Collect(ch)\n\tpc.fallbackSuccesses.Collect(ch)\n\tpc.fallbackFailures.Collect(ch)\n\tpc.totalDuration.Collect(ch)\n\tpc.runDuration.Collect(ch)\n}", "func (p *Probe) Start(ctx context.Context, dataChan chan *metrics.EventMetrics) {\n\tif p.conn == nil {\n\t\tp.l.Critical(\"Probe has not been properly initialized yet.\")\n\t}\n\tdefer p.conn.close()\n\tfor ts := range time.Tick(p.opts.Interval) {\n\t\t// Don't run another probe if context is canceled already.\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\n\t\tp.runProbe()\n\t\tp.l.Debugf(\"%s: Probe finished.\", p.name)\n\t\tif (p.runCnt % uint64(p.c.GetStatsExportInterval())) != 0 {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, t := range p.targets {\n\t\t\tem := metrics.NewEventMetrics(ts).\n\t\t\t\tAddMetric(\"total\", metrics.NewInt(p.sent[t])).\n\t\t\t\tAddMetric(\"success\", metrics.NewInt(p.received[t])).\n\t\t\t\tAddMetric(\"latency\", metrics.NewFloat(p.latency[t].Seconds()/p.opts.LatencyUnit.Seconds())).\n\t\t\t\tAddLabel(\"ptype\", \"ping\").\n\t\t\t\tAddLabel(\"probe\", p.name).\n\t\t\t\tAddLabel(\"dst\", t)\n\n\t\t\tdataChan <- em.Clone()\n\t\t\tp.l.Info(em.String())\n\t\t}\n\t}\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\tif err := e.collect(ch); err != nil {\n\t\tglog.Error(fmt.Sprintf(\"Error collecting stats: %s\", err))\n\t}\n\treturn\n}", "func StartReporter() {\n\tctx := context.Background()\n\tparent := opentracing.StartSpan(\"(*woker-ops).StartReporter\")\n\tdefer parent.Finish()\n\tparent.LogFields(\n\t\ttracelog.String(\"event\", \"Received static report\"))\n\tvar stdoutmsg string\n\tvartime := viper.GetInt(\"before\")\n\n\tmyresult, err := generate.Launch(ctx, parent, vartime)\n\tif err != nil {\n\t\tlog.Error().Msgf(\"Error Generate report: %v\", err)\n\t}\n\tif len(myresult) > 0 {\n\t\tfor _, n := range myresult {\n\n\t\t\tstdoutmsg = \"Worker still running more than \" + viper.GetString(\"before\") + \" hour(s) in \" + n.Env + \" environment: \\n\"\n\n\t\t\tfor _, o := range n.Instances {\n\t\t\t\tstdoutmsg = stdoutmsg + \" Name: \" + o.Name + \" DNS: \" + o.Dnsname + \" on region: \" + o.Region + \" Started since UTC: \" + o.Launchtime.String() + 
\"\\n\"\n\t\t\t}\n\n\t\t}\n\n\t\tfmt.Println(stdoutmsg)\n\t} else {\n\t\tfmt.Printf(\"No worker running since: %v hour(s)\\n\", viper.GetInt(\"before\"))\n\t}\n}", "func (a *Agent) start() {\n\ta.initAPI()\n\tnb := 0\n\tfor {\n\t\ta.updateStreams()\n\t\tnb++\n\t\tif nb == 10 {\n\t\t\tlog.Printf(\"Sent %d logs and %d metrics on the last %d seconds\\n\", a.nbLogs, a.nbMetrics, nb*conf.period)\n\t\t\tnb = 0\n\t\t\ta.nbLogs = 0\n\t\t\ta.nbMetrics = 0\n\t\t}\n\t\ttime.Sleep(time.Duration(conf.period) * time.Second)\n\t}\n}", "func setupMetrics() *Metrics {\n\t// Requests duration\n\tduration := prometheus.NewHistogramVec(prometheus.HistogramOpts{\n\t\tName: \"http_request_duration\",\n\t\tHelp: \"Duration of the http requests processed.\",\n\t},\n\t\t[]string{\"status\", \"method\", \"path\"},\n\t)\n\tprometheus.MustRegister(duration)\n\n\treturn &Metrics{\n\t\tduration: duration,\n\t}\n}", "func (c *metricbeatCollector) Collect(ch chan<- prometheus.Metric) {\n\n\tfor _, i := range c.metrics {\n\t\tch <- prometheus.MustNewConstMetric(i.desc, i.valType, i.eval(c.stats))\n\t}\n\n}", "func (b *EBPFTelemetry) Collect(ch chan<- prometheus.Metric) {\n\tb.getHelpersTelemetry(ch)\n\tb.getMapsTelemetry(ch)\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tif ch == nil {\n\t\tglog.Info(\"Prometheus channel is closed. Skipping\")\n\t\treturn\n\t}\n\n\te.mutex.Lock()\n\tdefer func() {\n\t\te.mutex.Unlock()\n\t\te.cleanup.Range(func(key, value interface{}) bool {\n\t\t\tswitch chiName := key.(type) {\n\t\t\tcase string:\n\t\t\t\te.cleanup.Delete(key)\n\t\t\t\te.removeInstallationReference(chiName)\n\t\t\t}\n\t\t\treturn true\n\t\t})\n\t}()\n\n\tglog.Info(\"Starting Collect\")\n\tvar wg = sync.WaitGroup{}\n\t// Getting hostnames of Pods and requesting the metrics data from ClickHouse instances within\n\tfor chiName := range e.chInstallations {\n\t\t// Loop over all hostnames of this installation\n\t\tglog.Infof(\"Collecting metrics for %s\\n\", chiName)\n\t\tfor _, hostname := range e.chInstallations[chiName].hostnames {\n\t\t\twg.Add(1)\n\t\t\tgo func(name, hostname string, c chan<- prometheus.Metric) {\n\t\t\t\tdefer wg.Done()\n\n\t\t\t\tglog.Infof(\"Querying metrics for %s\\n\", hostname)\n\t\t\t\tmetricsData := make([][]string, 0)\n\t\t\t\tfetcher := e.newFetcher(hostname)\n\t\t\t\tif err := fetcher.clickHouseQueryMetrics(&metricsData); err != nil {\n\t\t\t\t\t// In case of an error fetching data from clickhouse store CHI name in e.cleanup\n\t\t\t\t\tglog.Infof(\"Error querying metrics for %s: %s\\n\", hostname, err)\n\t\t\t\t\te.cleanup.Store(name, struct{}{})\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tglog.Infof(\"Extracted %d metrics for %s\\n\", len(metricsData), hostname)\n\t\t\t\twriteMetricsDataToPrometheus(c, metricsData, name, hostname)\n\n\t\t\t\tglog.Infof(\"Querying table sizes for %s\\n\", hostname)\n\t\t\t\ttableSizes := make([][]string, 0)\n\t\t\t\tif err := fetcher.clickHouseQueryTableSizes(&tableSizes); err != nil {\n\t\t\t\t\t// In case of an error fetching data from clickhouse store CHI name in e.cleanup\n\t\t\t\t\tglog.Infof(\"Error querying table sizes for %s: %s\\n\", hostname, err)\n\t\t\t\t\te.cleanup.Store(name, struct{}{})\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tglog.Infof(\"Extracted %d table sizes for %s\\n\", len(tableSizes), hostname)\n\t\t\t\twriteTableSizesDataToPrometheus(c, tableSizes, name, hostname)\n\n\t\t\t}(chiName, hostname, ch)\n\t\t}\n\t}\n\twg.Wait()\n\tglog.Info(\"Finished Collect\")\n}", "func (core *Core) monitor() {\n\tgo core.metricMonitor()\n\tgo 
core.taskHealthCheck(1 * time.Minute)\n}", "func (s *Systemctl) Start(acc telegraf.Accumulator) error {\n\t// lock the function\n\ts.mux.Lock()\n\t// release the lock at the end of the function\n\tdefer s.mux.Unlock()\n\t// check that the sampler has been initiatised\n\tif s.Sampler == nil {\n\t\treturn errors.New(\"Systemctl.Sampler has not been set\")\n\t}\n\t// check the sampler is not already running\n\tif s.running {\n\t\treturn nil\n\t}\n\n\tSetLogLevel(s.LogLevel)\n\tlog.WithFields(log.Fields{\n\t\t\"InputPlugin\": \"systemctl\",\n\t}).Debug(\"Starting\")\n\n\t// check the sample has not already initalised the aggregators\n\tif s.Aggregators == nil {\n\t\t// create an aggregator for each service defined within the configuration\n\t\tserviceCount := len(s.Services)\n\t\ts.Aggregators = make([]StateAggregator, serviceCount)\n\t\tfor i, service := range s.Services {\n\t\t\ts.Aggregators[i] = StateAggregator{\n\t\t\t\tResourceName: service,\n\t\t\t\tAggState: make(map[string]uint64),\n\t\t\t\tCurrentState: \"unknown\",\n\t\t\t\tCurrentStateDuration: 0,\n\t\t\t\tStateCollector: Collector{\n\t\t\t\t\tSampleRate: s.SampleRate,\n\t\t\t\t\tDone: make(chan bool),\n\t\t\t\t\tCollect: make(chan bool),\n\t\t\t\t\tSampleResults: make(chan []Sample),\n\t\t\t\t},\n\t\t\t}\n\t\t\t// start collecting samples for each aggregator in a separate go routine\n\t\t\t// providing the service name and the sampler used to collect data\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"InputPlugin\": \"systemctl\",\n\t\t\t\t\"ResourceName\": service,\n\t\t\t}).Debug(\"Starting CollectSamples\")\n\t\t\tgo s.Aggregators[i].StateCollector.CollectSamples(service, s.Sampler)\n\t\t}\n\t}\n\n\t// set the state that the input plugin has started\n\ts.running = true\n\n\tlog.WithFields(log.Fields{\n\t\t\"InputPlugin\": \"systemctl\",\n\t}).Debug(\"Started\")\n\n\treturn nil\n}", "func (p *ProbeHandler) Start() error {\n\t// start a goroutine in order to update the graph\n\tgo func() {\n\t\t// update the graph each five seconds\n\t\tticker := time.NewTicker(5 * time.Second)\n\t\tdefer ticker.Stop()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-p.quit:\n\t\t\t\t// got a message on the quit chan\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tv, err := mem.VirtualMemory()\n\t\t\t\tif err != nil {\n\t\t\t\t\tp.Ctx.Logger.Errorf(\"unable to retrieve memory information: %s\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tusage := &Usage{\n\t\t\t\t\tTotal: int64(v.Total),\n\t\t\t\t\tFree: int64(v.Free),\n\t\t\t\t\tUsedPercent: int64(v.UsedPercent),\n\t\t\t\t}\n\n\t\t\t\t// lock the graph for modification\n\t\t\t\tp.Ctx.Graph.Lock()\n\n\t\t\t\t// add metadata entry to the root node\n\t\t\t\tp.Ctx.Graph.AddMetadata(p.Ctx.RootNode, \"Memory\", usage)\n\n\t\t\t\t// release the graph lock\n\t\t\t\tp.Ctx.Graph.Unlock()\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil\n}", "func (r *Reporter) Start(_ context.Context) error {\n\treturn nil\n}", "func init() {\n\tpromRegistry := prometheus.NewRegistry()\n\tpromRegistry.MustRegister(uptime, reqCount, passCount, blockCount, reqDuration)\n\tgo recordUptime()\n\tpromHandler = promhttp.InstrumentMetricHandler(promRegistry, promhttp.HandlerFor(promRegistry, promhttp.HandlerOpts{}))\n}", "func (a *app) gatherStat() {\n\tabout, err := a.srv.About.Get().Fields(\"storageQuota\").Do()\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to execute an about request: %v\", err)\n\t}\n\n\ta.sq = about.StorageQuota\n}", "func (w *windowsResourceUsageGatherer) Gather(executor QueryExecutor, startTime time.Time, config 
*measurement.MeasurementConfig) ([]measurement.Summary, error) {\n\tcpuSummary, err := getSummary(cpuUsageQueryTop10, convertToCPUPerfData, cpuUsageMetricsName, executor, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmemorySummary, err := getSummary(memoryUsageQueryTop10, convertToMemoryPerfData, memoryUsageMetricsName, executor, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn []measurement.Summary{cpuSummary, memorySummary}, nil\n}", "func (p RekognitionPlugin) FetchMetrics() (map[string]float64, error) {\n\tstat := make(map[string]float64)\n\n\tfor _, met := range [...]string{\n\t\t\"SuccessfulRequestCount\",\n\t\t\"ThrottledCount\",\n\t\t\"ResponseTime\",\n\t\t\"DetectedFaceCount\",\n\t\t\"DetectedLabelCount\",\n\t\t\"ServerErrorCount\",\n\t\t\"UserErrorCount\",\n\t} {\n\t\tv, err := p.getLastPoint(met)\n\t\tif err == nil {\n\t\t\tstat[met] = v\n\t\t} else {\n\t\t\tlog.Printf(\"%s: %s\", met, err)\n\t\t}\n\t}\n\n\treturn stat, nil\n}", "func startAnalytics(ctx context.Context, cfg Config) error {\n\tfor {\n\t\topType := randomAnalyticsOp()\n\t\tif err := ctx.Err(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr := runAnalyticsOp(ctx, cfg, opType)\n\t\tstats.Lock()\n\t\tstats.totalOps++\n\t\tstats.noAnalyticsOps++\n\t\tstats.analyticsOpCounts[opType]++\n\t\tif err != nil {\n\t\t\tstats.failedOps++\n\t\t\tlog.Printf(\"failed to run analytics op: %d: %s\", opType, err)\n\t\t}\n\t\tstats.Unlock()\n\n\t\ttime.Sleep(time.Second * time.Duration(cfg.AnalyticsQueriesWaitSeconds))\n\t}\n}", "func (c *solarCollector) collect(ch chan<- prometheus.Metric) error {\n\t// fetch the status of the controller\n\ttracer, err := gotracer.Status(\"/dev/ttyUSB0\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t/*\n\t * report the collected data\n\t */\n\n\t// store boolean values as a float (1 == true, 0 == false)\n\tvar loadIsActive float64\n\t// Panel array\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.panelVoltage,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.ArrayVoltage),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.panelCurrent,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.ArrayCurrent),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.panelPower,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.ArrayPower),\n\t)\n\n\t// Batteries\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.batteryCurrent,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.BatteryCurrent),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.batteryVoltage,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.BatteryVoltage),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.batterySOC,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.BatterySOC),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.batteryTemp,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.BatteryTemp),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.batteryMinVoltage,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.BatteryMinVoltage),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.batteryMaxVoltage,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.BatteryMaxVoltage),\n\t)\n\n\t// Load output\n\tif tracer.Load {\n\t\tloadIsActive = 1\n\t}\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.loadActive,\n\t\tprometheus.GaugeValue,\n\t\tloadIsActive,\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.loadVoltage,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.LoadVoltage),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.loadCurrent,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.LoadCurrent),\n\t)\n\tch <- 
prometheus.MustNewConstMetric(\n\t\tc.loadPower,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.LoadPower),\n\t)\n\n\t// controller infos\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.deviceTemp,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.DeviceTemp),\n\t)\n\n\t// energy consumed\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.energyConsumedDaily,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.EnergyConsumedDaily),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.energyConsumedMonthly,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.EnergyConsumedMonthly),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.energyConsumedAnnual,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.EnergyConsumedAnnual),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.energyConsumedTotal,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.EnergyConsumedTotal),\n\t)\n\t// energy generated\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.energyGeneratedDaily,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.EnergyGeneratedDaily),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.energyGeneratedMonthly,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.EnergyGeneratedMonthly),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.energyGeneratedAnnual,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.EnergyGeneratedAnnual),\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.energyGeneratedTotal,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(tracer.EnergyGeneratedTotal),\n\t)\n\n\treturn nil\n}", "func (inst *Instance) Start() error {\n\tticker := time.NewTicker(time.Duration(inst.period) * time.Second)\n\tdefer ticker.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-inst.ctx.Done():\n\t\t\treturn nil\n\t\tcase <-ticker.C:\n\t\t\tinst.logger.Debug().Msg(\"metric collection triggered\")\n\t\t\tsess, err := inst.createSession(inst.regionCfg.Name)\n\t\t\tif err != nil {\n\t\t\t\tinst.logger.Warn().Err(err).Msg(\"creating AWS SDK session\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tinst.Lock()\n\t\t\tif inst.running {\n\t\t\t\tinst.Unlock()\n\t\t\t\tinst.logger.Warn().Msg(\"collection already in progress, not starting another\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tinst.running = true\n\t\t\tinst.Unlock()\n\n\t\t\tend := time.Now()\n\t\t\tstart := end.Add(-(time.Duration(inst.interval) * time.Second))\n\t\t\ttimespan := collectors.MetricTimespan{\n\t\t\t\tStart: start,\n\t\t\t\tEnd: end,\n\t\t\t\tPeriod: inst.period,\n\t\t\t}\n\t\t\tfor _, c := range inst.collectors {\n\t\t\t\tif err := c.Collect(sess, timespan, inst.baseTags); err != nil {\n\t\t\t\t\tinst.check.ReportError(errors.WithMessage(err, fmt.Sprintf(\"id: %s, collector: %s\", inst.cfg.ID, c.ID())))\n\t\t\t\t\tinst.logger.Warn().Err(err).Str(\"collector\", c.ID()).Msg(\"collecting telemetry\")\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tinst.Lock()\n\t\t\tinst.running = false\n\t\t\tinst.Unlock()\n\t\t}\n\t}\n}", "func main() {\n\tcluster.RegisterMetrics()\n\tgo func() {\n\t\tfor {\n\t\t\tcluster.RecordMetrics()\n\t\t\ttime.Sleep(5 * time.Second)\n\t\t}\n\t}()\n\n\thttp.Handle(\"/metrics\", promhttp.Handler())\n\thttp.ListenAndServe(\":2112\", nil)\n\n}", "func (p *ProcMetrics) Collect() {\n\tif m, err := CollectProcInfo(p.pid); err == nil {\n\t\tnow := time.Now()\n\n\t\tif !p.lastTime.IsZero() {\n\t\t\tratio := 1.0\n\t\t\tswitch {\n\t\t\tcase m.CPU.Period > 0 && m.CPU.Quota > 0:\n\t\t\t\tratio = float64(m.CPU.Quota) / float64(m.CPU.Period)\n\t\t\tcase m.CPU.Shares > 0:\n\t\t\t\tratio = float64(m.CPU.Shares) / 1024\n\t\t\tdefault:\n\t\t\t\tratio = 1 / 
float64(runtime.NumCPU())\n\t\t\t}\n\n\t\t\tinterval := ratio * float64(now.Sub(p.lastTime))\n\n\t\t\tp.cpu.user.time = m.CPU.User - p.last.CPU.User\n\t\t\tp.cpu.user.percent = 100 * float64(p.cpu.user.time) / interval\n\n\t\t\tp.cpu.system.time = m.CPU.Sys - p.last.CPU.Sys\n\t\t\tp.cpu.system.percent = 100 * float64(p.cpu.system.time) / interval\n\n\t\t\tp.cpu.total.time = (m.CPU.User + m.CPU.Sys) - (p.last.CPU.User + p.last.CPU.Sys)\n\t\t\tp.cpu.total.percent = 100 * float64(p.cpu.total.time) / interval\n\t\t}\n\n\t\tp.memory.available = m.Memory.Available\n\t\tp.memory.size = m.Memory.Size\n\t\tp.memory.resident.usage = m.Memory.Resident\n\t\tp.memory.resident.percent = 100 * float64(p.memory.resident.usage) / float64(p.memory.available)\n\t\tp.memory.shared.usage = m.Memory.Shared\n\t\tp.memory.text.usage = m.Memory.Text\n\t\tp.memory.data.usage = m.Memory.Data\n\t\tp.memory.pagefault.major.count = m.Memory.MajorPageFaults - p.last.Memory.MajorPageFaults\n\t\tp.memory.pagefault.minor.count = m.Memory.MinorPageFaults - p.last.Memory.MinorPageFaults\n\n\t\tp.files.open = m.Files.Open\n\t\tp.files.max = m.Files.Max\n\n\t\tp.threads.num = m.Threads.Num\n\t\tp.threads.switches.voluntary.count = m.Threads.VoluntaryContextSwitches - p.last.Threads.VoluntaryContextSwitches\n\t\tp.threads.switches.involuntary.count = m.Threads.InvoluntaryContextSwitches - p.last.Threads.InvoluntaryContextSwitches\n\n\t\tp.last = m\n\t\tp.lastTime = now\n\t\tp.engine.Report(p)\n\t}\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tjunosTotalScrapeCount++\n\tch <- prometheus.MustNewConstMetric(junosDesc[\"ScrapesTotal\"], prometheus.CounterValue, junosTotalScrapeCount)\n\n\twg := &sync.WaitGroup{}\n\tfor _, collector := range e.Collectors {\n\t\twg.Add(1)\n\t\tgo e.runCollector(ch, collector, wg)\n\t}\n\twg.Wait()\n}" ]
[ "0.6267255", "0.5989622", "0.59146374", "0.5805551", "0.57958657", "0.5792979", "0.57153803", "0.5703085", "0.56891817", "0.56713253", "0.5658135", "0.56294215", "0.5611643", "0.5592443", "0.5585979", "0.5580495", "0.5564782", "0.5561991", "0.5531033", "0.5516628", "0.5489683", "0.547175", "0.54582906", "0.54496145", "0.54469836", "0.5422912", "0.5422206", "0.54103225", "0.5406607", "0.5397181", "0.53916353", "0.53834856", "0.5380801", "0.53696275", "0.5355495", "0.5354002", "0.53486717", "0.534624", "0.5345761", "0.5341564", "0.53238875", "0.5317563", "0.53026754", "0.5300878", "0.5300759", "0.52882916", "0.5287198", "0.5271888", "0.5267767", "0.52666", "0.5258271", "0.5254214", "0.52529687", "0.52481306", "0.5245557", "0.5239906", "0.52396965", "0.5236216", "0.5232036", "0.5230846", "0.52281964", "0.5224899", "0.52207416", "0.52111363", "0.52045727", "0.52013934", "0.51989305", "0.51963234", "0.519624", "0.5193696", "0.51921123", "0.5189802", "0.51806337", "0.51761484", "0.5175417", "0.5173794", "0.51689947", "0.51596993", "0.5159074", "0.5158661", "0.51549846", "0.515237", "0.5144237", "0.5140437", "0.5139692", "0.5135855", "0.5131532", "0.5122401", "0.51209366", "0.5119797", "0.5109831", "0.5104386", "0.5098804", "0.5095948", "0.5091188", "0.5090079", "0.5089842", "0.5085625", "0.508478", "0.50823516" ]
0.54232216
25
init registers this plugin instance
func init() { inputs.Add("logstash", func() telegraf.Input { return NewLogstash() }) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func init() {\n\tapid.RegisterPlugin(initPlugin)\n}", "func init() {\n\t// TODO: set logger\n\t// TODO: register storage plugin to plugin manager\n}", "func (p *PRPlugin) init() {\n\tp.Logger = log.NewFor(p.Name)\n\tp.Debug(\"plugin initialized\")\n}", "func (p *CouchBasePlugin) init(cfg plugin.Config) error {\n\tif p.initialized {\n\t\treturn nil\n\t}\n\n\tapi, err := cfg.GetString(\"api_url\")\n\tun, err := cfg.GetString(\"username\")\n\tpw, err := cfg.GetString(\"password\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"plugin initalization failed : [%v]\", err)\n\t}\n\n\tcfgItems := map[string]string{\n\t\t\"api_url\": api,\n\t\t\"username\": un,\n\t\t\"password\": pw,\n\t}\n\n\tp.app = makeCollector(cfgItems)\n\n\tmetrics, err := p.app.Discover()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, m := range metrics {\n\t\tp.callDiscovery = append(p.callDiscovery, m)\n\t}\n\n\tp.initialized = true\n\treturn nil\n}", "func init() {\n\tlogger.Info(\"Initialising Parse JWT Go Plugin\")\n}", "func init() {\n\tinstance = GetInstance()\n}", "func init() {\n\tvar logger = log.Get()\n\tlogger.Info(\"Processing Golang plugin init function!!\" )\n\t//Here you write the code for db connection\n}", "func init() {\n\tinterfaces.AddPlugin(\"cloudfoundryhosting\", nil, Init)\n}", "func (*plugin) Init(params plugins.InitParams) error {\n\treturn nil\n}", "func registerPlugin(name string, init constructor) {\n\tif Pipes == nil {\n\t\tPipes = make(map[string]constructor)\n\t}\n\tPipes[name] = init\n}", "func init() {\n\tinitHandlers()\n}", "func init() {\n\t// init func\n}", "func init() {\n\tregistry.Add(\"tapo\", NewTapoFromConfig)\n}", "func init() {\n\t//todo...\n}", "func (p *SimpleProxy) init() error {\n\treturn p.registerSubscribers()\n}", "func init() {\n\t// Initialization goes here\n}", "func (p *ExamplePlugin) Init() error {\n\treturn nil\n}", "func (p *ExamplePlugin) Init() error {\n\treturn nil\n}", "func init() {\n\tingestion.Register(config.CONSTRUCTOR_NANO, newPerfProcessor)\n}", "func init() {\n\tworkflow.Register(Workflow)\n\tactivity.Register(helloworldActivity)\n}", "func (s *Service) init() {\n\tif s.hasInit {\n\t\treturn\n\t}\n\ts.hasInit = true\n\ts.usageErrors = make(map[string][]string)\n\tif s.fs == nil {\n\t\ts.fs = flag.CommandLine\n\t}\n\ts.es = fenv.NewEnvSet(s.fs, fenv.Prefix(\"plugin_\"))\n\tif s.envFunc == nil {\n\t\ts.envFunc = fenv.OSEnv\n\t}\n\tif s.log == nil {\n\t\ts.log = &Logger{}\n\t}\n\ts.log.s = s\n\n}", "func init() {\n\tp, err := New()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tproviders.Register(p)\n}", "func init() {\n\tc.getConf()\n\trotateLogger()\n}", "func (h *Handler) init() {\n\th.meta = meta.New()\n}", "func init() {\n\tingestion.Register(config.CONSTRUCTOR_GOLD_TRYJOB, newGoldTryjobProcessor)\n}", "func init() {\n\tSetup()\n}", "func (i *I2PGatePlugin) Init() error {\n /*i := Setup()\n if err != nil {\n\t\treturn nil, err\n\t}*/\n\treturn nil\n}", "func init() {\n\tPlugin = IntelPlugin{\n\t\tPluginName: \"intel_plugin\",\n\t\tSpecVersion: \"1.0\",\n\t}\n}", "func init() {\n\tinjection.Default.RegisterInformer(withInformer)\n}", "func init() {}", "func init() {}", "func init() {}", "func init() {}", "func (pp *protocPlugin) init() {\n\tpp.Dependencies = map[string]string{\n\t\t\"proto\": \"google.golang.org/protobuf/proto\",\n\t}\n}", "func init() {\n\tRegistry.Add(eksinfo.New())\n\tRegistry.Add(vpcinfo.New())\n\tRegistry.Add(iamresourceusage.New())\n}", "func init() {\n\th, err := calcHashSum(*config.Promises)\n\tif err != nil 
{\n\t\tlog.Fatalln(\"Error:\", err)\n\t}\n\n\tfi, err := os.Stat(*config.Promises)\n\tif err != nil {\n\t\tlog.Println(\"Error:\", err)\n\t}\n\n\tvar r Report\n\tr = &Promises{\n\t\tFilename: *config.Promises,\n\t\tChecksum: h,\n\t\tOldModTime: fi.ModTime(),\n\t}\n\n\tRegister(\"promises\", r)\n}", "func init() {\n\turls = &pluginWebURL{\n\t\tbase: \"https://api.github.com\",\n\t\tauthURL: \"/authorizations\",\n\t\tassigneeURL: \"/repos/%s/%s/issues/%d/assignees\",\n\t\tissueURL: \"/repos/%s/%s/issues\",\n\t\tlabelURL: \"/repos/%s/%s/issues/%d/labels\",\n\t\trepo: \"/repos/%s/%s\",\n\t\tuserRepo: \"/user/repos\",\n\t}\n\n\tinfo, _ := githandler.Remote()\n\torg := info.Organisation\n\trepo := info.Repository\n\ttoken := githandler.ConfigGet(\"token\", \"phlow\")\n\n\tGitHub = &GitHubImpl{\n\t\turls,\n\t\trepo,\n\t\torg,\n\t\ttoken,\n\t}\n}", "func init() {\n\tregister(\"jemoji\", jemojiPlugin{})\n\tregister(\"jekyll-mentions\", jekyllMentionsPlugin{})\n\tregister(\"jekyll-optional-front-matter\", jekyllOptionalFrontMatterPlugin{})\n\n\t// Gojekyll behaves as though the following plugins are always loaded.\n\t// Define them here so we don't see warnings that they aren't defined.\n\tregister(\"jekyll-live-reload\", plugin{})\n\tregister(\"jekyll-sass-converter\", plugin{})\n}", "func init() {\n\tconsul.Register()\n\tetcd.Register()\n\tzookeeper.Register()\n\tboltdb.Register()\n}", "func (g *Generator) init() {\n\tg.filedInit = []string{}\n\tg.imports = map[string]bool{}\n\tg.pooledObjects = map[string]string{}\n\tg.structTypes = map[string]string{}\n\tg.sliceTypes = map[string]string{}\n\tg.poolInit = map[string]string{}\n\tg.addImport(gojayPackage)\n\t// if we want pools, add the sync package right away\n\tif g.options.PoolObjects {\n\t\tg.addImport(\"sync\")\n\t}\n}", "func init() {\n\tklog.InitFlags(nil)\n\tlogf.SetLogger(klogr.New())\n\n\t// Register required object kinds with global scheme.\n\t_ = clusterv1.AddToScheme(scheme.Scheme)\n}", "func init() {\n\ti = New()\n}", "func (p *Plugin) Init(context *InitContext) {\n\tp.transport.SetInitHandler(func(out []byte, err error) error {\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"Unable to get registration info from plugin. 
Error: %v\", err))\n\t\t}\n\n\t\tregInfo := registrationInfo{}\n\t\tif err := json.Unmarshal(out, &regInfo); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tp.processRegistrationInfo(context, regInfo)\n\t\treturn nil\n\t})\n\n\tlog.WithFields(logrus.Fields{\n\t\t\"plugin\": p,\n\t}).Debugln(\"request plugin to return configuration\")\n\tgo p.transport.RequestInit()\n}", "func init() {\n\tcloudprovider.RegisterCloudProvider(providerName, newCloudConnection)\n}", "func init() {\n\tkv.RegisterMerger(MergerName, NewMerger)\n}", "func (p *pluginImpl) Init(e wicore.Editor) {\n\tp.e = e\n\n\t// TODO(maruel): Generate automatically?\n\te.RegisterCommands(func(cmds wicore.EnqueuedCommands) {\n\t\t//log.Printf(\"Commands(%v)\", cmds)\n\t})\n\te.RegisterDocumentCreated(func(doc wicore.Document) {\n\t\tlog.Printf(\"DocumentCreated(%s)\", doc)\n\t})\n\te.RegisterDocumentCursorMoved(func(doc wicore.Document, col, row int) {\n\t\tlog.Printf(\"DocumentCursorMoved(%s, %d, %d)\", doc, col, row)\n\t})\n\te.RegisterEditorKeyboardModeChanged(func(mode wicore.KeyboardMode) {\n\t\tlog.Printf(\"EditorKeyboardModeChanged(%s)\", mode)\n\t})\n\te.RegisterEditorLanguage(func(l lang.Language) {\n\t\tlog.Printf(\"EditorLanguage(%s)\", l)\n\t})\n\te.RegisterTerminalResized(func() {\n\t\tlog.Printf(\"TerminalResized()\")\n\t})\n\te.RegisterTerminalKeyPressed(func(k key.Press) {\n\t\tlog.Printf(\"TerminalKeyPressed(%s)\", k)\n\t})\n\te.RegisterViewCreated(func(view wicore.View) {\n\t\tlog.Printf(\"ViewCreated(%s)\", view)\n\t})\n\te.RegisterWindowCreated(func(window wicore.Window) {\n\t\tlog.Printf(\"WindowCreated(%s)\", window)\n\t})\n\te.RegisterWindowResized(func(window wicore.Window) {\n\t\tlog.Printf(\"WindowResized(%s)\", window)\n\t})\n}", "func (instance *NDiscovery) init() error {\n\tif !instance.initialized {\n\t\tinstance.initialized = true\n\n\t\tvar err error\n\n\t\tif instance.IsEnabled() {\n\n\t\t\t// storage\n\t\t\tinstance.storage = NewNodeStorage(lygo_paths.GetWorkspacePath())\n\t\t\terr = instance.storage.Start()\n\t\t\tif nil != err {\n\t\t\t\tgoto exit\n\t\t\t}\n\t\t\tinstance.storage.UnlockNodes(instance.config.NetworkId) // release existing nodes\n\n\t\t\t// add self as a publisher\n\t\t\tif instance.config.Publisher.Enabled && instance.config.Publish.HasAddress() {\n\t\t\t\tinstance.storage.AddPublisher(instance.config.Publish.Address)\n\t\t\t}\n\n\t\t\t// main loop\n\t\t\tgo instance.discover()\n\t\t}\n\n\t\t// exit procedure\n\texit:\n\t\treturn err\n\t}\n\treturn nil\n}", "func (plugin *ExamplePlugin) Init() error {\n\n\t// add new metric to default registry (accessible at the path /metrics)\n\t//\n\t// the current value is returned by provided callback\n\t// created gauge is identified by tuple(namespace, subsystem, name) only the name field is mandatory\n\t// additional properties can be defined using labels - key-value pairs. 
They do not change over time for the given gauge.\n\terr := plugin.Prometheus.RegisterGaugeFunc(prom.DefaultRegistry, \"ns\", \"sub\", \"gaugeOne\",\n\t\t\"this metrics represents randomly generated numbers\", prometheus.Labels{\"Property1\": \"ABC\", \"Property2\": \"DEF\"}, func() float64 {\n\t\t\treturn rand.Float64()\n\t\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// create new registry that will be exposed at /custom path\n\terr = plugin.Prometheus.NewRegistry(customRegistry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// create gauge using prometheus API\n\tplugin.temporaryCounter = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tName: \"Countdown\",\n\t\tHelp: \"This gauge is decremented by 1 each second, once it reaches 0 the gauge is removed.\",\n\t})\n\tplugin.counterVal = 60\n\tplugin.temporaryCounter.Set(float64(plugin.counterVal))\n\n\t// register created gauge to the custom registry\n\terr = plugin.Prometheus.Register(customRegistry, plugin.temporaryCounter)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// create gauge vector and register it\n\tplugin.gaugeVec = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\tName: \"Vector\",\n\t\tHelp: \"This gauge groups multiple similar metrics.\",\n\t\tConstLabels: prometheus.Labels{\"type\": \"vector\", \"answer\": \"42\"},\n\t}, []string{orderLabel})\n\terr = plugin.Prometheus.Register(customRegistry, plugin.gaugeVec)\n\n\treturn err\n\n}", "func (m *Mixtape) init() {\n\tm.Users = []User{}\n\tm.Playlists = []Playlist{}\n\tm.Songs = []Song{}\n}", "func Init() {\n\tregisterPlugin(apps.New())\n}", "func (meta *Meta) init() {\n\tmeta.client = utils.CreateMongoDB(dbConfig.Str(\"address\"), log)\n\tmeta.database = meta.client.Database(dbConfig.Str(\"db\"))\n\tmeta.collection = meta.database.Collection(metaCollection)\n}", "func init() {\n\tparser.SharedParser().RegisterFabric(UpdateFabric{})\n}", "func init() {\n\tconfig.Register(\"instrumentation\", \"Instrumentation for traces and metrics.\",\n\t\topt, defaultOptions, config.WithNotify(configNotify))\n}", "func init() {\n\tlog = config.Logger()\n}", "func init() {\n\tlog = config.Logger()\n}", "func (a *Forj) driver_init(instance string) error {\n\n\td, found := a.drivers.Get(instance)\n\tif !found {\n\t\treturn fmt.Errorf(\"Internal error: Unable to find %s from drivers.\", instance)\n\t}\n\ta.CurrentPluginDriver = d\n\treturn nil\n}", "func (c *Config) init() {\n\n\tc.logger = logrus.New()\n\n\t// Connect slots\n\tc.ConnectStringSet(func(key string, val string) {\n\t\tc.SetString(key, val)\n\t})\n\tc.ConnectBoolSet(func(key string, val bool) {\n\t\tc.SetBool(key, val)\n\t})\n\tc.ConnectStringValue(func(key string) string {\n\t\treturn c.GetString(key)\n\t})\n\tc.ConnectBoolValue(func(key string) bool {\n\t\treturn c.GetBool(key)\n\t})\n\tc.ConnectSave(func() {\n\t\tc.save()\n\t})\n\tc.ConnectDefaults(func() {\n\t\tc.SetDefaults()\n\t})\n}", "func init() {\n\ttypes.AllowUserExec = append(types.AllowUserExec, []byte(Zksync))\n\t//注册合约启用高度\n\ttypes.RegFork(Zksync, InitFork)\n\ttypes.RegExec(Zksync, InitExecutor)\n}", "func init() {\n\tcallbacks = make(map[ModuleType]*ConfigCallback, 8)\n\tmodules = make(map[string]ModuleType, 32)\n}", "func init() {\n\tif err := mb.Registry.AddMetricSet(\"haproxy\", \"info\", New); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (plugin *Skeleton) Init() (err error) {\n\treturn nil\n}", "func (l *Logger) init() {\r\n\t// Set Testing flag to TRUE if testing detected\r\n\tl.Options.Testing = 
(flag.Lookup(\"test.v\") != nil)\r\n\r\n\tl.timeReset()\r\n\tl.started = l.timer\r\n\tinitColors()\r\n\tinitFormatPlaceholders()\r\n}", "func (r *registry) initDecoratorPlugin(p *plugin.Plugin) {\n\tconstructorSymbol, err := p.Lookup(decoratorPluginFactory)\n\tif err != nil {\n\t\tpanicWithLookupError(decoratorPluginFactory, err)\n\t}\n\tconstructor, ok := constructorSymbol.(func() decoration.Decorator)\n\tif !ok {\n\t\tpanicWithDefinitionError(decoratorPluginFactory)\n\t}\n\tdecorator := constructor()\n\tif decorator != nil {\n\t\tr.decorators = append(r.decorators, constructor())\n\t}\n}", "func Init() {\n\tplugins.RegisterPlugins(\"gitlab\", &GitlabPlugin{})\n}", "func (l *Loader) init() {\n\tif l.loading == nil {\n\t\tl.loading = stringset.New(1)\n\t\tl.sources = make(map[string]string, 1)\n\t\tl.symbols = make(map[string]*Struct, 1)\n\t}\n}", "func init() {\n\tcli.InitConfig(configName)\n}", "func init() {\n\tcli.InitConfig(configName)\n}", "func (plugin *Plugin) Init() error {\n\tplugin.Log.Debug(\"Initializing interface plugin\")\n\n\tplugin.fixNilPointers()\n\n\tplugin.ifStateNotifications = plugin.Deps.IfStatePub\n\tconfig, err := plugin.retrieveDPConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif config != nil {\n\t\tplugin.ifMtu = config.Mtu\n\t\tplugin.Log.Infof(\"Mtu read from config us set to %v\", plugin.ifMtu)\n\t\tplugin.enableStopwatch = config.Stopwatch\n\t\tif plugin.enableStopwatch {\n\t\t\tplugin.Log.Infof(\"stopwatch enabled for %v\", plugin.PluginName)\n\t\t} else {\n\t\t\tplugin.Log.Infof(\"stopwatch disabled for %v\", plugin.PluginName)\n\t\t}\n\t} else {\n\t\tplugin.ifMtu = defaultMtu\n\t\tplugin.Log.Infof(\"MTU set to default value %v\", plugin.ifMtu)\n\t\tplugin.Log.Infof(\"stopwatch disabled for %v\", plugin.PluginName)\n\t}\n\n\t// all channels that are used inside of publishIfStateEvents or watchEvents must be created in advance!\n\tplugin.ifStateChan = make(chan *intf.InterfaceStateNotification, 100)\n\tplugin.bdStateChan = make(chan *l2plugin.BridgeDomainStateNotification, 100)\n\tplugin.resyncConfigChan = make(chan datasync.ResyncEvent)\n\tplugin.resyncStatusChan = make(chan datasync.ResyncEvent)\n\tplugin.changeChan = make(chan datasync.ChangeEvent)\n\tplugin.ifIdxWatchCh = make(chan ifaceidx.SwIfIdxDto, 100)\n\tplugin.bdIdxWatchCh = make(chan bdidx.ChangeDto, 100)\n\tplugin.linuxIfIdxWatchCh = make(chan ifaceidx2.LinuxIfIndexDto, 100)\n\tplugin.errorChannel = make(chan ErrCtx, 100)\n\n\t// create plugin context, save cancel function into the plugin handle\n\tvar ctx context.Context\n\tctx, plugin.cancel = context.WithCancel(context.Background())\n\n\t//FIXME run following go routines later than following init*() calls - just before Watch()\n\n\t// run event handler go routines\n\tgo plugin.publishIfStateEvents(ctx)\n\tgo plugin.publishBdStateEvents(ctx)\n\tgo plugin.watchEvents(ctx)\n\n\t// run error handler\n\tgo plugin.changePropagateError()\n\n\terr = plugin.initIF(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = plugin.initACL(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = plugin.initL2(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = plugin.initL3(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = plugin.initErrorHandler()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = plugin.subscribeWatcher()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgPlugin = plugin\n\n\treturn nil\n}", "func init() {\n\tlogger = config.Initlog()\n}", "func init() {\n\thostifService = &HostifService{\n\t\tinstances: 
make(map[string]*HostifInstance),\n\t\tdone: make(chan struct{}),\n\t\tport: -1,\n\t}\n\n\tif l, err := vlog.New(moduleName); err == nil {\n\t\tlog = l\n\t} else {\n\t\tlog.Fatalf(\"Can't create logger: %s\", moduleName)\n\t}\n\n\trp := &vswitch.RingParam{\n\t\tCount: C.MAX_HOSTIF_MBUFS,\n\t\tSocketId: dpdk.SOCKET_ID_ANY,\n\t}\n\n\tif err := vswitch.RegisterModule(moduleName, newHostifInstance, rp, vswitch.TypeOther); err != nil {\n\t\tlog.Fatalf(\"Failed to register hostif: %v\", err)\n\t}\n}", "func (p *servicePlugin) Init(g *generator.Generator) {\n\tp.Generator = g\n}", "func (instance *DBSyncSlave) init() {\n\tif nil != instance {\n\t\tif len(instance.Config.Uuid) > 0 {\n\t\t\tinstance.UID = instance.Config.Uuid\n\t\t} else {\n\t\t\tinstance.UID, _ = lygo_sys.ID()\n\t\t}\n\t\tif nil == instance.client {\n\t\t\tinstance.client = lygo_nio.NewNioClient(instance.Config.Host(), instance.Config.Port())\n\t\t\tinstance.client.OnConnect(instance.doConnect)\n\t\t\tinstance.client.OnDisconnect(instance.doDisconnect)\n\t\t}\n\t}\n}", "func (u *Uploader) init() {\n\t// generate mac and upload token\n\tputPolicy := storage.PutPolicy{\n\t\tScope: u.bucket,\n\t}\n\tmac := qbox.NewMac(u.accessKey, u.secretKey)\n\tu.upToken = putPolicy.UploadToken(mac)\n\n\tcfg := storage.Config{}\n\t// 空间对应的机房\n\tcfg.Zone = &storage.ZoneHuadong\n\t// 是否使用https域名\n\tcfg.UseHTTPS = false\n\t// 上传是否使用CDN上传加速\n\tcfg.UseCdnDomains = false\n\t// 构建表单上传的对象\n\tu.formUploader = storage.NewFormUploader(&cfg)\n\tu.bucketManager = storage.NewBucketManager(mac, &cfg)\n\n\treturn\n}", "func (c *switchBotCollector) init() error {\n\tdevices, _, err := c.client.Device().List(context.Background())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, d := range devices {\n\t\tswitch d.Type {\n\t\tcase switchbot.Meter:\n\t\t\tc.meters = append(c.meters, d)\n\t\t\tlog.Printf(\"adding meter with device id: %s, name: %s\\n\", d.ID, d.Name)\n\t\t}\n\t}\n\n\treturn nil\n}", "func init() {\n\t// Register the mockup input plugin for the required names\n\tinputs.Add(\"typesmockup\", func() telegraf.Input { return &MockupTypesPlugin{} })\n}", "func init() {\n\tcore.AddAcl(new(AclHmac))\n}", "func init() {\n\tdb.RegisterInit(meterReader)\n}", "func init() {\n\tevent = &pubSubEvent{ctx: context.Background()}\n\tevent.initClientAndTopic()\n}", "func init() {\n\tnetwork.RegisterMessage(AnnounceAggregation{})\n\tnetwork.RegisterMessage(ReplySumCipherBytes{})\n\tonet.GlobalProtocolRegister(AggregationProtocolName, NewAggregationProtocol)\n}", "func init() {\n\t// <<-- Creer-Merge: init -->>\n\t// package initialization logic can go here\n\t// <<-- /Creer-Merge: init -->>\n}", "func (p *Plugin) Init() (err error) {\n\t// Prepare topic and subscription for status check client\n\tp.subscription = make(chan *client.ConsumerMessage)\n\n\t// Get muxCfg data (contains kafka brokers ip addresses)\n\tmuxCfg := &mux.Config{}\n\tfound, err := p.Cfg.LoadValue(muxCfg)\n\tif !found {\n\t\tp.Log.Info(\"kafka config not found \", p.Cfg.GetConfigName(), \" - skip loading this plugin\")\n\t\tp.disabled = true\n\t\treturn nil //skip loading the plugin\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\t// retrieve clientCfg\n\tclientCfg, err := p.getClientConfig(muxCfg, p.Log, topic)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// init 'hash' sarama client\n\tp.hsClient, err = client.NewClient(clientCfg, client.Hash)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// init 'manual' sarama client\n\tp.manClient, err = client.NewClient(clientCfg, client.Manual)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\n\t// Initialize both multiplexers to allow both, dynamic and manual mode\n\tif p.mux == nil {\n\t\tname := clientCfg.GroupID\n\t\tp.Log.Infof(\"Group ID is set to %v\", name)\n\t\tp.mux, err = mux.InitMultiplexerWithConfig(clientCfg, p.hsClient, p.manClient, name, p.Log)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp.Log.Debug(\"Default multiplexer initialized\")\n\t}\n\n\treturn err\n}", "func Init(config *config.Registry) {\n\tcfg = config\n}", "func init() {\n\tinitconf(configLocation)\n}", "func init() {\n\tmd := activity.NewMetadata(jsonMetadata)\n\tactivity.Register(NewActivity(md))\n}", "func Init() *node.Plugin {\n\tconfigure := func(*node.Plugin) {\n\t\tlog := logger.NewLogger(PluginName)\n\t\tdownloader.Init(log, parameters.GetString(parameters.IpfsGatewayAddress))\n\t}\n\trun := func(*node.Plugin) {\n\t\t// Nothing to run here\n\t}\n\n\tPlugin := node.NewPlugin(PluginName, node.Enabled, configure, run)\n\treturn Plugin\n}", "func (l *Service) init() {\n\tl.RegisterUnaryOperation(opSize, l.Size)\n\tl.RegisterUnaryOperation(opContains, l.Contains)\n\tl.RegisterUnaryOperation(opAppend, l.Append)\n\tl.RegisterUnaryOperation(opInsert, l.Insert)\n\tl.RegisterUnaryOperation(opGet, l.Get)\n\tl.RegisterUnaryOperation(opSet, l.Set)\n\tl.RegisterUnaryOperation(opRemove, l.Remove)\n\tl.RegisterUnaryOperation(opClear, l.Clear)\n\tl.RegisterStreamOperation(opEvents, l.Events)\n\tl.RegisterStreamOperation(opIterate, l.Iterate)\n}", "func (r *registry) initAuthPlugin(p *plugin.Plugin) {\n\tconstructorSymbol, err := p.Lookup(authPluginFactory)\n\tif err != nil {\n\t\tpanicWithLookupError(authPluginFactory, err)\n\t}\n\tconstructor, ok := constructorSymbol.(func() auth.Filter)\n\tif !ok {\n\t\tpanicWithDefinitionError(authPluginFactory)\n\t}\n\n\tfilter := constructor()\n\tif filter != nil {\n\t\tr.filters = append(r.filters, filter)\n\t}\n}", "func Init(pluginRegistry *pluginregistry.PluginRegistry, log xcontext.Logger) {\n\n\t// Register TargetManager plugins\n\tfor _, tmloader := range targetManagers {\n\t\tif err := pluginRegistry.RegisterTargetManager(tmloader()); err != nil {\n\t\t\tlog.Fatalf(\"%v\", err)\n\t\t}\n\t}\n\n\t// Register TestFetcher plugins\n\tfor _, tfloader := range testFetchers {\n\t\tif err := pluginRegistry.RegisterTestFetcher(tfloader()); err != nil {\n\t\t\tlog.Fatalf(\"%v\", err)\n\t\t}\n\t}\n\n\t// Register TestStep plugins\n\tfor _, tsloader := range testSteps {\n\t\tif err := pluginRegistry.RegisterTestStep(tsloader()); err != nil {\n\t\t\tlog.Fatalf(\"%v\", err)\n\n\t\t}\n\t}\n\n\t// Register Reporter plugins\n\tfor _, rfloader := range reporters {\n\t\tif err := pluginRegistry.RegisterReporter(rfloader()); err != nil {\n\t\t\tlog.Fatalf(\"%v\", err)\n\t\t}\n\t}\n\n\t// user-defined function registration\n\ttestInitOnce.Do(func() {\n\t\tfor _, userFunction := range userFunctions {\n\t\t\tfor name, fn := range userFunction {\n\t\t\t\tif err := test.RegisterFunction(name, fn); err != nil {\n\t\t\t\t\tlog.Fatalf(\"%v\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n}", "func init() {\n\tbackend.Register(\"local\", NewLocalBackend())\n}", "func init() {\n\tactions = make(map[string]InitFunc)\n}", "func FLBPluginInit(plugin unsafe.Pointer) int {\n\t// Example to retrieve an optional configuration parameter\n\tbucket := output.FLBPluginConfigKey(plugin, \"bucket\")\n\tregion := output.FLBPluginConfigKey(plugin, \"region\")\n\ts3PartSize := output.FLBPluginConfigKey(plugin, \"s3_part_size\")\n\ts3KeyPattern := output.FLBPluginConfigKey(plugin, 
\"s3_key_pattern\")\n\n\tparameters.bucket = bucket\n\tparameters.region = region\n\ts3KeyPatternInt, err := strconv.Atoi(s3PartSize)\n\tif err != nil {\n\t\tlog.Printf(\"[out_s3] multipart_size invalid\")\n\t\treturn output.FLB_ERROR\n\t}\n\tparameters.s3PartSize = s3KeyPatternInt\n\tparameters.s3KeyPattern = s3KeyPattern\n\treturn output.FLB_OK\n}", "func init() {\n\tcodegen.RegisterPluginFirst(\"zaplogger\", \"gen\", nil, Generate)\n\tcodegen.RegisterPluginLast(\"zaplogger-updater\", \"example\", nil, UpdateExample)\n}", "func Init() *node.Plugin {\n\treturn node.NewPlugin(pluginName, node.Enabled, configure, run)\n}", "func Init() (*Plugin, error) {\n\tvar ex run.Info\n\treader := bufio.NewReader(os.Stdin)\n\tb, err := reader.ReadBytes('\\n')\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = json.Unmarshal(b, &ex)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// TODO: set log level from the config\n\tlog.SetLevel(log.DebugLevel)\n\n\treturn &Plugin{\n\t\tMeta: &ex,\n\t}, nil\n}", "func init() {\n\tloadImages()\n}", "func init() {\n\tworkflow.RegisterWithOptions(recoverWorkflow, workflow.RegisterOptions{Name: \"recoverWorkflow\"})\n\tactivity.Register(listOpenExecutions)\n\tactivity.Register(recoverExecutions)\n}", "func init() {\n\tinitTld()\n}", "func init() {\n\tRegisterEngine(EngineTypeHTML, &htmlEngine{})\n}", "func init() {\n\tmonitors.Register(monitorType, func() interface{} {\n\t\treturn &Monitor{\n\t\t\t*collectd.NewMonitorCore(CollectdTemplate),\n\t\t}\n\t}, &Config{})\n}", "func (c *ChromaHighlight) init() (err error) {\n\n\t// Option handling registering formatters\n\tswitch c.formatter {\n\tcase \"gtkDirectToTextBuffer\":\n\t\tformatters.Register(\"gtkDirectToTextBuffer\", chroma.FormatterFunc(c.gtkDirectToTextBufferFormatter))\n\tcase \"gtkTextBuffer\":\n\t\tformatters.Register(\"gtkTextBuffer\", chroma.FormatterFunc(c.gtkTextBufferFormatter))\n\tcase \"pango\":\n\t\tformatters.Register(\"pango\", chroma.FormatterFunc(c.pangoFormatter))\n\t}\n\n\t// Used to parse GdkColor\n\tc.regBG = regexp.MustCompile(`bg:#[a-fA-F|0-9]{6}`)\n\tc.regFG = regexp.MustCompile(`#[a-fA-F|0-9]{6}`)\n\n\tc.RemoveTags()\n\n\t// To check if source text have been modified.\n\tc.md5SizeAnalyze = 1024 // Set to 0 means there is no limit\n\n\tswitch c.srcBuff {\n\tcase nil:\n\t\tc.textTagTable, err = c.txtBuff.GetTagTable()\n\tdefault:\n\t\tc.textTagTable, err = c.srcBuff.GetTagTable()\n\t}\n\treturn\n}", "func (n *notifs) init(p *proxy) {\n\tn.p = p\n\tn.nls = newListeners()\n\tn.fin = newListeners()\n\n\tn.added = make([]nl.Listener, 16)\n\tn.removed = make([]nl.Listener, 16)\n\tn.finished = make([]nl.Listener, 16)\n\n\thk.Reg(notifsName+hk.NameSuffix, n.housekeep, hk.PruneActiveIval)\n\tn.p.Sowner().Listeners().Reg(n)\n}" ]
[ "0.80929697", "0.74094325", "0.72971886", "0.70811", "0.6980509", "0.6893134", "0.6740067", "0.66833556", "0.66826326", "0.6619359", "0.66111064", "0.658682", "0.65835285", "0.6549847", "0.65423113", "0.6536784", "0.65058976", "0.65058976", "0.6488702", "0.6450263", "0.63920665", "0.6317787", "0.63161457", "0.6314196", "0.6309274", "0.6296504", "0.629126", "0.62825394", "0.6244593", "0.6241433", "0.6241433", "0.6241433", "0.6241433", "0.62411904", "0.6224352", "0.6203751", "0.62010735", "0.6198305", "0.6182599", "0.6181865", "0.6175874", "0.6171069", "0.6157148", "0.613518", "0.6134298", "0.61267215", "0.6110622", "0.61087346", "0.60818917", "0.6079814", "0.6073013", "0.6060912", "0.6042941", "0.6026283", "0.6026283", "0.602608", "0.60194534", "0.60174584", "0.60068774", "0.60050595", "0.6002467", "0.6000576", "0.5995534", "0.59899247", "0.5983425", "0.5978414", "0.5978414", "0.59783846", "0.5977826", "0.5977616", "0.59750366", "0.5958355", "0.5957408", "0.5948616", "0.59459144", "0.5945893", "0.5943006", "0.5942603", "0.5934485", "0.59153515", "0.5907634", "0.5907565", "0.58921266", "0.58890927", "0.5886861", "0.5863206", "0.5844021", "0.5840108", "0.58258283", "0.5824629", "0.582132", "0.58164555", "0.5813343", "0.58061343", "0.57981634", "0.5787806", "0.57853967", "0.57850426", "0.5777583", "0.5774269", "0.57699794" ]
0.0
-1
ReadResponse reads a server response into the received o.
func (o *GetNodeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { switch response.Code() { case 200: result := NewGetNodeOK() if err := result.readResponse(response, consumer, o.formats); err != nil { return nil, err } return result, nil case 400: result := NewGetNodeBadRequest() if err := result.readResponse(response, consumer, o.formats); err != nil { return nil, err } return nil, result case 401: result := NewGetNodeUnauthorized() if err := result.readResponse(response, consumer, o.formats); err != nil { return nil, err } return nil, result case 403: result := NewGetNodeForbidden() if err := result.readResponse(response, consumer, o.formats); err != nil { return nil, err } return nil, result case 404: result := NewGetNodeNotFound() if err := result.readResponse(response, consumer, o.formats); err != nil { return nil, err } return nil, result case 422: result := NewGetNodeUnprocessableEntity() if err := result.readResponse(response, consumer, o.formats); err != nil { return nil, err } return nil, result case 500: result := NewGetNodeInternalServerError() if err := result.readResponse(response, consumer, o.formats); err != nil { return nil, err } return nil, result default: return nil, runtime.NewAPIError("unknown error", response, response.Code()) } }
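The document above follows the reader pattern used by go-swagger generated clients: switch on the HTTP status code, decode the body into a typed result for 200, and return typed error results for the known failure codes, falling back to a generic API error otherwise. Below is a minimal self-contained sketch of that same dispatch idea using only the standard library; the Node type, readNodeResponse helper, and the endpoint URL are hypothetical stand-ins for illustration and are not part of the record or of go-swagger's actual runtime.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// Node is a hypothetical payload type standing in for the generated model.
type Node struct {
	ID   string `json:"id"`
	Name string `json:"name"`
}

// apiError plays the role of runtime.NewAPIError in the generated code:
// it carries an unexpected status code back to the caller as an error.
type apiError struct{ code int }

func (e *apiError) Error() string {
	return fmt.Sprintf("unknown error (status %d)", e.code)
}

// readNodeResponse dispatches on the status code the same way the
// generated ReadResponse does: 200 decodes a success payload, a known
// error code becomes a typed error, and anything else is an apiError.
func readNodeResponse(resp *http.Response) (*Node, error) {
	defer resp.Body.Close()
	switch resp.StatusCode {
	case http.StatusOK:
		var n Node
		if err := json.NewDecoder(resp.Body).Decode(&n); err != nil {
			return nil, err
		}
		return &n, nil
	case http.StatusNotFound:
		return nil, fmt.Errorf("node not found")
	default:
		return nil, &apiError{code: resp.StatusCode}
	}
}

func main() {
	resp, err := http.Get("https://example.com/api/node/1") // hypothetical endpoint
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	node, err := readNodeResponse(resp)
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Printf("got node: %+v\n", node)
}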
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (d *ResourceHandler) ReadResponse(dataOut unsafe.Pointer, bytesToRead int32, bytesRead *int32, callback *Callback) int32 {\n\treturn lookupResourceHandlerProxy(d.Base()).ReadResponse(d, dataOut, bytesToRead, bytesRead, callback)\n}", "func (o *GetServerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetServerOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *InteractionBindReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewInteractionBindOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewInteractionBindNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 500:\n\t\tresult := NewInteractionBindInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *InteractionUnbindReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewInteractionUnbindOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewInteractionUnbindNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 500:\n\t\tresult := NewInteractionUnbindInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (r *ResponseReader) ReadResponse(req *Request) (res *Response, err error) {\n\tres = CreateEmptyResponse(req)\n\t_, err = readFirstLine(r, res)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = readHeaders(r, res)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t_, err = readBodyContent(r, res)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn res, nil\n}", "func (c *Conn) ReadResponse(rmsg *Response) error {\n\tdata, err := c.ReadDataUnit()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcolor.Printf(\"@{c}<!-- RESPONSE -->\\n%s\\n\\n\", string(data))\n\terr = xml.Unmarshal(data, rmsg)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// color.Fprintf(os.Stderr, \"@{y}%s\\n\", spew.Sprintf(\"%+v\", msg))\n\tif len(rmsg.Results) != 0 {\n\t\tr := rmsg.Results[0]\n\t\tif r.IsError() {\n\t\t\treturn r\n\t\t}\n\t}\n\treturn nil\n}", "func (o *VerifyConnectionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewVerifyConnectionOK()\n\t\tif err := result.readResponse(response, 
consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *GetAvailableReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewGetAvailableOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *ClosePositionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewClosePositionOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 400:\n\t\tresult := NewClosePositionBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 401:\n\t\tresult := NewClosePositionUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 404:\n\t\tresult := NewClosePositionNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 405:\n\t\tresult := NewClosePositionMethodNotAllowed()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *DescribeServerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewDescribeServerOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewDescribeServerBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewDescribeServerNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 409:\n\t\tresult := NewDescribeServerConflict()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewDescribeServerInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *GetServerSessionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetServerSessionOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil 
{\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewGetServerSessionBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewGetServerSessionUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewGetServerSessionNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 500:\n\t\tresult := NewGetServerSessionInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\tdata, err := ioutil.ReadAll(response.Body())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"Requested GET /dsmcontroller/namespaces/{namespace}/servers/{podName}/session returns an error %d: %s\", response.Code(), string(data))\n\t}\n}", "func (o *StartReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewStartOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (resp *PharosResponse) readResponse() {\n\tif !resp.hasBeenRead && resp.Response != nil && resp.Response.Body != nil {\n\t\tresp.data, resp.Error = ioutil.ReadAll(resp.Response.Body)\n\t\tresp.Response.Body.Close()\n\t\tresp.hasBeenRead = true\n\t}\n}", "func (o *HelloWorldReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewHelloWorldOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewHelloWorldBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewHelloWorldInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (reader *BasicRpcReader) ReadResponse(r io.Reader, method string, requestID int32, resp proto.Message) error {\n\trrh := &hadoop.RpcResponseHeaderProto{}\n\terr := readRPCPacket(r, rrh, resp)\n\tif err != nil {\n\t\treturn err\n\t} else if int32(rrh.GetCallId()) != requestID {\n\t\treturn errors.New(\"unexpected sequence number\")\n\t} else if rrh.GetStatus() != hadoop.RpcResponseHeaderProto_SUCCESS {\n\t\treturn &NamenodeError{\n\t\t\tmethod: method,\n\t\t\tmessage: rrh.GetErrorMsg(),\n\t\t\tcode: int(rrh.GetErrorDetail()),\n\t\t\texception: rrh.GetExceptionClassName(),\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *UpdateAntivirusServerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 204:\n\t\tresult := NewUpdateAntivirusServerNoContent()\n\t\tif err := result.readResponse(response, consumer, 
o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\tresult := NewUpdateAntivirusServerDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *HasEventsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewHasEventsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewHasEventsUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 403:\n\t\tresult := NewHasEventsForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *GetV2Reader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetV2OK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 500:\n\t\tresult := NewGetV2InternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *SaveReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 204:\n\t\tresult := NewSaveNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 500:\n\t\tresult := NewSaveInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *TestWriteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewTestWriteOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewTestWriteUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *AllConnectionsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewAllConnectionsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewAllConnectionsBadRequest()\n\t\tif 
err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewAllConnectionsNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *SendDataToDeviceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewSendDataToDeviceOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewSendDataToDeviceBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewSendDataToDeviceInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *HealthNoopReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewHealthNoopOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *PutOutOfRotationReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 204:\n\t\tresult := NewPutOutOfRotationNoContent()\n\t\tresult.HttpResponse = response\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\terrorResult := kbcommon.NewKillbillError(response.Code())\n\t\tif err := consumer.Consume(response.Body(), &errorResult); err != nil && err != io.EOF {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, errorResult\n\t}\n}", "func (o *GetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *ReplaceServerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewReplaceServerOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 202:\n\t\tresult := NewReplaceServerAccepted()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewReplaceServerBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewReplaceServerNotFound()\n\t\tif err := 
result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\tresult := NewReplaceServerDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *StatusReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewStatusOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewStatusUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 403:\n\t\tresult := NewStatusForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func ReadResponse(r *bfe_bufio.Reader, req *Request) (*Response, error) {\n\ttp := textproto.NewReader(r)\n\tresp := &Response{\n\t\tRequest: req,\n\t}\n\n\t// Parse the first line of the response.\n\tline, err := tp.ReadLine()\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\terr = io.ErrUnexpectedEOF\n\t\t}\n\t\treturn nil, err\n\t}\n\tf := strings.SplitN(line, \" \", 3)\n\tif len(f) < 2 {\n\t\treturn nil, &badStringError{\"malformed HTTP response\", line}\n\t}\n\treasonPhrase := \"\"\n\tif len(f) > 2 {\n\t\treasonPhrase = f[2]\n\t}\n\tresp.Status = f[1] + \" \" + reasonPhrase\n\tresp.StatusCode, err = strconv.Atoi(f[1])\n\tif err != nil {\n\t\treturn nil, &badStringError{\"malformed HTTP status code\", f[1]}\n\t}\n\n\tresp.Proto = f[0]\n\tvar ok bool\n\tif resp.ProtoMajor, resp.ProtoMinor, ok = ParseHTTPVersion(resp.Proto); !ok {\n\t\treturn nil, &badStringError{\"malformed HTTP version\", resp.Proto}\n\t}\n\n\t// Parse the response headers.\n\tmimeHeader, err := tp.ReadMIMEHeader()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp.Header = Header(mimeHeader)\n\n\tfixPragmaCacheControl(resp.Header)\n\n\terr = readTransfer(resp, r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp, nil\n}", "func (o *PostChatroomsChannelHashReadReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewPostChatroomsChannelHashReadOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 403:\n\t\tresult := NewPostChatroomsChannelHashReadForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *TogglePacketGeneratorsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewTogglePacketGeneratorsCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not 
match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *FrontPutBinaryReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewFrontPutBinaryOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *SystemPingReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewSystemPingOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 500:\n\t\tresult := NewSystemPingInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *SendDummyAlertReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewSendDummyAlertOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 400:\n\t\tresult := NewSendDummyAlertBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 404:\n\t\tresult := NewSendDummyAlertNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *GetViewsConnectionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetViewsConnectionOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewGetViewsConnectionBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *SyncCopyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewSyncCopyOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewSyncCopyDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *PostPatientsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 
200:\n\t\tresult := NewPostPatientsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 400:\n\t\tresult := NewPostPatientsBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 405:\n\t\tresult := NewPostPatientsMethodNotAllowed()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (c *Conn) readResponse(res *response_) error {\n\terr := c.readDataUnit()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = IgnoreEOF(scanResponse.Scan(c.decoder, res))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif res.Result.IsError() {\n\t\treturn res.Result\n\t}\n\treturn nil\n}", "func (o *AllConnectionsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n switch response.Code() {\n \n case 200:\n result := NewAllConnectionsOK()\n if err := result.readResponse(response, consumer, o.formats); err != nil {\n return nil, err\n }\n return result, nil\n \n case 400:\n result := NewAllConnectionsBadRequest()\n if err := result.readResponse(response, consumer, o.formats); err != nil {\n return nil, err\n }\n return nil, result\n \n case 404:\n result := NewAllConnectionsNotFound()\n if err := result.readResponse(response, consumer, o.formats); err != nil {\n return nil, err\n }\n return nil, result\n \n default:\n return nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n }\n}", "func (o *GetMsgVpnReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewGetMsgVpnOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\tresult := NewGetMsgVpnDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (r *Response) Read(p []byte) (n int, err error) {\n\n\tif r.Error != nil {\n\t\treturn -1, r.Error\n\t}\n\n\treturn r.RawResponse.Body.Read(p)\n}", "func (o *PostPciLinksMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewPostPciLinksMoidCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPostPciLinksMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *THSRAPIODFare2121Reader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewTHSRAPIODFare2121OK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 299:\n\t\tresult := NewTHSRAPIODFare2121Status299()\n\t\tif err 
:= result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 304:\n\t\tresult := NewTHSRAPIODFare2121NotModified()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *PostGatewayConnectNetaddressReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 204:\n\t\tresult := NewPostGatewayConnectNetaddressNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPostGatewayConnectNetaddressDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *DNSGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewDNSGetOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewDNSGetDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *GetGreetStatusReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetGreetStatusOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *PostAPIV2EventsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewPostAPIV2EventsNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewPostAPIV2EventsBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 403:\n\t\tresult := NewPostAPIV2EventsForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *CreateAntivirusServerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewCreateAntivirusServerOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\tresult := 
NewCreateAntivirusServerDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *PostCarsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewPostCarsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 405:\n\t\tresult := NewPostCarsMethodNotAllowed()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *LogReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewLogOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewLogNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *ChatGetConnectedReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewChatGetConnectedOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 400:\n\t\tresult := NewChatGetConnectedBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 401:\n\t\tresult := NewChatGetConnectedUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 404:\n\t\tresult := NewChatGetConnectedNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *WebModifyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewWebModifyOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 202:\n\t\tresult := NewWebModifyAccepted()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewWebModifyDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *GetHyperflexServerModelsMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, 
error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetHyperflexServerModelsMoidOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewGetHyperflexServerModelsMoidNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\tresult := NewGetHyperflexServerModelsMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *KillQueryReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 204:\n\t\tresult := NewKillQueryNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewKillQueryBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewKillQueryNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 422:\n\t\tresult := NewKillQueryUnprocessableEntity()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *GetProgressionViewReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetProgressionViewOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewGetProgressionViewBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *UpdateRackTopoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewUpdateRackTopoOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewUpdateRackTopoBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *GetByUIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetByUIDOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 
404:\n\t\tresult := NewGetByUIDNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *UtilTestReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewUtilTestOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *GetMeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetMeOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewGetMeDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *Delete1Reader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 204:\n\t\tresult := NewDelete1NoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewDelete1NotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *RevokeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewRevokeOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewRevokeUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewRevokeNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *PostGatewayDisconnectNetaddressReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 204:\n\t\tresult := NewPostGatewayDisconnectNetaddressNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPostGatewayDisconnectNetaddressDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, 
nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *GetProtocolsUsingGETReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetProtocolsUsingGETOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *DestroySessionUsingPOSTReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewDestroySessionUsingPOSTOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *CompleteTransactionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 204:\n\t\tresult := NewCompleteTransactionNoContent()\n\t\tresult.HttpResponse = response\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\terrorResult := kbcommon.NewKillbillError(response.Code())\n\t\tif err := consumer.Consume(response.Body(), &errorResult); err != nil && err != io.EOF {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, errorResult\n\t}\n}", "func (o *GetMapNameEventsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetMapNameEventsOK(o.writer)\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewGetMapNameEventsNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *RecoveryReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewRecoveryOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 500:\n\t\tresult := NewRecoveryInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *GetPeersReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewGetPeersOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 403:\n\t\tresult := NewGetPeersForbidden()\n\t\tif err := 
result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *InstallEventsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewInstallEventsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *UpdateRackTopoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewUpdateRackTopoOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewUpdateRackTopoBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewUpdateRackTopoNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewUpdateRackTopoInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *SetMemoRequiredReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewSetMemoRequiredOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewSetMemoRequiredBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewSetMemoRequiredInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *GetVoicesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetVoicesOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *PatchHyperflexServerModelsMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewPatchHyperflexServerModelsMoidCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPatchHyperflexServerModelsMoidDefault(response.Code())\n\t\tif err := 
result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *BounceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tresult := NewBounceDefault(response.Code())\n\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\treturn nil, err\n\t}\n\tif response.Code()/100 == 2 {\n\t\treturn result, nil\n\t}\n\treturn nil, result\n}", "func (o *PostHyperflexHxdpVersionsMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewPostHyperflexHxdpVersionsMoidCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPostHyperflexHxdpVersionsMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *GetObmsLibraryIdentifierReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewGetObmsLibraryIdentifierOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewGetObmsLibraryIdentifierNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\tresult := NewGetObmsLibraryIdentifierDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *UserQuerySessionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewUserQuerySessionOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewUserQuerySessionBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 500:\n\t\tresult := NewUserQuerySessionInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\tdata, err := ioutil.ReadAll(response.Body())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"Requested GET /sessionbrowser/namespaces/{namespace}/gamesession returns an error %d: %s\", response.Code(), string(data))\n\t}\n}", "func (o *DeleteApplianceRestoresMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewDeleteApplianceRestoresMoidOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewDeleteApplianceRestoresMoidNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil 
{\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\tresult := NewDeleteApplianceRestoresMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *GetDiscoverReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewGetDiscoverOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *UnclaimTrafficFilterLinkIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewUnclaimTrafficFilterLinkIDOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewUnclaimTrafficFilterLinkIDBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewUnclaimTrafficFilterLinkIDInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (r *overwriteConsumerReader) ReadResponse(resp runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tif r.forStatusCode == ForAllStatusCodes || resp.Code() == r.forStatusCode {\n\t\treturn r.requestReader.ReadResponse(resp, r.consumer)\n\t}\n\n\treturn r.requestReader.ReadResponse(resp, consumer)\n}", "func (o *ChangeaspecificSpeedDialReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 204:\n\t\tresult := NewChangeaspecificSpeedDialNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *GetDebugRequestReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewGetDebugRequestOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewGetDebugRequestNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *PostMemoryArraysMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewPostMemoryArraysMoidCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPostMemoryArraysMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (c *Client) readResponse(conn net.Conn) ([]byte, error) {\n\tif c.Timeout > 0 {\n\t\t_ = conn.SetReadDeadline(time.Now().Add(c.Timeout))\n\t}\n\n\tproto := \"udp\"\n\tif _, ok := conn.(*net.TCPConn); ok {\n\t\tproto = \"tcp\"\n\t}\n\n\tif proto == \"udp\" {\n\t\tbufSize := c.UDPSize\n\t\tif bufSize == 0 {\n\t\t\tbufSize = dns.MinMsgSize\n\t\t}\n\t\tresponse := make([]byte, bufSize)\n\t\tn, err := conn.Read(response)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn response[:n], nil\n\t}\n\n\t// If we got here, this is a TCP connection\n\t// so we should read a 2-byte prefix first\n\treturn readPrefixed(conn)\n}", "func (o *PayReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewPayOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewPayBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewPayNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 409:\n\t\tresult := NewPayConflict()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\tdata, err := ioutil.ReadAll(response.Body())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"Requested POST /platform/public/namespaces/{namespace}/payment/orders/{paymentOrderNo}/pay returns an error %d: %s\", response.Code(), string(data))\n\t}\n}", "func (o *CountReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewCountOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 400:\n\t\tresult := NewCountBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *PostNodesIdentifierObmIdentifyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 201:\n\t\tresult := NewPostNodesIdentifierObmIdentifyCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewPostNodesIdentifierObmIdentifyNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\tresult := NewPostNodesIdentifierObmIdentifyDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, 
result\n\t}\n}", "func (o *GetInterpreterReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetInterpreterOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewGetInterpreterNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *DeleteEventsEventIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 204:\n\t\tresult := NewDeleteEventsEventIDNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 401:\n\t\tresult := NewDeleteEventsEventIDUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 404:\n\t\tresult := NewDeleteEventsEventIDNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *UtilityServiceReadyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewUtilityServiceReadyOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewUtilityServiceReadyDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *SubscriptionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewSubscriptionOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *HTTPGetPersistenceItemDataReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewHTTPGetPersistenceItemDataOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewHTTPGetPersistenceItemDataNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *FrontSessionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := 
NewFrontSessionOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *PostEquipmentIoExpandersMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewPostEquipmentIoExpandersMoidCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPostEquipmentIoExpandersMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (pr *PushedRequest) ReadResponse(ctx context.Context) (*http.Response, error) {\n\tselect {\n\tcase <-ctx.Done():\n\t\tpr.Cancel()\n\t\tpr.pushedStream.bufPipe.CloseWithError(ctx.Err())\n\t\treturn nil, ctx.Err()\n\tcase <-pr.pushedStream.peerReset:\n\t\treturn nil, pr.pushedStream.resetErr\n\tcase resErr := <-pr.pushedStream.resc:\n\t\tif resErr.err != nil {\n\t\t\tfmt.Println(resErr.err.Error())\n\t\t\tpr.Cancel()\n\t\t\tpr.pushedStream.bufPipe.CloseWithError(resErr.err)\n\t\t\treturn nil, resErr.err\n\t\t}\n\t\tresErr.res.Request = pr.Promise\n\t\tresErr.res.TLS = pr.pushedStream.cc.tlsState\n\t\treturn resErr.res, resErr.err\n\t}\n}", "func (o *DeleteFirmwareUpgradesMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewDeleteFirmwareUpgradesMoidOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewDeleteFirmwareUpgradesMoidNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\tresult := NewDeleteFirmwareUpgradesMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *GetZippedReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tresult := NewGetZippedDefault(response.Code())\n\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\treturn nil, err\n\t}\n\tif response.Code()/100 == 2 {\n\t\treturn result, nil\n\t}\n\treturn nil, result\n}", "func (o *GetEtherPhysicalPortsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetEtherPhysicalPortsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewGetEtherPhysicalPortsDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *ZoneStreamReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) 
(interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewZoneStreamOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *ByNamespaceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewByNamespaceOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewByNamespaceNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *SystemDataUsageReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewSystemDataUsageOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 500:\n\t\tresult := NewSystemDataUsageInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}" ]
[ "0.76405734", "0.7608144", "0.7521353", "0.75097424", "0.74801594", "0.7473206", "0.7434389", "0.74247", "0.7375657", "0.73675185", "0.73588216", "0.7355202", "0.7350198", "0.73473", "0.73469675", "0.7340845", "0.73362285", "0.7323048", "0.73161495", "0.73157775", "0.7310404", "0.73078555", "0.72906756", "0.72870046", "0.728198", "0.72748476", "0.72743714", "0.72661567", "0.72642744", "0.7262768", "0.72550696", "0.7250208", "0.7248681", "0.72485346", "0.7241619", "0.7224572", "0.7224563", "0.7219402", "0.7215418", "0.72118115", "0.7210954", "0.720973", "0.72095066", "0.72003406", "0.719849", "0.7197134", "0.71935445", "0.717748", "0.7174178", "0.7174148", "0.71663266", "0.7156039", "0.7149911", "0.7149321", "0.71492857", "0.71440345", "0.71437865", "0.7141145", "0.71395946", "0.7137083", "0.71370006", "0.7136694", "0.7136165", "0.7135898", "0.7134014", "0.71309704", "0.71249527", "0.71243346", "0.7120785", "0.7120046", "0.7119027", "0.7115224", "0.7104784", "0.7101035", "0.7099133", "0.70986223", "0.7098585", "0.7097942", "0.7096886", "0.7096308", "0.7093752", "0.7093519", "0.70886284", "0.708607", "0.7083482", "0.7081503", "0.70787483", "0.7077372", "0.7077005", "0.7076419", "0.7070349", "0.7069371", "0.7067944", "0.7067342", "0.70670754", "0.7062562", "0.70619905", "0.70604306", "0.7058176", "0.70545226", "0.70516527" ]
0.0
-1
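The row above pairs a go-swagger client question with generated ReadResponse readers as retrieval negatives. For reference, a minimal sketch of the dispatcher shape those negatives share is below; the GetWidgetReader and GetWidgetOK names are hypothetical placeholders, not taken from any snippet in this corpus.

// Sketch of the go-swagger client response-reader pattern seen in the
// negatives above. All type and endpoint names here are hypothetical.
package client

import (
	"github.com/go-openapi/runtime"
	"github.com/go-openapi/strfmt"
)

// GetWidgetOK wraps a successful (200) response body.
type GetWidgetOK struct {
	Payload interface{}
}

// readResponse decodes the HTTP body into the payload.
func (o *GetWidgetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return consumer.Consume(response.Body(), &o.Payload)
}

// GetWidgetReader dispatches on the status code: codes declared in the spec
// map to typed results, anything else becomes a runtime.APIError, exactly as
// in the generated readers above.
type GetWidgetReader struct {
	formats strfmt.Registry
}

func (o *GetWidgetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		result := &GetWidgetOK{}
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	default:
		return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
	}
}

Declared error branches (404, 500, and the like) follow the same shape but hand the typed value back as the error: return nil, result.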
NewGetNodeOK creates a GetNodeOK with default headers values
func NewGetNodeOK() *GetNodeOK { return &GetNodeOK{} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NodeGet(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\n\tif &node != nil {\n\t\tw.WriteHeader(200)\n\t\tif err := json.NewEncoder(w).Encode(node); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t} else {\n\t\tparamError := swl.ParamError{\n\t\t\tError: \"Node not found\",\n\t\t}\n\t\tw.WriteHeader(404)\n\t\tif err := json.NewEncoder(w).Encode(paramError); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}", "func (a *FileStorageApiService) GetNode(Ctx _context.Context, bucketId string, nodeId string) ApiGetNodeRequest {\n\treturn ApiGetNodeRequest{\n\t\tApiService: a,\n\t\tCtx: Ctx,\n\t\tP_bucketId: bucketId,\n\t\tP_nodeId: nodeId,\n\t}\n}", "func getNode(ctx context.Context, client client.Interface, nodeName string) *libapi.Node {\n\tnode, err := client.Nodes().Get(ctx, nodeName, options.GetOptions{})\n\tif err != nil {\n\t\tif _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {\n\t\t\tlog.WithError(err).WithField(\"Name\", nodeName).Info(\"Unable to query node configuration\")\n\t\t\tlog.Warn(\"Unable to access datastore to query node configuration\")\n\t\t\tutils.Terminate()\n\t\t}\n\n\t\tlog.WithField(\"Name\", nodeName).Info(\"Building new node resource\")\n\t\tnode = libapi.NewNode()\n\t\tnode.Name = nodeName\n\t}\n\n\treturn node\n}", "func getNode(nd *wssdcloud.Node) *cloud.Node {\n\treturn &cloud.Node{\n\t\tName: &nd.Name,\n\t\tLocation: &nd.LocationName,\n\t\tNodeProperties: &cloud.NodeProperties{\n\t\t\tFQDN: &nd.Fqdn,\n\t\t\tPort: &nd.Port,\n\t\t\tAuthorizerPort: &nd.AuthorizerPort,\n\t\t\tCertificate: &nd.Certificate,\n\t\t\tStatuses: getNodeStatuses(nd),\n\t\t},\n\t\tVersion: &nd.Status.Version.Number,\n\t}\n}", "func NewNodesGetOK() *NodesGetOK {\n\treturn &NodesGetOK{}\n}", "func NewGetNodeNotFound() *GetNodeNotFound {\n\treturn &GetNodeNotFound{}\n}", "func NewGetNodesDefault(code int) *GetNodesDefault {\n\treturn &GetNodesDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewGetNodeUnauthorized() *GetNodeUnauthorized {\n\treturn &GetNodeUnauthorized{}\n}", "func (n *Node) getNode() (*Node, error) {\n\tres, err := n.Talk(\"/node\", false, nil)\n\tif err != nil {\n\t\terr := errors.New(fmt.Sprintln(\"/node\", n.Nodestr, \"error\"))\n\t\treturn nil, err\n\t}\n\tif len(res) == 0 {\n\t\treturn nil, errors.New(\"no response\")\n\t}\n\treturn newNode(res[0])\n}", "func (p *Plugin) NodeGetInfo(\n\tctx context.Context,\n\treq *csi.NodeGetInfoRequest) (\n\t*csi.NodeGetInfoResponse, error) {\n\n\tglog.Info(\"start to GetNodeInfo\")\n\tdefer glog.Info(\"end to GetNodeInfo\")\n\n\tif client == nil {\n\t\tmsg := \"client is nil\"\n\t\tglog.Error(msg)\n\t\treturn nil, status.Error(codes.InvalidArgument, msg)\n\t}\n\n\thostName, err := connector.GetHostName()\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"failed to get node name %v\", err)\n\t\tglog.Error(msg)\n\t\treturn nil, status.Error(codes.FailedPrecondition, msg)\n\t}\n\n\tvar initiators []string\n\n\tvolDriverTypes := []string{connector.FcDriver, connector.IscsiDriver}\n\n\tfor _, volDriverType := range volDriverTypes {\n\t\tvolDriver := connector.NewConnector(volDriverType)\n\t\tif volDriver == nil {\n\t\t\tglog.Errorf(\"unsupport volDriver: %s\", volDriverType)\n\t\t\tcontinue\n\t\t}\n\n\t\tinitiator, err := volDriver.GetInitiatorInfo()\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"cannot get initiator for driver volume type %s, err: %v\", volDriverType, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tinitiators = append(initiators, initiator)\n\t}\n\n\tif len(initiators) == 0 
{\n\t\tmsg := fmt.Sprintf(\"cannot get any initiator for host %s\", hostName)\n\t\tglog.Error(msg)\n\t\treturn nil, status.Error(codes.FailedPrecondition, msg)\n\t}\n\n\tnodeId := hostName + \",\" + strings.Join(initiators, \",\")\n\n\tglog.Infof(\"node info is %s\", nodeId)\n\n\treturn &csi.NodeGetInfoResponse{\n\t\tNodeId: nodeId,\n\t}, nil\n}", "func newnode(id byte, name string, value string) *xmlx.Node {\n\tnode := xmlx.NewNode(id)\n\tif name != \"\" {\n\t\tnode.Name = xml.Name{\n\t\t\tLocal: name,\n\t\t}\n\t}\n\tif value != \"\" {\n\t\tnode.Value = value\n\t}\n\treturn node\n}", "func NewGetTransportNodeParams() *GetTransportNodeParams {\n\tvar ()\n\treturn &GetTransportNodeParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func NewGetNodesOK() *GetNodesOK {\n\treturn &GetNodesOK{}\n}", "func NewGetTransportNodeParamsWithHTTPClient(client *http.Client) *GetTransportNodeParams {\n\tvar ()\n\treturn &GetTransportNodeParams{\n\t\tHTTPClient: client,\n\t}\n}", "func (c *Conn) GetNode(path ...NodeName) (Node, error) {\n\tvar node Node\n\terr := c.Request(\"get\", &node, pathToArgs(path)...)\n\treturn node, err\n}", "func (a *FileStorageApiService) GetNodeExecute(r ApiGetNodeRequest) (SingleNode, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue SingleNode\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.Ctx, \"FileStorageApiService.GetNode\")\n\tif localBasePath == \"/\" {\n\t localBasePath = \"\"\n\t}\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v2/file_storage/buckets/{bucket_id}/nodes/{node_id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"bucket_id\"+\"}\", _neturl.PathEscape(parameterToString(r.P_bucketId, \"\")) , -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node_id\"+\"}\", _neturl.PathEscape(parameterToString(r.P_nodeId, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif r.P_directorySize != nil {\n\t\tlocalVarQueryParams.Add(\"directory_size\", parameterToString(*r.P_directorySize, \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\treq, err := a.client.prepareRequest(r.Ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := 
_ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func NewGetOK() *GetOK {\n\treturn &GetOK{}\n}", "func NewGetNodeBadRequest() *GetNodeBadRequest {\n\treturn &GetNodeBadRequest{}\n}", "func (c *DOM) RequestNodeWithParams(v *DOMRequestNodeParams) (int, error) {\n\tresp, err := gcdmessage.SendCustomReturn(c.target, c.target.GetSendCh(), &gcdmessage.ParamRequest{Id: c.target.GetId(), Method: \"DOM.requestNode\", Params: v})\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar chromeData struct {\n\t\tResult struct {\n\t\t\tNodeId int\n\t\t}\n\t}\n\n\tif resp == nil {\n\t\treturn 0, &gcdmessage.ChromeEmptyResponseErr{}\n\t}\n\n\t// test if error first\n\tcerr := &gcdmessage.ChromeErrorResponse{}\n\tjson.Unmarshal(resp.Data, cerr)\n\tif cerr != nil && cerr.Error != nil {\n\t\treturn 0, &gcdmessage.ChromeRequestErr{Resp: cerr}\n\t}\n\n\tif err := json.Unmarshal(resp.Data, &chromeData); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn chromeData.Result.NodeId, nil\n}", "func (a *Client) GetNodeInfo(params *GetNodeInfoParams) (*GetNodeInfoOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetNodeInfoParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"GetNodeInfo\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/node_info\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetNodeInfoReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*GetNodeInfoOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for GetNodeInfo: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func createNewEmptyNode() Node {\n\tnextNewId--\n\treturn Node{\n\t\tId: nextNewId,\n\t\tVisible: true,\n\t\tTimestamp: time.Now().Format(\"2006-01-02T15:04:05Z\"),\n\t\tVersion: \"1\",\n\t}\n}", "func NewGetNodeUpgradesDefault(code int) *GetNodeUpgradesDefault {\n\treturn &GetNodeUpgradesDefault{\n\t\t_statusCode: code,\n\t}\n}", "func MakeGetNodeEndpoint(s registry.Service) endpoint.Endpoint {\n\treturn func(ctx context.Context, request interface{}) (interface{}, error) {\n\t\treq := request.(GetNodeRequest)\n\t\tnode, err := s.GetNode(ctx, req.Token, req.Id)\n\t\treturn GetNodeResponse{\n\t\t\tErr: err,\n\t\t\tNode: node,\n\t\t}, nil\n\t}\n}", "func (s *server) GetNode(context.Context, *goblinpb.GetNodeRequest) (*goblinpb.GetNodeResponse, error) {\n\treturn &goblinpb.GetNodeResponse{\n\t\tName: s.pool.GetName(),\n\t\tAddr: s.pool.GetMemberlistAddress(),\n\t}, nil\n}", "func (d *Driver) NodeGetInfo(ctx context.Context, request *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {\n\td.log.WithField(\"method\", \"node_get_info\").Info(\"node get info called\")\n\treturn &csi.NodeGetInfoResponse{\n\t\tNodeId: d.nodeId,\n\t\tMaxVolumesPerNode: _defaultMaxAzureVolumeLimit,\n\n\t\t// make sure that the driver works on this particular region only\n\t\tAccessibleTopology: &csi.Topology{\n\t\t\tSegments: map[string]string{\n\t\t\t\t\"location\": d.az.Location,\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func GetInfo(url string, id string, token string) (BaseNode, error) {\n\tclient := &http.Client{Timeout: time.Second * 30}\n\trequest, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn BaseNode{}, err\n\t}\n\tif id != \"\" {\n\t\tif token == \"\" {\n\t\t\treturn BaseNode{}, fmt.Errorf(\"missing token for authentication\")\n\t\t}\n\t\trequest.SetBasicAuth(id, token)\n\t}\n\n\tr, err := client.Do(request)\n\tif err != nil {\n\t\treturn BaseNode{}, err\n\t}\n\tdefer r.Body.Close()\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn BaseNode{}, err\n\t}\n\n\tnode := BaseNode{}\n\terr = json.Unmarshal(body, &node)\n\tif err != nil {\n\t\treturn BaseNode{}, err\n\t}\n\n\treturn node, nil\n}", "func (ed *NodesEndpoint) Get(ctx context.Context, in *pb.NodesRequest) (*pb.NodesReply, error) {\n\tkey, err := ed.getKey(in, ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdata, err := ed.wch.Get(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tobj, _, err := ed.make(key, data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn obj, nil\n}", "func (api *PublicBlockChainAPI) GetNodeInfo() (interface{}, error) {\n\tbest := api.node.blockManager.GetChain().BestSnapshot()\n\tnode := api.node.blockManager.GetChain().BlockDAG().GetBlock(&best.Hash)\n\tpowNodes := api.node.blockManager.GetChain().GetCurrentPowDiff(node, pow.MEERXKECCAKV1)\n\tret := &json.InfoNodeResult{\n\t\tID: api.node.node.peerServer.PeerID().String(),\n\t\tVersion: int32(1000000*version.Major + 10000*version.Minor + 100*version.Patch),\n\t\tBuildVersion: version.String(),\n\t\tProtocolVersion: int32(protocol.ProtocolVersion),\n\t\tTotalSubsidy: best.TotalSubsidy,\n\t\tTimeOffset: int64(api.node.blockManager.GetChain().TimeSource().Offset().Seconds()),\n\t\tConnections: int32(len(api.node.node.peerServer.Peers().Connected())),\n\t\tPowDiff: &json.PowDiff{\n\t\t\tCurrentDiff: getDifficultyRatio(powNodes, api.node.node.Params, pow.MEERXKECCAKV1),\n\t\t},\n\t\tNetwork: params.ActiveNetParams.Name,\n\t\tConfirmations: 
blockdag.StableConfirmations,\n\t\tCoinbaseMaturity: int32(api.node.node.Params.CoinbaseMaturity),\n\t\tModules: []string{cmds.DefaultServiceNameSpace, cmds.MinerNameSpace, cmds.TestNameSpace, cmds.LogNameSpace},\n\t}\n\tret.GraphState = GetGraphStateResult(best.GraphState)\n\thostdns := api.node.node.peerServer.HostDNS()\n\tif hostdns != nil {\n\t\tret.DNS = hostdns.String()\n\t}\n\tif api.node.node.peerServer.Node() != nil {\n\t\tret.QNR = api.node.node.peerServer.Node().String()\n\t}\n\tif len(api.node.node.peerServer.HostAddress()) > 0 {\n\t\tret.Addresss = api.node.node.peerServer.HostAddress()\n\t}\n\n\t// soft forks\n\tret.ConsensusDeployment = make(map[string]*json.ConsensusDeploymentDesc)\n\tfor deployment, deploymentDetails := range params.ActiveNetParams.Deployments {\n\t\t// Map the integer deployment ID into a human readable\n\t\t// fork-name.\n\t\tvar forkName string\n\t\tswitch deployment {\n\t\tcase params.DeploymentTestDummy:\n\t\t\tforkName = \"dummy\"\n\n\t\tcase params.DeploymentToken:\n\t\t\tforkName = \"token\"\n\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"Unknown deployment %v detected\\n\", deployment)\n\t\t}\n\n\t\t// Query the chain for the current status of the deployment as\n\t\t// identified by its deployment ID.\n\t\tdeploymentStatus, err := api.node.blockManager.GetChain().ThresholdState(uint32(deployment))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Failed to obtain deployment status\\n\")\n\t\t}\n\n\t\t// Finally, populate the soft-fork description with all the\n\t\t// information gathered above.\n\t\tret.ConsensusDeployment[forkName] = &json.ConsensusDeploymentDesc{\n\t\t\tStatus: deploymentStatus.HumanString(),\n\t\t\tBit: deploymentDetails.BitNumber,\n\t\t\tStartTime: int64(deploymentDetails.StartTime),\n\t\t\tTimeout: int64(deploymentDetails.ExpireTime),\n\t\t}\n\n\t\tif deploymentDetails.PerformTime != 0 {\n\t\t\tret.ConsensusDeployment[forkName].Perform = int64(deploymentDetails.PerformTime)\n\t\t}\n\n\t\tif deploymentDetails.StartTime >= blockchain.CheckerTimeThreshold {\n\t\t\tif time.Unix(int64(deploymentDetails.ExpireTime), 0).After(best.MedianTime) {\n\t\t\t\tstartTime := time.Unix(int64(deploymentDetails.StartTime), 0)\n\t\t\t\tret.ConsensusDeployment[forkName].Since = best.MedianTime.Sub(startTime).String()\n\t\t\t}\n\t\t}\n\n\t}\n\treturn ret, nil\n}", "func NewNode(key int, value string) *Node {\n\treturn &Node{1, nil, nil, key, value}\n}", "func getNode(ex SessionExecutor, msg CommonMessage) (rep CommonReply) {\n\tnodeMsg := msg.(nodeMessage)\n\n\tselectNodeTmpl := ex.getQuery(selectNodeOp)\n\tcn := newNode()\n\trows, err := ex.Query(fmt.Sprintf(selectNodeTmpl, nodeMsg.GetNodeTable()))\n\tif err != nil {\n\t\tdbLogger.Errorf(\"getNode query error:%s\", err)\n\t\treturn newNodeReply(nil, err)\n\t}\n\tdefer closeRowsAndLog(rows)\n\n\tfor rows.Next() {\n\t\terr := rows.Scan(&cn.name, &cn.ip, &cn.isCollector, &cn.duration, &cn.description, &cn.coords, &cn.address)\n\t\tif err != nil {\n\t\t\tdbLogger.Errorf(\"getNode fetch node row:%s\", err)\n\t\t\treturn newNodeReply(nil, err)\n\t\t}\n\t\t// Try to match the node.\n\t\tdbLogger.Infof(\"trying node matching with name:%s ip:%s\", cn.name, cn.ip)\n\t\tname, ip := nodeMsg.getNodeName(), nodeMsg.getNodeIP()\n\t\tif (name == cn.name && name != \"\") || (ip == cn.ip && ip != \"\") {\n\t\t\treturn newNodeReply(cn, nil)\n\t\t}\n\t}\n\n\treturn newNodeReply(nil, errNoNode)\n}", "func GetNode(id int) NodeResponse {\n\tvar node NodeResponse\n\tresponse := 
network.Get(fmt.Sprintf(\"admin/nodes/%d\", id))\n\tjson.Unmarshal(response, &node)\n\n\treturn node\n}", "func NewGetNodeTopoesConnectionOK() *GetNodeTopoesConnectionOK {\n\treturn &GetNodeTopoesConnectionOK{}\n}", "func (d *Driver) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {\n\td.log.WithField(\"method\", \"node_get_info\").Info(\"node get info called\")\n\treturn &csi.NodeGetInfoResponse{\n\t\tNodeId: d.hostID(),\n\t\tMaxVolumesPerNode: maxVolumesPerNode,\n\n\t\t// make sure that the driver works on this particular region only\n\t\tAccessibleTopology: &csi.Topology{\n\t\t\tSegments: map[string]string{\n\t\t\t\t\"region\": d.region,\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func (c *Client) newGetRequest(URLStr string) (*http.Request, error) {\n\trel, err := url.Parse(URLStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tu := c.baseURL.ResolveReference(rel)\n\n\t// Create a new get request with the url provided\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Set the api key on the request\n\treq.Header.Set(apiKeyHeader, c.apiKey)\n\n\t// If we specify a user agent we override the current one\n\tif c.userAgent != \"\" {\n\t\treq.Header.Set(userAgentHeader, c.userAgent)\n\t}\n\treturn req, nil\n}", "func NewPatchNodeOK() *PatchNodeOK {\n\n\treturn &PatchNodeOK{}\n}", "func NewGetNodeHardwareFastParams() *GetNodeHardwareFastParams {\n\tvar ()\n\treturn &GetNodeHardwareFastParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func (p *jsonOutputNode) New(attr string) outputNode {\n\treturn &jsonOutputNode{make(map[string]interface{})}\n}", "func NewGetNodesIDOK() *GetNodesIDOK {\n\treturn &GetNodesIDOK{}\n}", "func (a *FileStorageApiService) GetNodes(Ctx _context.Context, bucketId string) ApiGetNodesRequest {\n\treturn ApiGetNodesRequest{\n\t\tApiService: a,\n\t\tCtx: Ctx,\n\t\tP_bucketId: bucketId,\n\t}\n}", "func createNewNode(ctx context.Context, nodeName string, virtual bool, clientset kubernetes.Interface) (*corev1.Node, error) {\n\tresources := corev1.ResourceList{}\n\tresources[corev1.ResourceCPU] = *resource.NewScaledQuantity(5000, resource.Milli)\n\tresources[corev1.ResourceMemory] = *resource.NewScaledQuantity(5, resource.Mega)\n\tnode := &corev1.Node{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: nodeName,\n\t\t},\n\t}\n\tif virtual {\n\t\tnode.Labels = map[string]string{\n\t\t\tconsts.TypeLabel: consts.TypeNode,\n\t\t}\n\t}\n\tnode.Status = corev1.NodeStatus{\n\t\tCapacity: resources,\n\t\tAllocatable: resources,\n\t\tConditions: []corev1.NodeCondition{\n\t\t\t0: {\n\t\t\t\tType: corev1.NodeReady,\n\t\t\t\tStatus: corev1.ConditionTrue,\n\t\t\t},\n\t\t},\n\t}\n\tnode, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn node, nil\n}", "func (c *client) getNode(name string) (result, error) {\n\tnode, err := c.queryEndpoint(APINodesEndpoint, name+\"?memory=true\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(node) > 1 {\n\t\treturn nil, fmt.Errorf(\"Error: more than one result returned\")\n\t}\n\treturn node[0], nil\n}", "func (c *Client) getNodeClientImpl(nodeID string, timeout time.Duration, q *QueryOptions, lookup nodeLookup) (*Client, error) {\n\tnode, _, err := lookup(nodeID, q)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif node.Status == \"down\" {\n\t\treturn nil, NodeDownErr\n\t}\n\tif node.HTTPAddr == \"\" {\n\t\treturn nil, fmt.Errorf(\"http addr of node %q (%s) is not 
advertised\", node.Name, nodeID)\n\t}\n\n\tvar region string\n\tswitch {\n\tcase q != nil && q.Region != \"\":\n\t\t// Prefer the region set in the query parameter\n\t\tregion = q.Region\n\tcase c.config.Region != \"\":\n\t\t// If the client is configured for a particular region use that\n\t\tregion = c.config.Region\n\tdefault:\n\t\t// No region information is given so use GlobalRegion as the default.\n\t\tregion = GlobalRegion\n\t}\n\n\t// Get an API client for the node\n\tconf := c.config.ClientConfig(region, node.HTTPAddr, node.TLSEnabled)\n\n\t// set timeout - preserve old behavior where errors are ignored and use untimed one\n\thttpClient, err := cloneWithTimeout(c.httpClient, timeout)\n\t// on error, fallback to using current http client\n\tif err != nil {\n\t\thttpClient = c.httpClient\n\t}\n\tconf.HttpClient = httpClient\n\n\treturn NewClient(conf)\n}", "func NewReturnNode(info token.FileInfo) *ReturnNode {\n\treturn &ReturnNode{\n\t\tFileInfo: info,\n\t\tNodeType: NodeReturn,\n\t}\n}", "func GetNodeinfo(cfg *setting.Setting) (*crpc.NodeInfo, error) {\n\tvar ni *crpc.NodeInfo\n\terr := cfg.CallRPC(func(cl setting.RPCIF) error {\n\t\tvar err2 error\n\t\tni, err2 = cl.GetNodeinfo()\n\t\tlog.Println(cl, err2)\n\t\treturn err2\n\t})\n\treturn ni, err\n}", "func newFindNodeProtocol(service service.Service, rt RoutingTable) *findNodeProtocol {\n\n\tp := &findNodeProtocol{\n\t\trt: rt,\n\t\tpending: make(map[crypto.UUID]chan findNodeResults),\n\t\tingressChannel: service.RegisterDirectProtocol(protocol),\n\t\tservice: service,\n\t}\n\n\tif srv, ok := service.(localService); ok {\n\t\tp.log = srv.LocalNode().Log\n\t} else {\n\t\tp.log = log.AppLog\n\t}\n\n\tgo p.readLoop()\n\n\treturn p\n}", "func (d *nodeService) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {\n\tklog.V(6).Infof(\"NodeGetInfo: called with args %+v\", req)\n\n\treturn &csi.NodeGetInfoResponse{\n\t\tNodeId: d.nodeID,\n\t}, nil\n}", "func (o *GetTransportNodeParams) WithHTTPClient(client *http.Client) *GetTransportNodeParams {\n\to.SetHTTPClient(client)\n\treturn o\n}", "func (c *nodes) Get(name string) (*api.Node, error) {\n\tresult := &api.Node{}\n\terr := c.r.Get().Resource(c.resourceName()).Name(name).Do().Into(result)\n\treturn result, err\n}", "func createNewNodeNetworkObject(writer *bufio.Writer, sourceOsmNode *osm.Node) {\n\ttags := sourceOsmNode.TagMap()\n\n\t// Punktnetzwerk 'Fahrrad'\n\tnewOsmNode := *sourceOsmNode // copy content (don't modify origin/source node)\n\tnewOsmNode.ID = 0\n\tnewOsmNode.Tags = []osm.Tag{} // remove all source tags\n\trefValue, found := tags[\"icn_ref\"]\n\tif found {\n\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_bicycle\"}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\twriteNewNodeObject(writer, &newOsmNode)\n\t} else {\n\t\trefValue, found = tags[\"ncn_ref\"]\n\t\tif found {\n\t\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_bicycle\"}\n\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\twriteNewNodeObject(writer, &newOsmNode)\n\t\t} else {\n\t\t\trefValue, found = tags[\"rcn_ref\"]\n\t\t\tif found {\n\t\t\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_bicycle\"}\n\t\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\t\ttag = osm.Tag{Key: \"name\", Value: 
refValue}\n\t\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\t\twriteNewNodeObject(writer, &newOsmNode)\n\t\t\t} else {\n\t\t\t\trefValue, found = tags[\"lcn_ref\"]\n\t\t\t\tif found {\n\t\t\t\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_bicycle\"}\n\t\t\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\t\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\t\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\t\t\twriteNewNodeObject(writer, &newOsmNode)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Punktnetzwerk 'Wandern'\n\tnewOsmNode = *sourceOsmNode // copy content (don't modify origin/source node)\n\tnewOsmNode.ID = 0\n\tnewOsmNode.Tags = []osm.Tag{} // remove all source tags\n\trefValue, found = tags[\"iwn_ref\"]\n\tif found {\n\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_hiking\"}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\twriteNewNodeObject(writer, &newOsmNode)\n\t} else {\n\t\trefValue, found = tags[\"nwn_ref\"]\n\t\tif found {\n\t\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_hiking\"}\n\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\twriteNewNodeObject(writer, &newOsmNode)\n\t\t} else {\n\t\t\trefValue, found = tags[\"rwn_ref\"]\n\t\t\tif found {\n\t\t\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_hiking\"}\n\t\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\t\twriteNewNodeObject(writer, &newOsmNode)\n\t\t\t} else {\n\t\t\t\trefValue, found = tags[\"lwn_ref\"]\n\t\t\t\tif found {\n\t\t\t\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_hiking\"}\n\t\t\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\t\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\t\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\t\t\twriteNewNodeObject(writer, &newOsmNode)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Punktnetzwerk 'Inline-Skaten'\n\tnewOsmNode = *sourceOsmNode // copy content (don't modify origin/source node)\n\tnewOsmNode.ID = 0\n\tnewOsmNode.Tags = []osm.Tag{} // remove all source tags\n\trefValue, found = tags[\"rin_ref\"]\n\tif found {\n\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_inline_skates\"}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\twriteNewNodeObject(writer, &newOsmNode)\n\t}\n\n\t// Punktnetzwerk 'Reiten'\n\tnewOsmNode = *sourceOsmNode // copy content (don't modify origin/source node)\n\tnewOsmNode.ID = 0\n\tnewOsmNode.Tags = []osm.Tag{} // remove all source tags\n\trefValue, found = tags[\"rhn_ref\"]\n\tif found {\n\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_horse\"}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\twriteNewNodeObject(writer, &newOsmNode)\n\t}\n\n\t// Punktnetzwerk 'Kanu'\n\tnewOsmNode = *sourceOsmNode // copy content (don't modify origin/source node)\n\tnewOsmNode.ID = 0\n\tnewOsmNode.Tags = []osm.Tag{} // remove all source tags\n\trefValue, found = tags[\"rpn_ref\"]\n\tif found {\n\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_canoe\"}\n\t\tnewOsmNode.Tags = 
append(newOsmNode.Tags, tag)\n\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\twriteNewNodeObject(writer, &newOsmNode)\n\t}\n\n\t// Punktnetzwerk 'Motorboot'\n\tnewOsmNode = *sourceOsmNode // copy content (don't modify origin/source node)\n\tnewOsmNode.ID = 0\n\tnewOsmNode.Tags = []osm.Tag{} // remove all source tags\n\trefValue, found = tags[\"rmn_ref\"]\n\tif found {\n\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_motorboat\"}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\twriteNewNodeObject(writer, &newOsmNode)\n\t}\n}", "func Get(name string) (*Node, error) {\n\tcmd := exec.Command(\"kubectl\", \"get\", \"node\", \"-ojson\", name)\n\tutil.PrintCommand(cmd)\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"Failed to get nodes from the Kubernetes cluster: %s\", string(out))\n\t}\n\n\tvar n Node\n\tif err := json.Unmarshal(out, &n); err != nil {\n\t\treturn nil, errors.Wrap(err, \"error unmarshalling node\")\n\t}\n\treturn &n, nil\n}", "func newNode(cluster *Cluster, nv *nodeValidator) *Node {\n\treturn &Node{\n\t\tcluster: cluster,\n\t\tname: nv.name,\n\t\taliases: nv.aliases,\n\t\taddress: nv.address,\n\t\tuseNewInfo: nv.useNewInfo,\n\n\t\t// Assign host to first IP alias because the server identifies nodes\n\t\t// by IP address (not hostname).\n\t\thost: nv.aliases[0],\n\t\tconnections: NewAtomicQueue(cluster.clientPolicy.ConnectionQueueSize),\n\t\tconnectionCount: NewAtomicInt(0),\n\t\thealth: NewAtomicInt(_FULL_HEALTH),\n\t\tpartitionGeneration: NewAtomicInt(-1),\n\t\treferenceCount: NewAtomicInt(0),\n\t\trefreshCount: NewAtomicInt(0),\n\t\tresponded: NewAtomicBool(false),\n\t\tactive: NewAtomicBool(true),\n\n\t\tsupportsFloat: NewAtomicBool(nv.supportsFloat),\n\t\tsupportsBatchIndex: NewAtomicBool(nv.supportsBatchIndex),\n\t\tsupportsReplicasAll: NewAtomicBool(nv.supportsReplicasAll),\n\t\tsupportsGeo: NewAtomicBool(nv.supportsGeo),\n\t}\n}", "func NewGetNodeForbidden() *GetNodeForbidden {\n\treturn &GetNodeForbidden{}\n}", "func newNode(k collection.Comparer, v interface{}, h int) *node {\n\tn := &node{K: k, V: v, H: h, C: make([]*node, 2)}\n\treturn n\n}", "func (client *Client) newGETRequest(url string) (*http.Request, error) {\n\trequest, err := http.NewRequest(\n\t\thttp.MethodGet,\n\t\turl,\n\t\tnil,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trequest.Header = http.Header{}\n\trequest.Header.Set(\"Accept\", \"application/json\")\n\trequest.Header.Set(\"Content-Type\", \"application/json\")\n\trequest.Header.Set(\"User-Agent\", client.userAgent)\n\n\treturn request, nil\n}", "func Test(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintln(w, \"Node is up\")\n\tn := GetNode()\n\tfmt.Printf(\"ID: %d\", n.ID)\n\tfmt.Printf(\"Name: %s\", *n.Name)\n\tfmt.Printf(\"IP: %s\", *n.IP)\n\tfmt.Printf(\"Enabled: %t\", n.Enabled)\n\tfmt.Printf(\"Clients: %d\", n.Clients)\n\tfmt.Printf(\"Added: %v\", n.Added)\n}", "func New() VerkleNode {\n\treturn newInternalNode(0)\n}", "func (*FakeReconcilerClient) GetNode(id string) (swarm.Node, error) {\n\treturn swarm.Node{}, FakeUnimplemented\n}", "func newCbsNode(region string, volumeAttachLimit int64) (*cbsNode, error) {\n\tsecretID, secretKey, token, _ := util.GetSercet()\n\tcred := &common.Credential{\n\t\tSecretId: secretID,\n\t\tSecretKey: secretKey,\n\t\tToken: token,\n\t}\n\n\tclient, err := cbs.NewClient(cred, 
region, profile.NewClientProfile())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnode := cbsNode{\n\t\tmetadataClient: metadata.NewMetaData(http.DefaultClient),\n\t\tcbsClient: client,\n\t\tmounter: mount.SafeFormatAndMount{\n\t\t\tInterface: mount.New(\"\"),\n\t\t\tExec: exec.New(),\n\t\t},\n\t\tidempotent: util.NewIdempotent(),\n\t\tvolumeAttachLimit: volumeAttachLimit,\n\t}\n\treturn &node, nil\n}", "func NewNode(host string, size int) Node {\n\treturn node{host: host, size: size}\n}", "func (ns *NodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {\n\treturn &csi.NodeGetInfoResponse{\n\t\tNodeId: ns.Driver.nodeID,\n\t}, nil\n}", "func newGetClusterNodeCmd(out io.Writer) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"node\",\n\t\tShort: \"Get Cluster Nodes\",\n\t\tLong: `Get Cluster Nodes`,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tc7nclient.InitClient(&clientConfig, &clientPlatformConfig)\n\t\t\terror := c7nclient.Client.CheckIsLogin()\n\t\t\tif error != nil {\n\t\t\t\tfmt.Println(error)\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr, userInfo := c7nclient.Client.QuerySelf(cmd.OutOrStdout())\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr = c7nclient.Client.SetOrganization(cmd.OutOrStdout(), userInfo.ID)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr = c7nclient.Client.SetProject(cmd.OutOrStdout(), userInfo.ID)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr, pro := c7nclient.Client.GetProject(cmd.OutOrStdout(), userInfo.ID, proCode)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr, cluster := c7nclient.Client.GetCluster(cmd.OutOrStdout(), pro.ID, clusterCode)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tc7nclient.Client.ListClusterNode(cmd.OutOrStdout(), pro.ID, cluster.ID)\n\t\t},\n\t}\n\n\tcmd.Flags().StringVarP(&clusterCode, \"clusterCode\", \"c\", \"\", \"cluster code\")\n\tcmd.MarkFlagRequired(\"clusterCode\")\n\n\treturn cmd\n}", "func NewGetTagDefault(code int) *GetTagDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &GetTagDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (h *Hsc) getNodeInfo() (NodeInfo, error) {\n\tendpoint := fmt.Sprintf(\"%s/node_info\", h.LcdEndpoint)\n\tresp, err := http.Get(endpoint)\n\tif err != nil {\n\t\treturn NodeInfo{}, err\n\t}\n\n\tvar nodeInfo NodeInfo\n\tjdec := json.NewDecoder(resp.Body)\n\terr = jdec.Decode(&nodeInfo)\n\tif err != nil {\n\t\treturn NodeInfo{}, err\n\t}\n\n\treturn nodeInfo, nil\n}", "func (r *RPCClient) GetNodeInfo() (json.RawMessage, error) {\n\targs := RPCEmptyArguments{}\n\tvar reply json.RawMessage\n\terr := r.Client.Call(\"Node.Info\", &args, &reply)\n\treturn reply, err\n}", "func NewGet(url string) *Request { return NewRequest(\"GET\", url) }", "func NewNode(name string) TreeNode {\n\treturn TreeNode{\n\t\tname: name,\n\t\tsize: 0,\n\t\tfiles: make(map[string]Entry),\n\t}\n}", "func NewNode(id uint64) *Node { return &Node{Id: id} }", "func (ns *nodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {\n\ttopology := &csi.Topology{\n\t\tSegments: map[string]string{TopologyKeyNode: ns.nodeID},\n\t}\n\n\treturn &csi.NodeGetInfoResponse{\n\t\tNodeId: \tns.nodeID,\n\t\tMaxVolumesPerNode: ns.maxVolumePerNode,\n\t\tAccessibleTopology: topology,\n\t}, nil\n}", "func NewGetHostInfoOK() *GetHostInfoOK {\n\treturn &GetHostInfoOK{}\n}", "func NewGetPingDefault(code int) *GetPingDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn 
&GetPingDefault{\n\t\t_statusCode: code,\n\t}\n}", "func newResponseNode(model string, hash string, version *big.Int, sim *backends.SimulatedBackend) {\n\topts := mqtt.NewClientOptions().AddBroker(\"tcp://localhost:1883\")\n\tclient := mqtt.NewClient(opts)\n\ttoken := client.Connect()\n\tif token.Wait() && token.Error() != nil {\n\t\tlog.Fatalln(token.Error())\n\t}\n\n\tvar messageHandler mqtt.MessageHandler = func(client mqtt.Client, msg mqtt.Message) {\n\t\tcontract, _ := contracts.NewVerifier(common.HexToAddress(string(msg.Payload())), sim)\n\t\tsendResponse(sim, contract, hash, version)\n\t}\n\ttoken = client.Subscribe(model, 0, messageHandler)\n\tif token.Wait() && token.Error() != nil {\n\t\tlog.Fatalln(token.Error())\n\t}\n}", "func NewGetConstructorOK() *GetConstructorOK {\n\treturn &GetConstructorOK{}\n}", "func NewNode(opts ...NodeOpt) *Node {\n\tn := &Node{}\n\tfor _, opt := range opts {\n\t\topt(n)\n\t}\n\treturn n\n}", "func newNode(nodePath string) Node {\n\treturn &nodeImpl{nodePath: nodePath}\n}", "func newNode() *topicNode {\n\treturn &topicNode{\n\t\tchildren: children{},\n\t\tclients: make(clientOpts),\n\t\tshared: make(map[string]clientOpts),\n\t}\n}", "func (c *Client) GetNodes(queryParams ...string) (map[string]interface{}, error) {\n\tlog.info(\"========== GET CLIENT NODES ==========\")\n\turl := buildURL(path[\"nodes\"])\n\n\treturn c.do(\"GET\", url, \"\", queryParams)\n}", "func (t *BPTree) newNode() *Node {\n\tnode := &Node{\n\t\tKeys: make([][]byte, order-1),\n\t\tpointers: make([]interface{}, order),\n\t\tisLeaf: false,\n\t\tparent: nil,\n\t\tKeysNum: 0,\n\t\tAddress: t.LastAddress,\n\t}\n\tsize := getBinaryNodeSize()\n\tt.LastAddress += size\n\n\treturn node\n}", "func (c *lazyClient) getNode(key string) (*etcd.Node, error) {\n\tfor k, n := range c.nodes {\n\t\tif strings.HasPrefix(key, k) {\n\t\t\treturn c.findNode(key, n)\n\t\t}\n\t}\n\tresponse, err := c.c.Get(key, true, true)\n\tif err != nil {\n\t\treturn nil, convertErr(err)\n\t}\n\tc.nodes[key] = response.Node\n\treturn c.findNode(key, response.Node)\n}", "func (a API) AddNodeGetRes() (out *None, e error) {\n\tout, _ = a.Result.(*None)\n\te, _ = a.Result.(error)\n\treturn \n}", "func newNodes(c *Client) *nodes {\n\treturn &nodes{c}\n}", "func NewGetMeDefault(code int) *GetMeDefault {\n\treturn &GetMeDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (t *Btree) newNode() *Node {\n\t*t.NodeCount++\n\tid := t.genrateID()\n\tnode := &Node{\n\t\tNodeRecordMetaData: NodeRecordMetaData{\n\t\t\tId: proto.Int64(id),\n\t\t\tIsDirt: proto.Int32(0),\n\t\t},\n\t}\n\tt.nodes[id] = node\n\treturn node\n}", "func NewGetTreeParams() *GetTreeParams {\n\tvar ()\n\treturn &GetTreeParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func (c *DOM) DescribeNodeWithParams(v *DOMDescribeNodeParams) (*DOMNode, error) {\n\tresp, err := gcdmessage.SendCustomReturn(c.target, c.target.GetSendCh(), &gcdmessage.ParamRequest{Id: c.target.GetId(), Method: \"DOM.describeNode\", Params: v})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar chromeData struct {\n\t\tResult struct {\n\t\t\tNode *DOMNode\n\t\t}\n\t}\n\n\tif resp == nil {\n\t\treturn nil, &gcdmessage.ChromeEmptyResponseErr{}\n\t}\n\n\t// test if error first\n\tcerr := &gcdmessage.ChromeErrorResponse{}\n\tjson.Unmarshal(resp.Data, cerr)\n\tif cerr != nil && cerr.Error != nil {\n\t\treturn nil, &gcdmessage.ChromeRequestErr{Resp: cerr}\n\t}\n\n\tif err := json.Unmarshal(resp.Data, &chromeData); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn chromeData.Result.Node, nil\n}", 
"func New(nodeFile string) Node {\n\treturn Node{nodeFile: nodeFile}\n}", "func newNode() *node {\n\treturn &node{\n\t\tvalue: nil,\n\t\tchildren: map[string]*node{},\n\t}\n}", "func getNodeVersion(def *cbgt.NodeDef) (*ComparableVersion, error) {\n\tif len(def.Extras) == 0 {\n\t\treturn nil, nil\n\t}\n\tvar extras nodeExtras\n\tif err := JSONUnmarshal([]byte(def.Extras), &extras); err != nil {\n\t\treturn nil, fmt.Errorf(\"parsing node extras: %w\", err)\n\t}\n\treturn extras.Version, nil\n}", "func NewGetVersionDefault(code int) *GetVersionDefault {\n\treturn &GetVersionDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewNode(key, value interface{}) *Node {\n\tvar node Node\n\tnode.value = value\n\tnode.key = key\n\tnode.next = nil\n\treturn &node\n}", "func CreateDefaultTree() *binaryTree {\n\tfour := node{4, nil, nil}\n\tfive := node{5, nil, nil}\n\tsix := node{6, nil, nil}\n\tseven := node{7, nil, nil}\n\n\ttwo := node{2, &four, &five}\n\tthree := node{3, &six, &seven}\n\n\tone := node{1, &two, &three}\n\treturn &binaryTree{root: &one}\n}", "func (a *HyperflexApiService) GetHyperflexNodeByMoid(ctx context.Context, moid string) ApiGetHyperflexNodeByMoidRequest {\n\treturn ApiGetHyperflexNodeByMoidRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func (ctx Context) GetNode(name string) (UniCloudNode, error) {\n\tvar result UniCloudNode\n\turl := fmt.Sprintf(\"https://%s:8443/v1/nodes/%s\", ctx.Address, name)\n\tdata, err := ctx.ucGetDelete(\"GET\", url)\n\tif err == nil {\n\t\terr = json.Unmarshal(data, &result)\n\t}\n\treturn result, err\n}", "func (f *file) GetNode(p Path) (Node, error) {\n if p == \"\" {\n return f\n }\n\n return nil, ipld.ErrParse\n}", "func NewGetSectionOK() *GetSectionOK {\n\n\treturn &GetSectionOK{}\n}", "func (r *resolver) newNoopNode(name string, jobArgs map[string]interface{}) (*Node, error) {\n\tid, err := r.idGen.UID()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error making id for no-op job %s: %s\", name, err)\n\t}\n\tjid := job.NewIdWithRequestId(\"noop\", name, id, r.request.Id)\n\trj, err := r.jobFactory.Make(jid)\n\tif err != nil {\n\t\tswitch err {\n\t\tcase job.ErrUnknownJobType:\n\t\t\t// No custom noop job, use built-in default\n\t\t\trj = &noopJob{\n\t\t\t\tid: jid,\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"Error making no-op job %s: %s\", name, err)\n\t\t}\n\t}\n\tif err := rj.Create(jobArgs); err != nil {\n\t\treturn nil, fmt.Errorf(\"Error creating no-op job %s: %s\", name, err)\n\t}\n\tbytes, err := rj.Serialize()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error serializing no-op job %s: %s\", name, err)\n\t}\n\n\treturn &Node{\n\t\tName: name,\n\t\tId: id,\n\t\tSpec: &spec.NoopNode, // on the next refactor, we shouldn't need to set this ourselves\n\t\tJobBytes: bytes,\n\t}, nil\n}", "func (fs *Ipfs) createNode(ctx context.Context, repoPath string) (icore.CoreAPI, error) {\n\t// Open the repo\n\trepo, err := fsrepo.Open(repoPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Construct the node\n\tnodeOptions := &core.BuildCfg{\n\t\tOnline: true,\n\n\t\t// This option sets the node to be a full DHT node\n\t\t// (both fetching and storing DHT Records)\n\t\tRouting: libp2p.DHTOption,\n\n\t\t// Routing: libp2p.DHTClientOption,\n\t\t// This option sets the node to be a client DHT node (only fetching records)\n\n\t\tRepo: repo,\n\t}\n\n\tnode, err := core.NewNode(ctx, nodeOptions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfs.ipfsNode = node\n\n\t// Attach the Core API to the constructed 
node\n\treturn coreapi.NewCoreAPI(node)\n}", "func newNode() *Node {\n\tn := &Node{}\n\treturn n\n}", "func (c *DOM) RequestNode(objectId string) (int, error) {\n\tvar v DOMRequestNodeParams\n\tv.ObjectId = objectId\n\treturn c.RequestNodeWithParams(&v)\n}", "func NewNode(key, value interface{}) *Node {\n\tnode := new(Node)\n\tnode.key = key\n\tnode.value = value\n\tnode.left = nil\n\tnode.right = nil\n\tnode.color = RED\n\treturn node\n}", "func NewNode(name string, w Worker) *Node {\n\tid := getID()\n\tn := &Node{id: id, w: w, name: name}\n\tn.chained = make(map[string]struct{})\n\tn.close = make(chan struct{})\n\treturn n\n}" ]
[ "0.6256709", "0.5656355", "0.5653993", "0.5629114", "0.5605105", "0.55883247", "0.5546044", "0.553135", "0.55062854", "0.5418763", "0.5404175", "0.5389311", "0.5362681", "0.53083205", "0.5295457", "0.52701765", "0.52578914", "0.52487177", "0.52446", "0.5196332", "0.51951283", "0.5189337", "0.51684254", "0.5112357", "0.5111227", "0.5091043", "0.50769067", "0.50644475", "0.5053503", "0.504961", "0.5048681", "0.50382555", "0.5029509", "0.5015268", "0.5009602", "0.4998734", "0.49924228", "0.49923387", "0.49887183", "0.4973608", "0.49707553", "0.49626568", "0.49614993", "0.494034", "0.49356925", "0.49355537", "0.49313635", "0.49239373", "0.49235433", "0.48997968", "0.4895479", "0.48943025", "0.48918605", "0.488587", "0.48792928", "0.4877954", "0.48666131", "0.48598143", "0.4856963", "0.48569143", "0.48483863", "0.4847797", "0.48428681", "0.48413455", "0.48343316", "0.48315802", "0.48252988", "0.48204082", "0.48137903", "0.48084834", "0.48060188", "0.48014128", "0.4798865", "0.47944427", "0.47925645", "0.4790499", "0.47850096", "0.47826284", "0.47747084", "0.47735155", "0.4773245", "0.47666508", "0.47492456", "0.47464702", "0.47393018", "0.47326505", "0.47317782", "0.47285122", "0.47275138", "0.4725027", "0.47219327", "0.4720912", "0.4720385", "0.47186604", "0.47180519", "0.47159395", "0.47054124", "0.46963987", "0.46959737", "0.46949804" ]
0.7065208
0
NewGetNodeBadRequest creates a GetNodeBadRequest with default headers values
func NewGetNodeBadRequest() *GetNodeBadRequest { return &GetNodeBadRequest{} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewGetNodesBadRequest() *GetNodesBadRequest {\n\treturn &GetNodesBadRequest{}\n}", "func NewGetNodesIDBadRequest() *GetNodesIDBadRequest {\n\treturn &GetNodesIDBadRequest{}\n}", "func NewGetNodeTopoesConnectionBadRequest() *GetNodeTopoesConnectionBadRequest {\n\treturn &GetNodeTopoesConnectionBadRequest{}\n}", "func NewGetAWSNodeTypesBadRequest() *GetAWSNodeTypesBadRequest {\n\treturn &GetAWSNodeTypesBadRequest{}\n}", "func NewGetNodeNotFound() *GetNodeNotFound {\n\treturn &GetNodeNotFound{}\n}", "func NewDeregisterNodeBadRequest() *DeregisterNodeBadRequest {\n\n\treturn &DeregisterNodeBadRequest{}\n}", "func NewBadRequest(err error, msg ...string) *Errs {\n\tif err == nil {\n\t\terr = ErrBadRequest\n\t}\n\n\treturn &Errs{\n\t\tcodeHTTP: http.StatusBadRequest,\n\t\terr: err,\n\t\tkind: trace(2),\n\t\tmessage: msg,\n\t}\n}", "func NewGetNodeUnauthorized() *GetNodeUnauthorized {\n\treturn &GetNodeUnauthorized{}\n}", "func NewGetTagBadRequest() *GetTagBadRequest {\n\n\treturn &GetTagBadRequest{}\n}", "func NewGetTagBadRequest() *GetTagBadRequest {\n\n\treturn &GetTagBadRequest{}\n}", "func (ctx *CreateHostContext) BadRequest() error {\n\tctx.ResponseData.WriteHeader(400)\n\treturn nil\n}", "func NewGetBadRequest(body *GetBadRequestResponseBody) *goa.ServiceError {\n\tv := &goa.ServiceError{\n\t\tName: *body.Name,\n\t\tID: *body.ID,\n\t\tMessage: *body.Message,\n\t\tTemporary: *body.Temporary,\n\t\tTimeout: *body.Timeout,\n\t\tFault: *body.Fault,\n\t}\n\n\treturn v\n}", "func NewGetBadRequest(body *GetBadRequestResponseBody) *goa.ServiceError {\n\tv := &goa.ServiceError{\n\t\tName: *body.Name,\n\t\tID: *body.ID,\n\t\tMessage: *body.Message,\n\t\tTemporary: *body.Temporary,\n\t\tTimeout: *body.Timeout,\n\t\tFault: *body.Fault,\n\t}\n\n\treturn v\n}", "func NewGetNodeForbidden() *GetNodeForbidden {\n\treturn &GetNodeForbidden{}\n}", "func NewGetNicsBadRequest() *GetNicsBadRequest {\n\treturn &GetNicsBadRequest{}\n}", "func NewGetConfigurationSourceTreeUsingGETBadRequest() *GetConfigurationSourceTreeUsingGETBadRequest {\n\treturn &GetConfigurationSourceTreeUsingGETBadRequest{}\n}", "func NewPostOperationsGetNodeEdgePointDetailsBadRequest() *PostOperationsGetNodeEdgePointDetailsBadRequest {\n\n\treturn &PostOperationsGetNodeEdgePointDetailsBadRequest{}\n}", "func NewNewThreadBadRequest() *NewThreadBadRequest {\n\treturn &NewThreadBadRequest{}\n}", "func (a *FileStorageApiService) GetNode(Ctx _context.Context, bucketId string, nodeId string) ApiGetNodeRequest {\n\treturn ApiGetNodeRequest{\n\t\tApiService: a,\n\t\tCtx: Ctx,\n\t\tP_bucketId: bucketId,\n\t\tP_nodeId: nodeId,\n\t}\n}", "func NewUndeleteNodeBadRequest() *UndeleteNodeBadRequest {\n\treturn &UndeleteNodeBadRequest{}\n}", "func NodeGet(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\n\tif &node != nil {\n\t\tw.WriteHeader(200)\n\t\tif err := json.NewEncoder(w).Encode(node); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t} else {\n\t\tparamError := swl.ParamError{\n\t\t\tError: \"Node not found\",\n\t\t}\n\t\tw.WriteHeader(404)\n\t\tif err := json.NewEncoder(w).Encode(paramError); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}", "func SetNewBadRequest(errorName, errDesc string) *ErrorMessage {\n\treturn SetNewError(http.StatusBadRequest, errorName, errDesc)\n}", "func (ctx *GetByIDHostContext) BadRequest() error {\n\tctx.ResponseData.WriteHeader(400)\n\treturn nil\n}", "func getNode(ctx context.Context, client client.Interface, nodeName string) *libapi.Node {\n\tnode, 
err := client.Nodes().Get(ctx, nodeName, options.GetOptions{})\n\tif err != nil {\n\t\tif _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {\n\t\t\tlog.WithError(err).WithField(\"Name\", nodeName).Info(\"Unable to query node configuration\")\n\t\t\tlog.Warn(\"Unable to access datastore to query node configuration\")\n\t\t\tutils.Terminate()\n\t\t}\n\n\t\tlog.WithField(\"Name\", nodeName).Info(\"Building new node resource\")\n\t\tnode = libapi.NewNode()\n\t\tnode.Name = nodeName\n\t}\n\n\treturn node\n}", "func NewGetVariableRegistryUpdateRequestBadRequest() *GetVariableRegistryUpdateRequestBadRequest {\n\treturn &GetVariableRegistryUpdateRequestBadRequest{}\n}", "func BadRequest(err error) error {\n\treturn New(http.StatusBadRequest, err)\n}", "func NewNotFound() error {\n\treturn requestError{\n\t\tClientError: ClientError{\n\t\t\tErrors: []clientErrorSubError{{Message: \"status code 404\"}},\n\t\t},\n\t}\n}", "func NewBadRequest(field string, message string, args ...interface{}) *AppError {\n\treturn NewError(InvalidArgument, field, message, args...)\n}", "func NewGetHostGroupsBadRequest() *GetHostGroupsBadRequest {\n\treturn &GetHostGroupsBadRequest{}\n}", "func (ctx *GetOpmlContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func BadRequest(msg string) Error {\n\te := err{msg: msg, code: badRequestCode, group: generic, kind: badRequest}\n\treturn &e\n}", "func BadRequest(format string, args ...interface{}) error {\n\treturn New(http.StatusBadRequest, format, args...)\n}", "func CreateBadRequest(errorMessage string) BadRequest {\n\treturn BadRequest{Error: errorMessage}\n}", "func NewGetOneKeyspaceBadRequest() *GetOneKeyspaceBadRequest {\n\treturn &GetOneKeyspaceBadRequest{}\n}", "func NewGetActionTemplateLogoVersionBadRequest() *GetActionTemplateLogoVersionBadRequest {\n\treturn &GetActionTemplateLogoVersionBadRequest{}\n}", "func NewSystemGetBadRequest() *SystemGetBadRequest {\n\treturn &SystemGetBadRequest{}\n}", "func NewGetEveroutePackagesBadRequest() *GetEveroutePackagesBadRequest {\n\treturn &GetEveroutePackagesBadRequest{}\n}", "func (ctx *DeleteHostContext) BadRequest() error {\n\tctx.ResponseData.WriteHeader(400)\n\treturn nil\n}", "func NewGetNamespaceBadRequest() *GetNamespaceBadRequest {\n\treturn &GetNamespaceBadRequest{}\n}", "func NewGetNamespaceBadRequest() *GetNamespaceBadRequest {\n\treturn &GetNamespaceBadRequest{}\n}", "func NewGetNodeUpgradesDefault(code int) *GetNodeUpgradesDefault {\n\treturn &GetNodeUpgradesDefault{\n\t\t_statusCode: code,\n\t}\n}", "func SetNewBadRequestByFormat(ef *ErrorFormat) *ErrorMessage {\n\treturn &ErrorMessage{\n\t\tCode: http.StatusBadRequest,\n\t\tErrorList: []*ErrorFormat{\n\t\t\tef,\n\t\t},\n\t}\n}", "func (ctx *CreateItemContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func NewDescribeDefault(code int) *DescribeDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &DescribeDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (ctx *CreateMessageContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", 
\"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func NewGetLinkInfoBadRequest() *GetLinkInfoBadRequest {\n\n\treturn &GetLinkInfoBadRequest{}\n}", "func NewGetDeltaBadRequest() *GetDeltaBadRequest {\n\treturn &GetDeltaBadRequest{}\n}", "func NewGetDocumentBadRequest() *GetDocumentBadRequest {\n\n\treturn &GetDocumentBadRequest{}\n}", "func NewGetSepainstantBadRequest() *GetSepainstantBadRequest {\n\treturn &GetSepainstantBadRequest{}\n}", "func NewGetIntrospectionBadRequest() *GetIntrospectionBadRequest {\n\treturn &GetIntrospectionBadRequest{}\n}", "func NewGetNFTContractTokenBadRequest() *GetNFTContractTokenBadRequest {\n\n\treturn &GetNFTContractTokenBadRequest{}\n}", "func NewGetUserBadRequest() *GetUserBadRequest {\n\n\treturn &GetUserBadRequest{}\n}", "func (ctx *CreateOutputContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func NewGetActionBadRequest() *GetActionBadRequest {\n\treturn &GetActionBadRequest{}\n}", "func NewGetStateAddressBadRequest() *GetStateAddressBadRequest {\n\n\treturn &GetStateAddressBadRequest{}\n}", "func NewGetFullTextIndexStatusConfigurationBadRequest() *GetFullTextIndexStatusConfigurationBadRequest {\n\treturn &GetFullTextIndexStatusConfigurationBadRequest{}\n}", "func NewGetKeysBadRequest() *GetKeysBadRequest {\n\treturn &GetKeysBadRequest{}\n}", "func NewGetNotFound(body *GetNotFoundResponseBody) *goa.ServiceError {\n\tv := &goa.ServiceError{\n\t\tName: *body.Name,\n\t\tID: *body.ID,\n\t\tMessage: *body.Message,\n\t\tTemporary: *body.Temporary,\n\t\tTimeout: *body.Timeout,\n\t\tFault: *body.Fault,\n\t}\n\n\treturn v\n}", "func NewGetNotFound(body *GetNotFoundResponseBody) *goa.ServiceError {\n\tv := &goa.ServiceError{\n\t\tName: *body.Name,\n\t\tID: *body.ID,\n\t\tMessage: *body.Message,\n\t\tTemporary: *body.Temporary,\n\t\tTimeout: *body.Timeout,\n\t\tFault: *body.Fault,\n\t}\n\n\treturn v\n}", "func NewGetTallyBadRequest() *GetTallyBadRequest {\n\treturn &GetTallyBadRequest{}\n}", "func (ctx *CreateUserContext) BadRequest(r error) error {\n\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func NewGetEntriesBadRequest() *GetEntriesBadRequest {\n\treturn &GetEntriesBadRequest{}\n}", "func NewGetNvmfNamespaceSnapshotsBadRequest() *GetNvmfNamespaceSnapshotsBadRequest {\n\treturn &GetNvmfNamespaceSnapshotsBadRequest{}\n}", "func badRequest(resp *ApiResponse, msg string) error {\n resp.StatusCode = http.StatusBadRequest\n resp.Message = []byte(msg)\n resp.ErrorMessage = msg\n\n return nil\n}", "func NewGetGatewayUsingGETBadRequest() *GetGatewayUsingGETBadRequest {\n\treturn &GetGatewayUsingGETBadRequest{}\n}", "func NewNotFound(msg string) error {\n\treturn &ELBError{\n\t\tmsg: msg,\n\t\tCode: http.StatusNotFound,\n\t}\n}", "func NewAllConnectionsBadRequest() *AllConnectionsBadRequest {\n return &AllConnectionsBadRequest{\n }\n}", "func NewLeaderboardGetBadRequest() *LeaderboardGetBadRequest {\n\treturn &LeaderboardGetBadRequest{}\n}", "func TestGraphQL_BadRequest(t *testing.T) {\n\tstack := createNode(t, false, true)\n\tdefer stack.Close()\n\t// start node\n\tif err := stack.Start(); err != nil {\n\t\tt.Fatalf(\"could not start node: %v\", err)\n\t}\n\t// create http request\n\tbody := 
strings.NewReader(\"{\\\"query\\\": \\\"{bleh{number}}\\\",\\\"variables\\\": null}\")\n\tgqlReq, err := http.NewRequest(http.MethodGet, fmt.Sprintf(\"%s/graphql\", stack.HTTPEndpoint()), body)\n\tif err != nil {\n\t\tt.Fatalf(\"could not post: %v\", err)\n\t}\n\t// read from response\n\tresp := doHTTPRequest(t, gqlReq)\n\tbodyBytes, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Fatalf(\"could not read from response body: %v\", err)\n\t}\n\tassert.Equal(t, \"\", string(bodyBytes)) // TODO: geth1.10.2: check changes\n\tassert.Equal(t, 404, resp.StatusCode)\n}", "func NewGetTagDefault(code int) *GetTagDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &GetTagDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewGetConsistencyGroupsBadRequest() *GetConsistencyGroupsBadRequest {\n\treturn &GetConsistencyGroupsBadRequest{}\n}", "func (client MultipleResponsesClient) Get202None204NoneDefaultNone400InvalidPreparer() (*http.Request, error) {\n preparer := autorest.CreatePreparer(\n autorest.AsGet(),\n autorest.WithBaseURL(client.BaseURI),\n autorest.WithPath(\"/http/payloads/202/none/204/none/default/none/response/400/invalid\"))\n return preparer.Prepare(&http.Request{})\n}", "func newNodesMessage(nodes map[string]config.NodeConfig) nodesMessage {\n\treturn nodesMessage{CommonMessage: newMessage(), nodes: nodes}\n}", "func NewGetVMMetricsBadRequest() *GetVMMetricsBadRequest {\n\treturn &GetVMMetricsBadRequest{}\n}", "func (ctx *CreateFeedContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func (c *DOM) RequestNodeWithParams(v *DOMRequestNodeParams) (int, error) {\n\tresp, err := gcdmessage.SendCustomReturn(c.target, c.target.GetSendCh(), &gcdmessage.ParamRequest{Id: c.target.GetId(), Method: \"DOM.requestNode\", Params: v})\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar chromeData struct {\n\t\tResult struct {\n\t\t\tNodeId int\n\t\t}\n\t}\n\n\tif resp == nil {\n\t\treturn 0, &gcdmessage.ChromeEmptyResponseErr{}\n\t}\n\n\t// test if error first\n\tcerr := &gcdmessage.ChromeErrorResponse{}\n\tjson.Unmarshal(resp.Data, cerr)\n\tif cerr != nil && cerr.Error != nil {\n\t\treturn 0, &gcdmessage.ChromeRequestErr{Resp: cerr}\n\t}\n\n\tif err := json.Unmarshal(resp.Data, &chromeData); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn chromeData.Result.NodeId, nil\n}", "func NewGetDomainUsingGETBadRequest() *GetDomainUsingGETBadRequest {\n\treturn &GetDomainUsingGETBadRequest{}\n}", "func BadRequest(message ...interface{}) Err {\n\treturn Boomify(http.StatusBadRequest, message...)\n}", "func NewGetClustersBadRequest() *GetClustersBadRequest {\n\treturn &GetClustersBadRequest{}\n}", "func NewArtifactListerBadRequest() *ArtifactListerBadRequest {\n\n\treturn &ArtifactListerBadRequest{}\n}", "func BadRequest(err error) Response {\n\treturn &errorResponse{\n\t\tcode: http.StatusBadRequest,\n\t\tmsg: err.Error(),\n\t}\n}", "func NewGetPrimeEntitlementsBadRequest() *GetPrimeEntitlementsBadRequest {\n\treturn &GetPrimeEntitlementsBadRequest{}\n}", "func NewGetAdminMetadataBadRequest() *GetAdminMetadataBadRequest {\n\treturn &GetAdminMetadataBadRequest{}\n}", "func NewGetNodeOK() *GetNodeOK {\n\treturn &GetNodeOK{}\n}", "func NewGetIscsiConnectionsBadRequest() *GetIscsiConnectionsBadRequest {\n\treturn &GetIscsiConnectionsBadRequest{}\n}", "func (ctx *GetDogsByHostIDHostContext) 
BadRequest() error {\n\tctx.ResponseData.WriteHeader(400)\n\treturn nil\n}", "func NewGetSecurityGroupBadRequest() *GetSecurityGroupBadRequest {\n\treturn &GetSecurityGroupBadRequest{}\n}", "func NewGetDetailBadRequest() *GetDetailBadRequest {\n\treturn &GetDetailBadRequest{}\n}", "func NewGetEntityFiltersBadRequest() *GetEntityFiltersBadRequest {\n\treturn &GetEntityFiltersBadRequest{}\n}", "func NewGetTaskTaskIDBadRequest() *GetTaskTaskIDBadRequest {\n\n\treturn &GetTaskTaskIDBadRequest{}\n}", "func (client MultipleResponsesClient) GetDefaultNone400InvalidPreparer() (*http.Request, error) {\n preparer := autorest.CreatePreparer(\n autorest.AsGet(),\n autorest.WithBaseURL(client.BaseURI),\n autorest.WithPath(\"/http/payloads/default/none/response/400/invalid\"))\n return preparer.Prepare(&http.Request{})\n}", "func NewGetLeaderboardBadRequest() *GetLeaderboardBadRequest {\n\treturn &GetLeaderboardBadRequest{}\n}", "func (ctx *CreateVerificationContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func NewGetCustomNameByProjectIDBadRequest() *GetCustomNameByProjectIDBadRequest {\n\treturn &GetCustomNameByProjectIDBadRequest{}\n}", "func (ctx *UploadOpmlContext) BadRequest(r error) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 400, r)\n}", "func newCbsNode(region string, volumeAttachLimit int64) (*cbsNode, error) {\n\tsecretID, secretKey, token, _ := util.GetSercet()\n\tcred := &common.Credential{\n\t\tSecretId: secretID,\n\t\tSecretKey: secretKey,\n\t\tToken: token,\n\t}\n\n\tclient, err := cbs.NewClient(cred, region, profile.NewClientProfile())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnode := cbsNode{\n\t\tmetadataClient: metadata.NewMetaData(http.DefaultClient),\n\t\tcbsClient: client,\n\t\tmounter: mount.SafeFormatAndMount{\n\t\t\tInterface: mount.New(\"\"),\n\t\t\tExec: exec.New(),\n\t\t},\n\t\tidempotent: util.NewIdempotent(),\n\t\tvolumeAttachLimit: volumeAttachLimit,\n\t}\n\treturn &node, nil\n}", "func BadRequest(msg string) error {\n\tif msg == \"\" {\n\t\tmsg = \"su solicitud está en un formato incorrecto.\"\n\t}\n\treturn echo.NewHTTPError(http.StatusBadRequest, msg)\n}", "func (client MultipleResponsesClient) Get202None204NoneDefaultNone202InvalidPreparer() (*http.Request, error) {\n preparer := autorest.CreatePreparer(\n autorest.AsGet(),\n autorest.WithBaseURL(client.BaseURI),\n autorest.WithPath(\"/http/payloads/202/none/204/none/default/none/response/202/invalid\"))\n return preparer.Prepare(&http.Request{})\n}", "func NewGetIPAMvrfgroupBadRequest() *GetIPAMvrfgroupBadRequest {\n\treturn &GetIPAMvrfgroupBadRequest{}\n}", "func NewGetDataContextTopologyUUIDNodeNodeUUIDBadRequest() *GetDataContextTopologyUUIDNodeNodeUUIDBadRequest {\n\n\treturn &GetDataContextTopologyUUIDNodeNodeUUIDBadRequest{}\n}" ]
[ "0.65216607", "0.6145907", "0.60190094", "0.58946764", "0.58436394", "0.58317244", "0.5631672", "0.5584461", "0.5531108", "0.5531108", "0.551818", "0.55171156", "0.55171156", "0.5502871", "0.5453922", "0.5437031", "0.54297453", "0.53937536", "0.53785926", "0.5357476", "0.5352971", "0.5351455", "0.53494954", "0.53382355", "0.53226125", "0.53015316", "0.5295033", "0.5294793", "0.5282608", "0.51957613", "0.5188548", "0.5185474", "0.51715314", "0.5130074", "0.5120647", "0.5119849", "0.5113639", "0.50987697", "0.5081195", "0.5081195", "0.50665146", "0.5053302", "0.505004", "0.5039668", "0.50379395", "0.5036406", "0.5025895", "0.50198346", "0.5018363", "0.5016429", "0.50127107", "0.49987164", "0.49795547", "0.49594745", "0.49433756", "0.4935565", "0.49045923", "0.49015072", "0.49015072", "0.4898319", "0.48913425", "0.4890552", "0.48647645", "0.4861841", "0.48581254", "0.4857165", "0.48570806", "0.4855167", "0.4852276", "0.48477268", "0.48467922", "0.4843286", "0.483899", "0.4833805", "0.48330864", "0.4830713", "0.48220843", "0.48143736", "0.48128748", "0.4810628", "0.48081133", "0.48079124", "0.48036623", "0.47885212", "0.47864836", "0.47843984", "0.47826687", "0.47732183", "0.47716442", "0.4770589", "0.47704348", "0.47674462", "0.47661397", "0.47641918", "0.47571358", "0.47569954", "0.47556207", "0.47523287", "0.47502363", "0.47486448" ]
0.8110655
0
NewGetNodeUnauthorized creates a GetNodeUnauthorized with default headers values
func NewGetNodeUnauthorized() *GetNodeUnauthorized { return &GetNodeUnauthorized{} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewGetNodeUpgradesUnauthorized() *GetNodeUpgradesUnauthorized {\n\treturn &GetNodeUpgradesUnauthorized{}\n}", "func NewGetUnauthorized(body *GetUnauthorizedResponseBody) *goa.ServiceError {\n\tv := &goa.ServiceError{\n\t\tName: *body.Name,\n\t\tID: *body.ID,\n\t\tMessage: *body.Message,\n\t\tTemporary: *body.Temporary,\n\t\tTimeout: *body.Timeout,\n\t\tFault: *body.Fault,\n\t}\n\n\treturn v\n}", "func NewGetNodeForbidden() *GetNodeForbidden {\n\treturn &GetNodeForbidden{}\n}", "func NewGetAWSNodeTypesUnauthorized() *GetAWSNodeTypesUnauthorized {\n\treturn &GetAWSNodeTypesUnauthorized{}\n}", "func NewGetConfigurationSourceTreeUsingGETUnauthorized() *GetConfigurationSourceTreeUsingGETUnauthorized {\n\treturn &GetConfigurationSourceTreeUsingGETUnauthorized{}\n}", "func NewGetTagUnauthorized() *GetTagUnauthorized {\n\treturn &GetTagUnauthorized{}\n}", "func NewUnauthorized(cause error) Unauthorized { return Unauthorized(cause.Error()) }", "func NewUndeleteNodeUnauthorized() *UndeleteNodeUnauthorized {\n\treturn &UndeleteNodeUnauthorized{}\n}", "func NewGetSepainstantUnauthorized() *GetSepainstantUnauthorized {\n\treturn &GetSepainstantUnauthorized{}\n}", "func NewGetNamespaceUnauthorized() *GetNamespaceUnauthorized {\n\treturn &GetNamespaceUnauthorized{}\n}", "func NewGetNamespaceUnauthorized() *GetNamespaceUnauthorized {\n\treturn &GetNamespaceUnauthorized{}\n}", "func NewGetDeltaUnauthorized() *GetDeltaUnauthorized {\n\treturn &GetDeltaUnauthorized{}\n}", "func NewGetNodeV1beta1APIResourcesUnauthorized() *GetNodeV1beta1APIResourcesUnauthorized {\n\treturn &GetNodeV1beta1APIResourcesUnauthorized{}\n}", "func NewGetModelUnauthorized() *GetModelUnauthorized {\n\treturn &GetModelUnauthorized{}\n}", "func NewUnauthorized(err error, msg string) error {\n\treturn &unauthorized{wrap(err, msg, \"\")}\n}", "func NewGetClusterUnauthorized() *GetClusterUnauthorized {\n\n\treturn &GetClusterUnauthorized{}\n}", "func NewUnauthorized(err error, msg ...string) *Errs {\n\tif err == nil {\n\t\terr = ErrUnauthorized\n\t}\n\treturn &Errs{\n\t\tcodeHTTP: http.StatusUnauthorized,\n\t\terr: err,\n\t\tkind: trace(2),\n\t\tmessage: msg,\n\t}\n}", "func NewGetDocumentUnauthorized() *GetDocumentUnauthorized {\n\n\treturn &GetDocumentUnauthorized{}\n}", "func NewGetVariableRegistryUpdateRequestUnauthorized() *GetVariableRegistryUpdateRequestUnauthorized {\n\treturn &GetVariableRegistryUpdateRequestUnauthorized{}\n}", "func NewGetIntrospectionUnauthorized() *GetIntrospectionUnauthorized {\n\treturn &GetIntrospectionUnauthorized{}\n}", "func NewGetOrderUnauthorized() *GetOrderUnauthorized {\n\treturn &GetOrderUnauthorized{}\n}", "func NewGetVCenterUsingGETUnauthorized() *GetVCenterUsingGETUnauthorized {\n\treturn &GetVCenterUsingGETUnauthorized{}\n}", "func NewGetServiceLevelUnauthorized() *GetServiceLevelUnauthorized {\n\treturn &GetServiceLevelUnauthorized{}\n}", "func NewGetNodeNotFound() *GetNodeNotFound {\n\treturn &GetNodeNotFound{}\n}", "func NewUnauthorized(res calcsvc.Unauthorized) Unauthorized {\n\tbody := Unauthorized(res)\n\treturn body\n}", "func NewGetUserUnauthorized() *GetUserUnauthorized {\n\n\treturn &GetUserUnauthorized{}\n}", "func NewGetMaintenanceWindowUnauthorized() *GetMaintenanceWindowUnauthorized {\n\treturn &GetMaintenanceWindowUnauthorized{}\n}", "func getNode(nd *wssdcloud.Node) *cloud.Node {\n\treturn &cloud.Node{\n\t\tName: &nd.Name,\n\t\tLocation: &nd.LocationName,\n\t\tNodeProperties: &cloud.NodeProperties{\n\t\t\tFQDN: &nd.Fqdn,\n\t\t\tPort: &nd.Port,\n\t\t\tAuthorizerPort: 
&nd.AuthorizerPort,\n\t\t\tCertificate: &nd.Certificate,\n\t\t\tStatuses: getNodeStatuses(nd),\n\t\t},\n\t\tVersion: &nd.Status.Version.Number,\n\t}\n}", "func NewGetGCUnauthorized() *GetGCUnauthorized {\n\treturn &GetGCUnauthorized{}\n}", "func NewDeleteNodeUsingDELETEUnauthorized() *DeleteNodeUsingDELETEUnauthorized {\n\treturn &DeleteNodeUsingDELETEUnauthorized{}\n}", "func NewGetNodeBadRequest() *GetNodeBadRequest {\n\treturn &GetNodeBadRequest{}\n}", "func NewGetTagsUnauthorized() *GetTagsUnauthorized {\n\treturn &GetTagsUnauthorized{}\n}", "func NewGetIPAMSearchUnauthorized() *GetIPAMSearchUnauthorized {\n\treturn &GetIPAMSearchUnauthorized{}\n}", "func NewGetDomainUsingGETUnauthorized() *GetDomainUsingGETUnauthorized {\n\treturn &GetDomainUsingGETUnauthorized{}\n}", "func NewAuthPrivilegeRequestWithoutParam() *AuthPrivilegeRequest {\n\n return &AuthPrivilegeRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/management:authPrivilege\",\n Method: \"POST\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}", "func NewGetPublicIPByUUIDUsingGETUnauthorized() *GetPublicIPByUUIDUsingGETUnauthorized {\n\treturn &GetPublicIPByUUIDUsingGETUnauthorized{}\n}", "func NewGetActionUnauthorized() *GetActionUnauthorized {\n\treturn &GetActionUnauthorized{}\n}", "func NewGetRoleUnauthorized() *GetRoleUnauthorized {\n\treturn &GetRoleUnauthorized{}\n}", "func NewGetWhoamiUnauthorized() *GetWhoamiUnauthorized {\n\n\treturn &GetWhoamiUnauthorized{}\n}", "func NewGetClientUnauthorized() *GetClientUnauthorized {\n\treturn &GetClientUnauthorized{}\n}", "func NewObjectsGetUnauthorized() *ObjectsGetUnauthorized {\n\treturn &ObjectsGetUnauthorized{}\n}", "func NewGetOneKeyspaceUnauthorized() *GetOneKeyspaceUnauthorized {\n\treturn &GetOneKeyspaceUnauthorized{}\n}", "func NewGetWorkflowTemplateUnauthorized() *GetWorkflowTemplateUnauthorized {\n\treturn &GetWorkflowTemplateUnauthorized{}\n}", "func NewGetClustersUnauthorized() *GetClustersUnauthorized {\n\treturn &GetClustersUnauthorized{}\n}", "func NewGetEntriesUnauthorized() *GetEntriesUnauthorized {\n\treturn &GetEntriesUnauthorized{}\n}", "func NewGetScannerUnauthorized() *GetScannerUnauthorized {\n\treturn &GetScannerUnauthorized{}\n}", "func NewGetAPIUnauthorized() *GetAPIUnauthorized {\n\treturn &GetAPIUnauthorized{}\n}", "func NewGetGatewaysUnauthorized() *GetGatewaysUnauthorized {\n\treturn &GetGatewaysUnauthorized{}\n}", "func NewConnectCoreV1GetNodeProxyUnauthorized() *ConnectCoreV1GetNodeProxyUnauthorized {\n\n\treturn &ConnectCoreV1GetNodeProxyUnauthorized{}\n}", "func NewGetScopeUnauthorized() *GetScopeUnauthorized {\n\treturn &GetScopeUnauthorized{}\n}", "func NewCatalogGetUnauthorized() *CatalogGetUnauthorized {\n\treturn &CatalogGetUnauthorized{}\n}", "func NewGetOneColumnUnauthorized() *GetOneColumnUnauthorized {\n\treturn &GetOneColumnUnauthorized{}\n}", "func NewGetCompanyUnauthorized(body *GetCompanyUnauthorizedResponseBody) *goa.ServiceError {\n\tv := &goa.ServiceError{\n\t\tName: *body.Name,\n\t\tID: *body.ID,\n\t\tMessage: *body.Message,\n\t\tTemporary: *body.Temporary,\n\t\tTimeout: *body.Timeout,\n\t\tFault: *body.Fault,\n\t}\n\n\treturn v\n}", "func NewAdminGetTagUnauthorized() *AdminGetTagUnauthorized {\n\treturn &AdminGetTagUnauthorized{}\n}", "func NewLeaderboardGetUnauthorized() *LeaderboardGetUnauthorized {\n\treturn &LeaderboardGetUnauthorized{}\n}", "func NewGetKeysUnauthorized() *GetKeysUnauthorized {\n\treturn &GetKeysUnauthorized{}\n}", "func NewGetTransportNodeParams() *GetTransportNodeParams {\n\tvar ()\n\treturn 
&GetTransportNodeParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func NewGetInternalconfigUnauthorized() *GetInternalconfigUnauthorized {\n\treturn &GetInternalconfigUnauthorized{}\n}", "func NewGetUsingGET5Unauthorized() *GetUsingGET5Unauthorized {\n\treturn &GetUsingGET5Unauthorized{}\n}", "func NewGetIPAMvrfgroupUnauthorized() *GetIPAMvrfgroupUnauthorized {\n\treturn &GetIPAMvrfgroupUnauthorized{}\n}", "func NewGetControllerServiceUnauthorized() *GetControllerServiceUnauthorized {\n\treturn &GetControllerServiceUnauthorized{}\n}", "func NewGetDashboardUnauthorized() *GetDashboardUnauthorized {\n\treturn &GetDashboardUnauthorized{}\n}", "func NewGetUserTotpauthUnauthorized() *GetUserTotpauthUnauthorized {\n\treturn &GetUserTotpauthUnauthorized{}\n}", "func getNode(ctx context.Context, client client.Interface, nodeName string) *libapi.Node {\n\tnode, err := client.Nodes().Get(ctx, nodeName, options.GetOptions{})\n\tif err != nil {\n\t\tif _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {\n\t\t\tlog.WithError(err).WithField(\"Name\", nodeName).Info(\"Unable to query node configuration\")\n\t\t\tlog.Warn(\"Unable to access datastore to query node configuration\")\n\t\t\tutils.Terminate()\n\t\t}\n\n\t\tlog.WithField(\"Name\", nodeName).Info(\"Building new node resource\")\n\t\tnode = libapi.NewNode()\n\t\tnode.Name = nodeName\n\t}\n\n\treturn node\n}", "func NodeGet(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\n\tif &node != nil {\n\t\tw.WriteHeader(200)\n\t\tif err := json.NewEncoder(w).Encode(node); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t} else {\n\t\tparamError := swl.ParamError{\n\t\t\tError: \"Node not found\",\n\t\t}\n\t\tw.WriteHeader(404)\n\t\tif err := json.NewEncoder(w).Encode(paramError); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}", "func NewWeaviateThingsGetUnauthorized() *WeaviateThingsGetUnauthorized {\n\treturn &WeaviateThingsGetUnauthorized{}\n}", "func NewGetConfigurationUnauthorized() *GetConfigurationUnauthorized {\n\treturn &GetConfigurationUnauthorized{}\n}", "func NewGetCloudSystemMetricsUnauthorized() *GetCloudSystemMetricsUnauthorized {\n\treturn &GetCloudSystemMetricsUnauthorized{}\n}", "func (r *Requester) newRequest(endpoint string) (*http.Request, error) {\n req, err := http.NewRequest(\"GET\", endpoint, nil)\n if err != nil {\n return nil, err\n }\n\tbearer := fmt.Sprintf(\"Bearer %s\", r.bearer)\n req.Header.Add(\"Authorization\", bearer)\n\treq.Header.Add(\"Ocp-Apim-Subscription-Key\", apimKey)\n req.Header.Set(\"User-Agent\", \"hackacraic\")\n\treturn req, nil\n}", "func NewGetGroupUnauthorized() *GetGroupUnauthorized {\n\treturn &GetGroupUnauthorized{}\n}", "func NewGetPublicIPUnauthorized() *GetPublicIPUnauthorized {\n\treturn &GetPublicIPUnauthorized{}\n}", "func NewReadFeatureUnauthorized() *ReadFeatureUnauthorized {\n\treturn &ReadFeatureUnauthorized{}\n}", "func NewCurrenciesGetUnauthorized() *CurrenciesGetUnauthorized {\n\treturn &CurrenciesGetUnauthorized{}\n}", "func NewGetControllerStatusUnauthorized() *GetControllerStatusUnauthorized {\n\treturn &GetControllerStatusUnauthorized{}\n}", "func NewGetConfigurationSourceTreeUsingGETForbidden() *GetConfigurationSourceTreeUsingGETForbidden {\n\treturn &GetConfigurationSourceTreeUsingGETForbidden{}\n}", "func NewGetSelLogServiceUnauthorized() *GetSelLogServiceUnauthorized {\n\treturn &GetSelLogServiceUnauthorized{}\n}", "func NewGetCustomerGroupsUnauthorized() *GetCustomerGroupsUnauthorized {\n\treturn 
&GetCustomerGroupsUnauthorized{}\n}", "func NewGetRefreshTokenUnauthorized() *GetRefreshTokenUnauthorized {\n\n\treturn &GetRefreshTokenUnauthorized{}\n}", "func (a *FileStorageApiService) GetNode(Ctx _context.Context, bucketId string, nodeId string) ApiGetNodeRequest {\n\treturn ApiGetNodeRequest{\n\t\tApiService: a,\n\t\tCtx: Ctx,\n\t\tP_bucketId: bucketId,\n\t\tP_nodeId: nodeId,\n\t}\n}", "func NewGetNodeUpgradesForbidden() *GetNodeUpgradesForbidden {\n\treturn &GetNodeUpgradesForbidden{}\n}", "func NewGetTeamDetailUnauthorized() *GetTeamDetailUnauthorized {\n\treturn &GetTeamDetailUnauthorized{}\n}", "func New() VerkleNode {\n\treturn newInternalNode(0)\n}", "func NewGetCollectionsUnauthorized() *GetCollectionsUnauthorized {\n\treturn &GetCollectionsUnauthorized{}\n}", "func NewGetCustomIntegrationVersionByIDUsingGETUnauthorized() *GetCustomIntegrationVersionByIDUsingGETUnauthorized {\n\treturn &GetCustomIntegrationVersionByIDUsingGETUnauthorized{}\n}", "func NewGetCardsUnauthorized() *GetCardsUnauthorized {\n\n\treturn &GetCardsUnauthorized{}\n}", "func NewGetClusterInstallConfigUnauthorized() *GetClusterInstallConfigUnauthorized {\n\n\treturn &GetClusterInstallConfigUnauthorized{}\n}", "func NewGetAuth() context.Handler {\n\treturn func(ctx context.Context) {\n\t\tname := ctx.Params().Get(\"name\")\n\t\ttoken := ctx.GetHeader(\"token\")\n\t\tif token == \"\" || getClientToken(name) != token {\n\t\t\tctx.StatusCode(403)\n\t\t\tctx.StopExecution()\n\t\t\treturn\n\t\t}\n\t\tctx.Next()\n\t}\n}", "func newAuthenticatedRequest(t testing.TB, method, path string, body io.Reader, user, pass string) *http.Request {\n\treq := newRequest(t, method, path, body)\n\treq.SetBasicAuth(user, pass)\n\treq.Header.Add(\"Accept\", resticAPIV2)\n\treturn req\n}", "func NewGetNodeOK() *GetNodeOK {\n\treturn &GetNodeOK{}\n}", "func NewGetClusterOidcUnauthorized() *GetClusterOidcUnauthorized {\n\treturn &GetClusterOidcUnauthorized{}\n}", "func newHandleGetOrHeader(\n\tcore core.Core,\n) handleGetOrHeader {\n\treturn _handleGetOrHeader{\n\t\tcore: core,\n\t\thttp: ihttp.New(),\n\t}\n}", "func newHTTPHeader(header, value string) httpHeader {\n\treturn httpHeader{Header: header, Value: value}\n}", "func NewGetScannersUnauthorized() *GetScannersUnauthorized {\n\treturn &GetScannersUnauthorized{}\n}", "func NewGetPayportUnauthorized() *GetPayportUnauthorized {\n\treturn &GetPayportUnauthorized{}\n}", "func NewDisableHostUnauthorized() *DisableHostUnauthorized {\n\treturn &DisableHostUnauthorized{}\n}", "func NewGetOfficeUserUnauthorized() *GetOfficeUserUnauthorized {\n\n\treturn &GetOfficeUserUnauthorized{}\n}", "func MakeGetNodeEndpoint(s registry.Service) endpoint.Endpoint {\n\treturn func(ctx context.Context, request interface{}) (interface{}, error) {\n\t\treq := request.(GetNodeRequest)\n\t\tnode, err := s.GetNode(ctx, req.Token, req.Id)\n\t\treturn GetNodeResponse{\n\t\t\tErr: err,\n\t\t\tNode: node,\n\t\t}, nil\n\t}\n}", "func NewGetGroupsByDisplayNameUsingGETUnauthorized() *GetGroupsByDisplayNameUsingGETUnauthorized {\n\treturn &GetGroupsByDisplayNameUsingGETUnauthorized{}\n}", "func (a *FileStorageApiService) GetNodeExecute(r ApiGetNodeRequest) (SingleNode, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue SingleNode\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.Ctx, 
\"FileStorageApiService.GetNode\")\n\tif localBasePath == \"/\" {\n\t localBasePath = \"\"\n\t}\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v2/file_storage/buckets/{bucket_id}/nodes/{node_id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"bucket_id\"+\"}\", _neturl.PathEscape(parameterToString(r.P_bucketId, \"\")) , -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node_id\"+\"}\", _neturl.PathEscape(parameterToString(r.P_nodeId, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif r.P_directorySize != nil {\n\t\tlocalVarQueryParams.Add(\"directory_size\", parameterToString(*r.P_directorySize, \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\treq, err := a.client.prepareRequest(r.Ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func NewGetBucketsUnauthorized() *GetBucketsUnauthorized {\n\treturn &GetBucketsUnauthorized{}\n}" ]
[ "0.62317866", "0.58861405", "0.58682585", "0.5845086", "0.5691563", "0.5689939", "0.5642161", "0.5590622", "0.55228585", "0.5522682", "0.5522682", "0.55073184", "0.5500066", "0.5496636", "0.54705286", "0.5444548", "0.5434237", "0.537068", "0.5360181", "0.5313686", "0.5278469", "0.5270924", "0.5253128", "0.52410215", "0.52374816", "0.5236632", "0.52106506", "0.5189341", "0.51793766", "0.5169311", "0.5157321", "0.514865", "0.51189107", "0.5108238", "0.51029575", "0.5061558", "0.5047932", "0.50425375", "0.5023919", "0.49989846", "0.4994048", "0.49910867", "0.4975243", "0.49537325", "0.49457163", "0.4939189", "0.49329528", "0.49271134", "0.49168015", "0.49066925", "0.4900957", "0.49001077", "0.4899646", "0.48904032", "0.48636606", "0.48440927", "0.48424986", "0.48398668", "0.4819619", "0.4797664", "0.4788237", "0.47829363", "0.47813877", "0.47779965", "0.4764564", "0.4760512", "0.47517666", "0.47460368", "0.47385168", "0.47370595", "0.47358978", "0.47235838", "0.47137845", "0.47069615", "0.46748936", "0.46744487", "0.4651788", "0.46371353", "0.46285683", "0.46273017", "0.46213412", "0.46184197", "0.46152902", "0.45982453", "0.45965308", "0.4593452", "0.4588482", "0.4581621", "0.45734465", "0.45707092", "0.45667708", "0.4564601", "0.45561975", "0.45461807", "0.45417207", "0.45371678", "0.45360696", "0.45352313", "0.4528368", "0.45197165" ]
0.82049906
0
NewGetNodeForbidden creates a GetNodeForbidden with default headers values
func NewGetNodeForbidden() *GetNodeForbidden { return &GetNodeForbidden{} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewGetNodeUpgradesForbidden() *GetNodeUpgradesForbidden {\n\treturn &GetNodeUpgradesForbidden{}\n}", "func NewGetNodeUnauthorized() *GetNodeUnauthorized {\n\treturn &GetNodeUnauthorized{}\n}", "func NewGetForbidden(body *GetForbiddenResponseBody) *goa.ServiceError {\n\tv := &goa.ServiceError{\n\t\tName: *body.Name,\n\t\tID: *body.ID,\n\t\tMessage: *body.Message,\n\t\tTemporary: *body.Temporary,\n\t\tTimeout: *body.Timeout,\n\t\tFault: *body.Fault,\n\t}\n\n\treturn v\n}", "func NewGetNodeBadRequest() *GetNodeBadRequest {\n\treturn &GetNodeBadRequest{}\n}", "func NewGetTagForbidden() *GetTagForbidden {\n\n\treturn &GetTagForbidden{}\n}", "func NewGetClusterForbidden() *GetClusterForbidden {\n\n\treturn &GetClusterForbidden{}\n}", "func NewGetNamespaceForbidden() *GetNamespaceForbidden {\n\treturn &GetNamespaceForbidden{}\n}", "func NewGetNamespaceForbidden() *GetNamespaceForbidden {\n\treturn &GetNamespaceForbidden{}\n}", "func NewGetDeltaForbidden() *GetDeltaForbidden {\n\treturn &GetDeltaForbidden{}\n}", "func NewGetTokenForbidden() *GetTokenForbidden {\n\treturn &GetTokenForbidden{}\n}", "func NewGetConfigurationSourceTreeUsingGETForbidden() *GetConfigurationSourceTreeUsingGETForbidden {\n\treturn &GetConfigurationSourceTreeUsingGETForbidden{}\n}", "func NewForbidden(err error, msg ...string) *Errs {\n\tif err == nil {\n\t\terr = ErrForbidden\n\t}\n\n\treturn &Errs{\n\t\tcodeHTTP: http.StatusForbidden,\n\t\terr: err,\n\t\tkind: trace(2),\n\t\tmessage: msg,\n\t}\n}", "func NewGetModelRegistryForbidden() *GetModelRegistryForbidden {\n\treturn &GetModelRegistryForbidden{}\n}", "func NewGetGCForbidden() *GetGCForbidden {\n\treturn &GetGCForbidden{}\n}", "func (nnr NetworkNameError) Forbidden() {}", "func NewForbidden(a Attributes, internalError error) error {\n\t// do not double wrap an error of same type\n\tif apierrors.IsForbidden(internalError) {\n\t\treturn internalError\n\t}\n\tname, resource, err := extractResourceName(a)\n\tif err != nil {\n\t\treturn apierrors.NewInternalError(utilerrors.NewAggregate([]error{internalError, err}))\n\t}\n\treturn apierrors.NewForbidden(resource, name, internalError)\n}", "func NewGetMaintenanceWindowForbidden() *GetMaintenanceWindowForbidden {\n\treturn &GetMaintenanceWindowForbidden{}\n}", "func NewGetDocumentForbidden() *GetDocumentForbidden {\n\n\treturn &GetDocumentForbidden{}\n}", "func NewGetPeersForbidden() *GetPeersForbidden {\n\treturn &GetPeersForbidden{}\n}", "func Forbidden(msg string) Error {\n\te := err{msg: msg, code: forbiddenCode, group: generic, kind: forbidden}\n\treturn &e\n}", "func NewDeleteNodeUsingDELETEForbidden() *DeleteNodeUsingDELETEForbidden {\n\treturn &DeleteNodeUsingDELETEForbidden{}\n}", "func NewUndeleteNodeForbidden() *UndeleteNodeForbidden {\n\treturn &UndeleteNodeForbidden{}\n}", "func NodeGet(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\n\tif &node != nil {\n\t\tw.WriteHeader(200)\n\t\tif err := json.NewEncoder(w).Encode(node); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t} else {\n\t\tparamError := swl.ParamError{\n\t\t\tError: \"Node not found\",\n\t\t}\n\t\tw.WriteHeader(404)\n\t\tif err := json.NewEncoder(w).Encode(paramError); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}", "func (a *FileStorageApiService) GetNode(Ctx _context.Context, bucketId string, nodeId string) ApiGetNodeRequest {\n\treturn ApiGetNodeRequest{\n\t\tApiService: a,\n\t\tCtx: Ctx,\n\t\tP_bucketId: bucketId,\n\t\tP_nodeId: nodeId,\n\t}\n}", "func 
NewGetSepainstantForbidden() *GetSepainstantForbidden {\n\treturn &GetSepainstantForbidden{}\n}", "func NewGetNodeNotFound() *GetNodeNotFound {\n\treturn &GetNodeNotFound{}\n}", "func Forbidden(format string, args ...interface{}) error {\n\treturn New(http.StatusForbidden, format, args...)\n}", "func NewGetGatewaysForbidden() *GetGatewaysForbidden {\n\treturn &GetGatewaysForbidden{}\n}", "func NewGetScopeForbidden() *GetScopeForbidden {\n\treturn &GetScopeForbidden{}\n}", "func NewGetCapabilityForbidden() *GetCapabilityForbidden {\n\treturn &GetCapabilityForbidden{}\n}", "func NewGetActionForbidden() *GetActionForbidden {\n\treturn &GetActionForbidden{}\n}", "func NewGetHostGroupsForbidden() *GetHostGroupsForbidden {\n\treturn &GetHostGroupsForbidden{}\n}", "func NewGetDomainUsingGETForbidden() *GetDomainUsingGETForbidden {\n\treturn &GetDomainUsingGETForbidden{}\n}", "func getNode(ctx context.Context, client client.Interface, nodeName string) *libapi.Node {\n\tnode, err := client.Nodes().Get(ctx, nodeName, options.GetOptions{})\n\tif err != nil {\n\t\tif _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {\n\t\t\tlog.WithError(err).WithField(\"Name\", nodeName).Info(\"Unable to query node configuration\")\n\t\t\tlog.Warn(\"Unable to access datastore to query node configuration\")\n\t\t\tutils.Terminate()\n\t\t}\n\n\t\tlog.WithField(\"Name\", nodeName).Info(\"Building new node resource\")\n\t\tnode = libapi.NewNode()\n\t\tnode.Name = nodeName\n\t}\n\n\treturn node\n}", "func NewGetTagsForbidden() *GetTagsForbidden {\n\treturn &GetTagsForbidden{}\n}", "func NewGetVariableRegistryUpdateRequestForbidden() *GetVariableRegistryUpdateRequestForbidden {\n\treturn &GetVariableRegistryUpdateRequestForbidden{}\n}", "func NewGetLoadBalancerForbidden() *GetLoadBalancerForbidden {\n\treturn &GetLoadBalancerForbidden{}\n}", "func NewGetRoleForbidden() *GetRoleForbidden {\n\treturn &GetRoleForbidden{}\n}", "func NewGetAdminMetadataForbidden() *GetAdminMetadataForbidden {\n\treturn &GetAdminMetadataForbidden{}\n}", "func NewGetKeysForbidden() *GetKeysForbidden {\n\treturn &GetKeysForbidden{}\n}", "func NewGetAPIForbidden() *GetAPIForbidden {\n\treturn &GetAPIForbidden{}\n}", "func getNode(nd *wssdcloud.Node) *cloud.Node {\n\treturn &cloud.Node{\n\t\tName: &nd.Name,\n\t\tLocation: &nd.LocationName,\n\t\tNodeProperties: &cloud.NodeProperties{\n\t\t\tFQDN: &nd.Fqdn,\n\t\t\tPort: &nd.Port,\n\t\t\tAuthorizerPort: &nd.AuthorizerPort,\n\t\t\tCertificate: &nd.Certificate,\n\t\t\tStatuses: getNodeStatuses(nd),\n\t\t},\n\t\tVersion: &nd.Status.Version.Number,\n\t}\n}", "func NewGetClientForbidden() *GetClientForbidden {\n\treturn &GetClientForbidden{}\n}", "func NewReadFeatureForbidden() *ReadFeatureForbidden {\n\treturn &ReadFeatureForbidden{}\n}", "func (aee *ActiveEndpointsError) Forbidden() {}", "func Forbidden(msg string) error {\n\treturn &forbiddenError{errors.New(msg)}\n}", "func Forbidden(messages ...string) Error {\n\treturn createMessageError(http.StatusText(http.StatusForbidden), messages)\n}", "func NewGetVCenterUsingGETForbidden() *GetVCenterUsingGETForbidden {\n\treturn &GetVCenterUsingGETForbidden{}\n}", "func NewWeaviateThingsGetForbidden() *WeaviateThingsGetForbidden {\n\treturn &WeaviateThingsGetForbidden{}\n}", "func NewGetLimitsForbidden() *GetLimitsForbidden {\n\treturn &GetLimitsForbidden{}\n}", "func NewGetEntriesForbidden() *GetEntriesForbidden {\n\treturn &GetEntriesForbidden{}\n}", "func Forbidden(err error) Response {\n\tmessage := \"not authorized\"\n\tif err != nil {\n\t\tmessage = 
err.Error()\n\t}\n\treturn &errorResponse{\n\t\tcode: http.StatusForbidden,\n\t\tmsg: message,\n\t}\n}", "func NewGetConfigurationForbidden() *GetConfigurationForbidden {\n\treturn &GetConfigurationForbidden{}\n}", "func NewGetNewPartGeometryUploadURLForbidden() *GetNewPartGeometryUploadURLForbidden {\n\treturn &GetNewPartGeometryUploadURLForbidden{}\n}", "func NewGetRequestTrackerForbidden() *GetRequestTrackerForbidden {\n\treturn &GetRequestTrackerForbidden{}\n}", "func NewGetScannerForbidden() *GetScannerForbidden {\n\treturn &GetScannerForbidden{}\n}", "func (r *Responder) Forbidden() { r.write(http.StatusForbidden) }", "func NewGetProjectForbidden() *GetProjectForbidden {\n\treturn &GetProjectForbidden{}\n}", "func (a *FileStorageApiService) GetNodeExecute(r ApiGetNodeRequest) (SingleNode, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue SingleNode\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.Ctx, \"FileStorageApiService.GetNode\")\n\tif localBasePath == \"/\" {\n\t localBasePath = \"\"\n\t}\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v2/file_storage/buckets/{bucket_id}/nodes/{node_id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"bucket_id\"+\"}\", _neturl.PathEscape(parameterToString(r.P_bucketId, \"\")) , -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node_id\"+\"}\", _neturl.PathEscape(parameterToString(r.P_nodeId, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif r.P_directorySize != nil {\n\t\tlocalVarQueryParams.Add(\"directory_size\", parameterToString(*r.P_directorySize, \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\treq, err := a.client.prepareRequest(r.Ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, 
localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func NewGetAgentTokenForbidden() *GetAgentTokenForbidden {\n\treturn &GetAgentTokenForbidden{}\n}", "func NewObjectsGetForbidden() *ObjectsGetForbidden {\n\treturn &ObjectsGetForbidden{}\n}", "func NewGetIPAMSearchForbidden() *GetIPAMSearchForbidden {\n\treturn &GetIPAMSearchForbidden{}\n}", "func AllowBoostrapTokensToGetNodes(client clientset.Interface) error {\n\tfmt.Println(\"[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\")\n\n\tif err := apiclient.CreateOrUpdateClusterRole(client, &rbac.ClusterRole{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: constants.GetNodesClusterRoleName,\n\t\t\tNamespace: metav1.NamespaceSystem,\n\t\t},\n\t\tRules: []rbac.PolicyRule{\n\t\t\t{\n\t\t\t\tVerbs: []string{\"get\"},\n\t\t\t\tAPIGroups: []string{\"\"},\n\t\t\t\tResources: []string{\"nodes\"},\n\t\t\t},\n\t\t},\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\treturn apiclient.CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: constants.GetNodesClusterRoleName,\n\t\t\tNamespace: metav1.NamespaceSystem,\n\t\t},\n\t\tRoleRef: rbac.RoleRef{\n\t\t\tAPIGroup: rbac.GroupName,\n\t\t\tKind: \"ClusterRole\",\n\t\t\tName: constants.GetNodesClusterRoleName,\n\t\t},\n\t\tSubjects: []rbac.Subject{\n\t\t\t{\n\t\t\t\tKind: rbac.GroupKind,\n\t\t\t\tName: constants.NodeBootstrapTokenAuthGroup,\n\t\t\t},\n\t\t},\n\t})\n}", "func NewGetRepoManifestForbidden() *GetRepoManifestForbidden {\n\treturn &GetRepoManifestForbidden{}\n}", "func NewGetServiceLevelForbidden() *GetServiceLevelForbidden {\n\treturn &GetServiceLevelForbidden{}\n}", "func getForbiddenResponse(req *http.Request) *http.Response {\n\treturn buildResponse(req, http.StatusForbidden, \"403 Forbidden\", \"403 Forbidden\", nil)\n}", "func Forbidden(message ...interface{}) Err {\n\treturn Boomify(http.StatusForbidden, message...)\n}", "func NewGetRefreshTokenForbidden() *GetRefreshTokenForbidden {\n\n\treturn &GetRefreshTokenForbidden{}\n}", "func NewForbidden() *AppError {\n\treturn NewForbiddenR(StatusText(PermissionDenied))\n}", "func NewGetGitignoreTemplatesForbidden() *GetGitignoreTemplatesForbidden {\n\treturn &GetGitignoreTemplatesForbidden{}\n}", "func Forbidden(msg string) ErrorResponse {\n\tif msg == \"\" {\n\t\tmsg = \"You are not authorized to perform the requested action.\"\n\t}\n\treturn ErrorResponse{\n\t\tStatus: http.StatusForbidden,\n\t\tMessage: msg,\n\t}\n}", "func Forbidden(msg string) error {\n\tif msg == \"\" {\n\t\tmsg = \"no está autorizado a realizar la acción solicitada.\"\n\t}\n\treturn echo.NewHTTPError(http.StatusForbidden, msg)\n}", "func forbidden(rw http.ResponseWriter, r *http.Request) {\n\n}", "func NewGetClusterInstallConfigForbidden() *GetClusterInstallConfigForbidden {\n\n\treturn &GetClusterInstallConfigForbidden{}\n}", "func NewGetTiersForbidden() *GetTiersForbidden {\n\treturn &GetTiersForbidden{}\n}", "func NewGetPublicIPByUUIDUsingGETForbidden() *GetPublicIPByUUIDUsingGETForbidden {\n\treturn &GetPublicIPByUUIDUsingGETForbidden{}\n}", "func NewRedirectUsingHEADForbidden() *RedirectUsingHEADForbidden {\n\treturn &RedirectUsingHEADForbidden{}\n}", "func NewCreateForbidden() *CreateForbidden {\n\treturn &CreateForbidden{}\n}", 
"func NewGetInternalconfigForbidden() *GetInternalconfigForbidden {\n\treturn &GetInternalconfigForbidden{}\n}", "func Forbidden(w http.ResponseWriter, err error) {\n\t(Response{Error: err.Error()}).json(w, http.StatusForbidden)\n}", "func NewGetOneColumnForbidden() *GetOneColumnForbidden {\n\treturn &GetOneColumnForbidden{}\n}", "func NewGetCustomersForbidden() *GetCustomersForbidden {\n\treturn &GetCustomersForbidden{}\n}", "func NewGetNetworkSharesForbidden() *GetNetworkSharesForbidden {\n\treturn &GetNetworkSharesForbidden{}\n}", "func NewGetPayportForbidden() *GetPayportForbidden {\n\treturn &GetPayportForbidden{}\n}", "func NewQueryForbiddenInfoListRequestWithoutParam() *QueryForbiddenInfoListRequest {\n\n return &QueryForbiddenInfoListRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/forbiddenInfo:query\",\n Method: \"POST\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}", "func (s *server) GetNode(context.Context, *goblinpb.GetNodeRequest) (*goblinpb.GetNodeResponse, error) {\n\treturn &goblinpb.GetNodeResponse{\n\t\tName: s.pool.GetName(),\n\t\tAddr: s.pool.GetMemberlistAddress(),\n\t}, nil\n}", "func NewGetReplicationPolicyForbidden() *GetReplicationPolicyForbidden {\n\treturn &GetReplicationPolicyForbidden{}\n}", "func NewTestEndpointForbidden() *TestEndpointForbidden {\n\n\treturn &TestEndpointForbidden{}\n}", "func (r ApiCreateHyperflexNodeConfigPolicyRequest) IfNoneMatch(ifNoneMatch string) ApiCreateHyperflexNodeConfigPolicyRequest {\n\tr.ifNoneMatch = &ifNoneMatch\n\treturn r\n}", "func AsForbidden(err error) error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\treturn &forbiddenError{err}\n}", "func NewDisableHostForbidden() *DisableHostForbidden {\n\treturn &DisableHostForbidden{}\n}", "func NewGetControllerServiceForbidden() *GetControllerServiceForbidden {\n\treturn &GetControllerServiceForbidden{}\n}", "func NewGetUsingGET5Forbidden() *GetUsingGET5Forbidden {\n\treturn &GetUsingGET5Forbidden{}\n}", "func NewGetClusterMethodNotAllowed() *GetClusterMethodNotAllowed {\n\n\treturn &GetClusterMethodNotAllowed{}\n}", "func newDefaultDenyResponse() *admission.AdmissionResponse {\n\treturn &admission.AdmissionResponse{\n\t\tAllowed: false,\n\t\tResult: &metav1.Status{},\n\t}\n}", "func ForbiddenErr(err error, format string, args ...interface{}) error {\n\treturn NewError(http.StatusForbidden, err, format, args...)\n}", "func newNode(nodePath string) Node {\n\treturn &nodeImpl{nodePath: nodePath}\n}", "func NewGetDatalinksForbidden() *GetDatalinksForbidden {\n\treturn &GetDatalinksForbidden{}\n}", "func NewGetPrimeEntitlementsForbidden() *GetPrimeEntitlementsForbidden {\n\treturn &GetPrimeEntitlementsForbidden{}\n}", "func (r *Reply) Forbidden() *Reply {\n\treturn r.Status(http.StatusForbidden)\n}" ]
[ "0.66143495", "0.641075", "0.5909271", "0.5888174", "0.58836365", "0.58721966", "0.5767608", "0.5767608", "0.5693569", "0.5692554", "0.5689131", "0.5659587", "0.56503874", "0.56132436", "0.561185", "0.5604984", "0.55879843", "0.55849236", "0.55363446", "0.5534028", "0.55137503", "0.5503575", "0.5443294", "0.5427588", "0.54206604", "0.53871584", "0.53699064", "0.5339268", "0.5329777", "0.5289094", "0.52745926", "0.5269479", "0.52361155", "0.5234522", "0.5232539", "0.52268475", "0.522073", "0.52122384", "0.5210472", "0.51955044", "0.5186449", "0.51776284", "0.51769894", "0.5164853", "0.5160715", "0.51327527", "0.5111244", "0.51104647", "0.51061857", "0.5105296", "0.50967604", "0.50929976", "0.5092932", "0.5082233", "0.5078964", "0.507366", "0.507254", "0.504839", "0.50293237", "0.50091785", "0.50061435", "0.4990184", "0.498015", "0.4979366", "0.49714556", "0.49643293", "0.49624345", "0.49619445", "0.49615937", "0.4958707", "0.49553224", "0.49535775", "0.49502936", "0.4924322", "0.49186328", "0.49125963", "0.49074736", "0.49070895", "0.49028462", "0.4900463", "0.48954496", "0.48880312", "0.4881672", "0.48759043", "0.48693776", "0.48529375", "0.48299655", "0.48178658", "0.48138934", "0.48086414", "0.48059276", "0.47993633", "0.47935373", "0.4781097", "0.4779285", "0.47764805", "0.47710904", "0.47667035", "0.4765677", "0.4748898" ]
0.81456625
0
NewGetNodeNotFound creates a GetNodeNotFound with default headers values
func NewGetNodeNotFound() *GetNodeNotFound { return &GetNodeNotFound{} }
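Below is a minimal, stdlib-only sketch of how a responder like this one is typically exercised. The WriteResponse signature is simplified from the go-swagger generated form (which also takes a runtime.Producer), so the type here is a stand-in for illustration rather than the generated package:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// Stand-in for the generated responder; in a real go-swagger project this
// type lives in the generated restapi/operations package (name assumed).
type GetNodeNotFound struct{}

// NewGetNodeNotFound creates a GetNodeNotFound with default headers values.
func NewGetNodeNotFound() *GetNodeNotFound { return &GetNodeNotFound{} }

// WriteResponse writes the bare 404 status, as generated code does for a
// responder that carries no payload.
func (o *GetNodeNotFound) WriteResponse(rw http.ResponseWriter) {
	rw.WriteHeader(http.StatusNotFound)
}

func main() {
	rec := httptest.NewRecorder()
	NewGetNodeNotFound().WriteResponse(rec)
	fmt.Println(rec.Code) // 404
}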
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewNotFound() error {\n\treturn requestError{\n\t\tClientError: ClientError{\n\t\t\tErrors: []clientErrorSubError{{Message: \"status code 404\"}},\n\t\t},\n\t}\n}", "func NewGetNodeBadRequest() *GetNodeBadRequest {\n\treturn &GetNodeBadRequest{}\n}", "func NewNotFound(parameters ...wparams.ParamStorer) Error {\n\treturn newGenericError(nil, DefaultNotFound, wparams.NewParamStorer(parameters...))\n}", "func NewGetNodeUnauthorized() *GetNodeUnauthorized {\n\treturn &GetNodeUnauthorized{}\n}", "func NewNotFound(s string, v ...interface{}) error {\n\treturn asNotFound(fmt.Errorf(s, v...))\n}", "func NotFound(target int) error {\n\treturn fmt.Errorf(\"could not find node matching: %d\", target)\n}", "func NotFound(msg string) Error {\n\te := err{msg: msg, code: notFoundCode, group: generic, kind: notFound}\n\treturn &e\n}", "func NewGetNotFound(body *GetNotFoundResponseBody) *goa.ServiceError {\n\tv := &goa.ServiceError{\n\t\tName: *body.Name,\n\t\tID: *body.ID,\n\t\tMessage: *body.Message,\n\t\tTemporary: *body.Temporary,\n\t\tTimeout: *body.Timeout,\n\t\tFault: *body.Fault,\n\t}\n\n\treturn v\n}", "func NewGetNotFound(body *GetNotFoundResponseBody) *goa.ServiceError {\n\tv := &goa.ServiceError{\n\t\tName: *body.Name,\n\t\tID: *body.ID,\n\t\tMessage: *body.Message,\n\t\tTemporary: *body.Temporary,\n\t\tTimeout: *body.Timeout,\n\t\tFault: *body.Fault,\n\t}\n\n\treturn v\n}", "func NewNotFound(err error, msg ...string) *Errs {\n\tif err == nil {\n\t\terr = ErrNotFound\n\t}\n\treturn &Errs{\n\t\tcodeHTTP: http.StatusNotFound,\n\t\terr: err,\n\t\tkind: trace(2),\n\t\tmessage: msg,\n\t}\n}", "func NewNotFound(msg string) error {\n\treturn &ELBError{\n\t\tmsg: msg,\n\t\tCode: http.StatusNotFound,\n\t}\n}", "func NewNotFound(a Attributes) error {\n\tname, resource, err := extractResourceName(a)\n\tif err != nil {\n\t\treturn apierrors.NewInternalError(err)\n\t}\n\treturn apierrors.NewNotFound(resource, name)\n}", "func NewNotFound(name, group, resource string) error {\n\treturn errors.NewNotFound(schema.GroupResource{Group: group, Resource: resource}, name)\n}", "func NodeGet(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\n\tif &node != nil {\n\t\tw.WriteHeader(200)\n\t\tif err := json.NewEncoder(w).Encode(node); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t} else {\n\t\tparamError := swl.ParamError{\n\t\t\tError: \"Node not found\",\n\t\t}\n\t\tw.WriteHeader(404)\n\t\tif err := json.NewEncoder(w).Encode(paramError); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}", "func NewGetTagNotFound() *GetTagNotFound {\n\treturn &GetTagNotFound{}\n}", "func (nt NetworkTypeError) NotFound() {}", "func (une *UnknownNetworkError) NotFound() {}", "func NewGetConfigurationSourceTreeUsingGETNotFound() *GetConfigurationSourceTreeUsingGETNotFound {\n\treturn &GetConfigurationSourceTreeUsingGETNotFound{}\n}", "func (nsn ErrNoSuchNetwork) NotFound() {}", "func NewGetNicsNotFound() *GetNicsNotFound {\n\treturn &GetNicsNotFound{}\n}", "func (Http) getNotFound(name string) *Http {\n\treturn &Http{\n\t\tCode: http.StatusNotFound,\n\t\tStatus: http.StatusText(http.StatusNotFound),\n\t\tMessage: fmt.Sprintf(\"%s not found\", toUpperFirstChar(name)),\n\t}\n}", "func NewNotFoundError(message string)*RestErr{\n\treturn &RestErr{\n\t\tMessage: message,\n\t\tStatus: http.StatusNotFound,\n\t\tError: \"Not Found\",\n\t}\n}", "func NewGetNodeTopoesConnectionNotFound() *GetNodeTopoesConnectionNotFound {\n\treturn &GetNodeTopoesConnectionNotFound{}\n}", "func 
NotFound(fn http.HandlerFunc) {\n\tinfoMutex.Lock()\n\tvestigo.CustomNotFoundHandlerFunc(fn)\n\tinfoMutex.Unlock()\n}", "func NotFound(msg string) error {\n\tif msg == \"\" {\n\t\tmsg = \"no se ha encontrado el recurso solicitado.\"\n\t}\n\treturn echo.NewHTTPError(http.StatusNotFound, msg)\n}", "func NotFound(err error) error {\n\treturn New(http.StatusNotFound, err)\n}", "func (ctx *Context) NotFound(err error, message string) *HTTPError {\n\treturn notFoundError(err, message)\n}", "func NewGetNodeOK() *GetNodeOK {\n\treturn &GetNodeOK{}\n}", "func notFound(resp *ApiResponse, msg string) error {\n resp.StatusCode = http.StatusNotFound\n resp.Message = []byte(msg)\n resp.ErrorMessage = http.StatusText(http.StatusNotFound)\n\n return nil\n}", "func getNode(ctx context.Context, client client.Interface, nodeName string) *libapi.Node {\n\tnode, err := client.Nodes().Get(ctx, nodeName, options.GetOptions{})\n\tif err != nil {\n\t\tif _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {\n\t\t\tlog.WithError(err).WithField(\"Name\", nodeName).Info(\"Unable to query node configuration\")\n\t\t\tlog.Warn(\"Unable to access datastore to query node configuration\")\n\t\t\tutils.Terminate()\n\t\t}\n\n\t\tlog.WithField(\"Name\", nodeName).Info(\"Building new node resource\")\n\t\tnode = libapi.NewNode()\n\t\tnode.Name = nodeName\n\t}\n\n\treturn node\n}", "func NewGetSectionNotFound() *GetSectionNotFound {\n\n\treturn &GetSectionNotFound{}\n}", "func NotFound(msg string) error {\n\treturn &notFoundError{errors.New(msg)}\n}", "func NewGetConstructorNotFound() *GetConstructorNotFound {\n\treturn &GetConstructorNotFound{}\n}", "func (nse ErrNoSuchEndpoint) NotFound() {}", "func newFindNodeProtocol(service service.Service, rt RoutingTable) *findNodeProtocol {\n\n\tp := &findNodeProtocol{\n\t\trt: rt,\n\t\tpending: make(map[crypto.UUID]chan findNodeResults),\n\t\tingressChannel: service.RegisterDirectProtocol(protocol),\n\t\tservice: service,\n\t}\n\n\tif srv, ok := service.(localService); ok {\n\t\tp.log = srv.LocalNode().Log\n\t} else {\n\t\tp.log = log.AppLog\n\t}\n\n\tgo p.readLoop()\n\n\treturn p\n}", "func notFound(resource string) middleware.Responder {\n\tmessage := fmt.Sprintf(\"404 %s not found\", resource)\n\treturn operations.NewGetChartDefault(http.StatusNotFound).WithPayload(\n\t\t&models.Error{Code: helpers.Int64ToPtr(http.StatusNotFound), Message: &message},\n\t)\n}", "func NewDeregisterNodeNotFound() *DeregisterNodeNotFound {\n\n\treturn &DeregisterNodeNotFound{}\n}", "func NewGetNodeForbidden() *GetNodeForbidden {\n\treturn &GetNodeForbidden{}\n}", "func NotFound(resource string) *APIError {\n\treturn NewAPIError(http.StatusNotFound, \"NOT_FOUND\", Params{\"resource\": resource}, \"\")\n}", "func NewGetEntriesNotFound() *GetEntriesNotFound {\n\treturn &GetEntriesNotFound{}\n}", "func (a *API) getNotFound(ctx context.Context, res *common.HttpResponseWriter) error {\n\tres.WriteHeader(http.StatusNotFound)\n\treturn nil\n}", "func NotFound(format string, args ...interface{}) error {\n\targs = append(args, withDefaultMessage(NotFoundDefaultMsg))\n\treturn Errorf(http.StatusNotFound, format, args...)\n}", "func NewGetModelRegistryNotFound() *GetModelRegistryNotFound {\n\treturn &GetModelRegistryNotFound{}\n}", "func NewGetLinkInfoNotFound() *GetLinkInfoNotFound {\n\n\treturn &GetLinkInfoNotFound{}\n}", "func (a *FileStorageApiService) GetNode(Ctx _context.Context, bucketId string, nodeId string) ApiGetNodeRequest {\n\treturn ApiGetNodeRequest{\n\t\tApiService: a,\n\t\tCtx: 
Ctx,\n\t\tP_bucketId: bucketId,\n\t\tP_nodeId: nodeId,\n\t}\n}", "func NotFound(w http.ResponseWriter, r *http.Request) {\n\tresponse := response.CreateResponse()\n\tresponse.SendDataWithStatusCode(w, \"not found\", http.StatusOK)\n}", "func NotFound(c *routing.Context, msg string, service string) error {\n\tResponse(c, `{\"error\": true, \"msg\": \"`+msg+`\"}`, 404, service, \"application/json\")\n\treturn nil\n}", "func noFound(msg string) error {\n\treturn status.Error(codes.NotFound, msg)\n}", "func NewGetDebugRequestNotFound() *GetDebugRequestNotFound {\n\treturn &GetDebugRequestNotFound{}\n}", "func NewGetEveroutePackagesNotFound() *GetEveroutePackagesNotFound {\n\treturn &GetEveroutePackagesNotFound{}\n}", "func NotFound(w http.ResponseWriter, r *http.Request) { Error(w, \"404 page not found\", http.StatusNotFound) }", "func (ctx *GetByIDHostContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func NewGetClusterNotFound() *GetClusterNotFound {\n\n\treturn &GetClusterNotFound{}\n}", "func (ctx *GetLocationsContext) NotFound(r *Error) error {\n\tctx.ResponseData.Header().Set(\"Content-Type\", \"error\")\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 404, r)\n}", "func NotFound(w ResponseWriter, r *Request) { Error(w, \"404 page not found\", StatusNotFound) }", "func NewGetNFTContractTokenNotFound() *GetNFTContractTokenNotFound {\n\n\treturn &GetNFTContractTokenNotFound{}\n}", "func fixedHashGetNodeFunc(depth int, index int64) (trillian.Hash, error) {\n\treturn []byte(\"12345678901234567890123456789012\"), nil\n}", "func NewGetNodesDefault(code int) *GetNodesDefault {\n\treturn &GetNodesDefault{\n\t\t_statusCode: code,\n\t}\n}", "func MakeGetNodeEndpoint(s registry.Service) endpoint.Endpoint {\n\treturn func(ctx context.Context, request interface{}) (interface{}, error) {\n\t\treq := request.(GetNodeRequest)\n\t\tnode, err := s.GetNode(ctx, req.Token, req.Id)\n\t\treturn GetNodeResponse{\n\t\t\tErr: err,\n\t\t\tNode: node,\n\t\t}, nil\n\t}\n}", "func NewGetVCenterUsingGETNotFound() *GetVCenterUsingGETNotFound {\n\treturn &GetVCenterUsingGETNotFound{}\n}", "func WrapWithNotFound(cause error, parameters ...wparams.ParamStorer) Error {\n\treturn newGenericError(cause, DefaultNotFound, wparams.NewParamStorer(parameters...))\n}", "func notFound(w http.ResponseWriter, req *http.Request) {\n\t// w.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\tapiError := apirouter.ErrorFromRequest(req, fmt.Sprintf(\"404 occurred: %s\", req.RequestURI), \"Whoops - this request is not recognized\", http.StatusNotFound, http.StatusNotFound, \"\")\n\tapirouter.ReturnResponse(w, req, apiError.Code, apiError)\n}", "func getNode(nd *wssdcloud.Node) *cloud.Node {\n\treturn &cloud.Node{\n\t\tName: &nd.Name,\n\t\tLocation: &nd.LocationName,\n\t\tNodeProperties: &cloud.NodeProperties{\n\t\t\tFQDN: &nd.Fqdn,\n\t\t\tPort: &nd.Port,\n\t\t\tAuthorizerPort: &nd.AuthorizerPort,\n\t\t\tCertificate: &nd.Certificate,\n\t\t\tStatuses: getNodeStatuses(nd),\n\t\t},\n\t\tVersion: &nd.Status.Version.Number,\n\t}\n}", "func NewGetModelNotFound() *GetModelNotFound {\n\treturn &GetModelNotFound{}\n}", "func NewGetNamespaceNotFound() *GetNamespaceNotFound {\n\treturn &GetNamespaceNotFound{}\n}", "func NewGetNamespaceNotFound() *GetNamespaceNotFound {\n\treturn &GetNamespaceNotFound{}\n}", "func NotFound(w ResponseWriter, r *Request) {\n\tw.SetHeader(CodeNotFound, \"not found\")\n}", "func (f Function) NotFound() error {\n\treturn f.unsafeWrap(f.New(\"not found\"), \"\", \"resource 
not found\")\n}", "func NotFound(rw http.ResponseWriter) {\n\tHttpError(rw, \"not found\", 404)\n}", "func (uee *UnknownEndpointError) NotFound() {}", "func NotFound(message ...interface{}) Err {\n\treturn Boomify(http.StatusNotFound, message...)\n}", "func (hr *httpRouter) NotFound(h http.Handler) {\n\thr.notFound = h\n}", "func NewThreadGetOneNotFound() *ThreadGetOneNotFound {\n\n\treturn &ThreadGetOneNotFound{}\n}", "func NewByNamespaceNotFound() *ByNamespaceNotFound {\n\treturn &ByNamespaceNotFound{}\n}", "func (b *Baa) DefaultNotFoundHandler(c *Context) {\n\tcode := http.StatusNotFound\n\tmsg := http.StatusText(code)\n\thttp.Error(c.Resp, msg, code)\n}", "func NewObjectsGetNotFound() *ObjectsGetNotFound {\n\treturn &ObjectsGetNotFound{}\n}", "func NewGetByUIDNotFound() *GetByUIDNotFound {\n\treturn &GetByUIDNotFound{}\n}", "func NewGetVariableRegistryUpdateRequestNotFound() *GetVariableRegistryUpdateRequestNotFound {\n\treturn &GetVariableRegistryUpdateRequestNotFound{}\n}", "func (ctx *GetAllHostContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func NewNewDiscoveryDefault(code int) *NewDiscoveryDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &NewDiscoveryDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewGetDocumentNotFound() *GetDocumentNotFound {\n\n\treturn &GetDocumentNotFound{}\n}", "func NotFoundHandler() Handler { return HandlerFunc(NotFound) }", "func NotFoundHandler() Handler { return HandlerFunc(NotFound) }", "func NewSystemGetNotFound() *SystemGetNotFound {\n\treturn &SystemGetNotFound{}\n}", "func AsNotFound(err error) error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\treturn &notFoundError{err}\n}", "func NewGetTiersNotFound() *GetTiersNotFound {\n\treturn &GetTiersNotFound{}\n}", "func (c ApiWrapper) NotFound(msg string, objs ...interface{}) revel.Result {\n\treturn c.renderErrorString(404, fmt.Sprintf(msg, objs))\n}", "func NotFound(w http.ResponseWriter, message ...interface{}) {\n\tboom(w, 404, message...)\n}", "func getNode(ex SessionExecutor, msg CommonMessage) (rep CommonReply) {\n\tnodeMsg := msg.(nodeMessage)\n\n\tselectNodeTmpl := ex.getQuery(selectNodeOp)\n\tcn := newNode()\n\trows, err := ex.Query(fmt.Sprintf(selectNodeTmpl, nodeMsg.GetNodeTable()))\n\tif err != nil {\n\t\tdbLogger.Errorf(\"getNode query error:%s\", err)\n\t\treturn newNodeReply(nil, err)\n\t}\n\tdefer closeRowsAndLog(rows)\n\n\tfor rows.Next() {\n\t\terr := rows.Scan(&cn.name, &cn.ip, &cn.isCollector, &cn.duration, &cn.description, &cn.coords, &cn.address)\n\t\tif err != nil {\n\t\t\tdbLogger.Errorf(\"getNode fetch node row:%s\", err)\n\t\t\treturn newNodeReply(nil, err)\n\t\t}\n\t\t// Try to match the node.\n\t\tdbLogger.Infof(\"trying node matching with name:%s ip:%s\", cn.name, cn.ip)\n\t\tname, ip := nodeMsg.getNodeName(), nodeMsg.getNodeIP()\n\t\tif (name == cn.name && name != \"\") || (ip == cn.ip && ip != \"\") {\n\t\t\treturn newNodeReply(cn, nil)\n\t\t}\n\t}\n\n\treturn newNodeReply(nil, errNoNode)\n}", "func NewGetDeltaNotFound() *GetDeltaNotFound {\n\treturn &GetDeltaNotFound{}\n}", "func (r *Route) NotFound(handler http.Handler) *Route {\n\tr.handlers[notFound] = handler\n\treturn r\n}", "func notfound(out http.ResponseWriter, format string, args ...interface{}) {\n\tsend(http.StatusNotFound, out, format, args...)\n}", "func NewGetNodeUpgradesDefault(code int) *GetNodeUpgradesDefault {\n\treturn &GetNodeUpgradesDefault{\n\t\t_statusCode: code,\n\t}\n}", "func newnode(id byte, name string, value string) *xmlx.Node {\n\tnode := 
xmlx.NewNode(id)\n\tif name != \"\" {\n\t\tnode.Name = xml.Name{\n\t\t\tLocal: name,\n\t\t}\n\t}\n\tif value != \"\" {\n\t\tnode.Value = value\n\t}\n\treturn node\n}", "func NewNotFoundErr(err error) Error {\n\treturn StatusError{\n\t\tCode: http.StatusNotFound,\n\t\tErr: err,\n\t}\n}", "func newNode(nodePath string) Node {\n\treturn &nodeImpl{nodePath: nodePath}\n}", "func NewGetTxByHashNotFound() *GetTxByHashNotFound {\n\treturn &GetTxByHashNotFound{}\n}", "func (cm *Docker) MustGetNode(id string) *entity.Node {\n\tn, ok := cm.GetNode(id)\n\tif !ok {\n\t\tcollector := collector.NewDocker(cm.client, id)\n\t\tn = entity.NewNode(id, collector)\n\t\tcm.lock.Lock()\n\t\tcm.nodes[id] = n\n\t\tcm.lock.Unlock()\n\t}\n\treturn n\n}", "func NotFound(data Serializer, logging ...interface{}) Response {\n\tif data == nil {\n\t\tdata = String(\"404 Not Found\")\n\t}\n\treturn Response{Status: http.StatusNotFound, Data: data, Logging: logging}\n}", "func NotFoundf(format string, args ...interface{}) error {\n\treturn &notFoundError{fmt.Errorf(format, args...)}\n}" ]
[ "0.64209574", "0.6250749", "0.61477804", "0.613169", "0.6112314", "0.6065866", "0.6022855", "0.5953102", "0.5953102", "0.59173906", "0.591137", "0.5910799", "0.5896313", "0.5857848", "0.5851682", "0.58320904", "0.5820121", "0.5818076", "0.577545", "0.57658714", "0.57351387", "0.5729612", "0.5727366", "0.5712866", "0.5712452", "0.57103866", "0.56723756", "0.56699026", "0.5666984", "0.5663672", "0.56626785", "0.5641187", "0.559628", "0.5586403", "0.5578773", "0.55154186", "0.5499413", "0.54956496", "0.5466836", "0.5462488", "0.5458977", "0.54348403", "0.5426345", "0.5424886", "0.5424865", "0.54199815", "0.5413946", "0.54130965", "0.5406048", "0.5394904", "0.5388235", "0.53827083", "0.5375942", "0.5374732", "0.53660583", "0.53503096", "0.53463596", "0.53384817", "0.533591", "0.5331825", "0.5329533", "0.53271955", "0.532647", "0.53243446", "0.53239024", "0.53239024", "0.53200555", "0.5313517", "0.5301132", "0.5294963", "0.528613", "0.52772135", "0.5272951", "0.52719414", "0.5262194", "0.52606094", "0.5260506", "0.52555335", "0.5254555", "0.5254425", "0.52539265", "0.52413565", "0.52413565", "0.5238865", "0.52326745", "0.5230951", "0.52287436", "0.52246255", "0.52241224", "0.5220178", "0.52183276", "0.5208816", "0.5207968", "0.5203172", "0.5202638", "0.52014476", "0.5201113", "0.5197504", "0.51878643", "0.5182742" ]
0.76597863
0
NewGetNodeUnprocessableEntity creates a GetNodeUnprocessableEntity with default headers values
func NewGetNodeUnprocessableEntity() *GetNodeUnprocessableEntity { return &GetNodeUnprocessableEntity{} }
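A sketch of the usual handler-side choice between the 422 and the 404 responder; the pickResponder helper, its validation rule, and the nodes map are all invented for illustration and do not come from the generated package:

package main

import "fmt"

// Hypothetical responders mirroring the generated constructors above.
type GetNodeNotFound struct{}
type GetNodeUnprocessableEntity struct{}

func NewGetNodeNotFound() *GetNodeNotFound { return &GetNodeNotFound{} }

func NewGetNodeUnprocessableEntity() *GetNodeUnprocessableEntity {
	return &GetNodeUnprocessableEntity{}
}

// pickResponder sketches typical handler logic: a malformed (here: empty)
// id yields the 422 responder, an unknown id the 404 one.
func pickResponder(nodes map[string]bool, id string) interface{} {
	if id == "" {
		return NewGetNodeUnprocessableEntity()
	}
	if !nodes[id] {
		return NewGetNodeNotFound()
	}
	return nil // a success responder in real generated code
}

func main() {
	nodes := map[string]bool{"node-1": true}
	fmt.Printf("%T\n", pickResponder(nodes, ""))      // *main.GetNodeUnprocessableEntity
	fmt.Printf("%T\n", pickResponder(nodes, "ghost")) // *main.GetNodeNotFound
	fmt.Printf("%v\n", pickResponder(nodes, "node-1")) // <nil>
}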
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewGetDocumentUnprocessableEntity() *GetDocumentUnprocessableEntity {\n\n\treturn &GetDocumentUnprocessableEntity{}\n}", "func (r *Responder) UnprocessableEntity() { r.write(http.StatusUnprocessableEntity) }", "func NewCreateLookmlModelUnprocessableEntity() *CreateLookmlModelUnprocessableEntity {\n\treturn &CreateLookmlModelUnprocessableEntity{}\n}", "func NewGetNodeUnauthorized() *GetNodeUnauthorized {\n\treturn &GetNodeUnauthorized{}\n}", "func NewGetDeltaUnprocessableEntity() *GetDeltaUnprocessableEntity {\n\treturn &GetDeltaUnprocessableEntity{}\n}", "func NewUndeleteNodeUnprocessableEntity() *UndeleteNodeUnprocessableEntity {\n\treturn &UndeleteNodeUnprocessableEntity{}\n}", "func NewGetTaskDetailsUnprocessableEntity() *GetTaskDetailsUnprocessableEntity {\n\n\treturn &GetTaskDetailsUnprocessableEntity{}\n}", "func createNewEmptyNode() Node {\n\tnextNewId--\n\treturn Node{\n\t\tId: nextNewId,\n\t\tVisible: true,\n\t\tTimestamp: time.Now().Format(\"2006-01-02T15:04:05Z\"),\n\t\tVersion: \"1\",\n\t}\n}", "func NewGetNodeBadRequest() *GetNodeBadRequest {\n\treturn &GetNodeBadRequest{}\n}", "func (r *Response) UnprocessableEntity(v interface{}) {\n\tr.writeResponse(http.StatusUnprocessableEntity, v)\n}", "func NewWeaviateKeyCreateUnprocessableEntity() *WeaviateKeyCreateUnprocessableEntity {\n\treturn &WeaviateKeyCreateUnprocessableEntity{}\n}", "func UnprocessableEntity(message ...interface{}) Err {\n\treturn Boomify(http.StatusUnprocessableEntity, message...)\n}", "func NewCreateChannelUnprocessableEntity() *CreateChannelUnprocessableEntity {\n\treturn &CreateChannelUnprocessableEntity{}\n}", "func NewModifyCryptokeyUnprocessableEntity() *ModifyCryptokeyUnprocessableEntity {\n\treturn &ModifyCryptokeyUnprocessableEntity{}\n}", "func NewCreateClientUnprocessableEntity() *CreateClientUnprocessableEntity {\n\treturn &CreateClientUnprocessableEntity{}\n}", "func (client MultipleResponsesClient) Get202None204NoneDefaultNone400InvalidPreparer() (*http.Request, error) {\n preparer := autorest.CreatePreparer(\n autorest.AsGet(),\n autorest.WithBaseURL(client.BaseURI),\n autorest.WithPath(\"/http/payloads/202/none/204/none/default/none/response/400/invalid\"))\n return preparer.Prepare(&http.Request{})\n}", "func (r Response) UnprocessableEntity(code string, payload Payload, header ...ResponseHeader) {\n\tr.Response(code, http.UnprocessableEntity, payload, header...)\n}", "func NewGetPaymentRequestEDIUnprocessableEntity() *GetPaymentRequestEDIUnprocessableEntity {\n\n\treturn &GetPaymentRequestEDIUnprocessableEntity{}\n}", "func createNewNodeNetworkObject(writer *bufio.Writer, sourceOsmNode *osm.Node) {\n\ttags := sourceOsmNode.TagMap()\n\n\t// Punktnetzwerk 'Fahrrad'\n\tnewOsmNode := *sourceOsmNode // copy content (don't modify origin/source node)\n\tnewOsmNode.ID = 0\n\tnewOsmNode.Tags = []osm.Tag{} // remove all source tags\n\trefValue, found := tags[\"icn_ref\"]\n\tif found {\n\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_bicycle\"}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\twriteNewNodeObject(writer, &newOsmNode)\n\t} else {\n\t\trefValue, found = tags[\"ncn_ref\"]\n\t\tif found {\n\t\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_bicycle\"}\n\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\twriteNewNodeObject(writer, &newOsmNode)\n\t\t} 
else {\n\t\t\trefValue, found = tags[\"rcn_ref\"]\n\t\t\tif found {\n\t\t\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_bicycle\"}\n\t\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\t\twriteNewNodeObject(writer, &newOsmNode)\n\t\t\t} else {\n\t\t\t\trefValue, found = tags[\"lcn_ref\"]\n\t\t\t\tif found {\n\t\t\t\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_bicycle\"}\n\t\t\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\t\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\t\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\t\t\twriteNewNodeObject(writer, &newOsmNode)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Punktnetzwerk 'Wandern'\n\tnewOsmNode = *sourceOsmNode // copy content (don't modify origin/source node)\n\tnewOsmNode.ID = 0\n\tnewOsmNode.Tags = []osm.Tag{} // remove all source tags\n\trefValue, found = tags[\"iwn_ref\"]\n\tif found {\n\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_hiking\"}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\twriteNewNodeObject(writer, &newOsmNode)\n\t} else {\n\t\trefValue, found = tags[\"nwn_ref\"]\n\t\tif found {\n\t\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_hiking\"}\n\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\twriteNewNodeObject(writer, &newOsmNode)\n\t\t} else {\n\t\t\trefValue, found = tags[\"rwn_ref\"]\n\t\t\tif found {\n\t\t\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_hiking\"}\n\t\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\t\twriteNewNodeObject(writer, &newOsmNode)\n\t\t\t} else {\n\t\t\t\trefValue, found = tags[\"lwn_ref\"]\n\t\t\t\tif found {\n\t\t\t\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_hiking\"}\n\t\t\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\t\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\t\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\t\t\twriteNewNodeObject(writer, &newOsmNode)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Punktnetzwerk 'Inline-Skaten'\n\tnewOsmNode = *sourceOsmNode // copy content (don't modify origin/source node)\n\tnewOsmNode.ID = 0\n\tnewOsmNode.Tags = []osm.Tag{} // remove all source tags\n\trefValue, found = tags[\"rin_ref\"]\n\tif found {\n\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_inline_skates\"}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\twriteNewNodeObject(writer, &newOsmNode)\n\t}\n\n\t// Punktnetzwerk 'Reiten'\n\tnewOsmNode = *sourceOsmNode // copy content (don't modify origin/source node)\n\tnewOsmNode.ID = 0\n\tnewOsmNode.Tags = []osm.Tag{} // remove all source tags\n\trefValue, found = tags[\"rhn_ref\"]\n\tif found {\n\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_horse\"}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\twriteNewNodeObject(writer, &newOsmNode)\n\t}\n\n\t// Punktnetzwerk 'Kanu'\n\tnewOsmNode = *sourceOsmNode // copy content (don't modify origin/source 
node)\n\tnewOsmNode.ID = 0\n\tnewOsmNode.Tags = []osm.Tag{} // remove all source tags\n\trefValue, found = tags[\"rpn_ref\"]\n\tif found {\n\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_canoe\"}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\twriteNewNodeObject(writer, &newOsmNode)\n\t}\n\n\t// Punktnetzwerk 'Motorboot'\n\tnewOsmNode = *sourceOsmNode // copy content (don't modify origin/source node)\n\tnewOsmNode.ID = 0\n\tnewOsmNode.Tags = []osm.Tag{} // remove all source tags\n\trefValue, found = tags[\"rmn_ref\"]\n\tif found {\n\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_motorboat\"}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\twriteNewNodeObject(writer, &newOsmNode)\n\t}\n}", "func NewCreateLocationUnprocessableEntity() *CreateLocationUnprocessableEntity {\n\treturn &CreateLocationUnprocessableEntity{}\n}", "func NewGetNodeForbidden() *GetNodeForbidden {\n\treturn &GetNodeForbidden{}\n}", "func NewWeaviateThingsPatchUnprocessableEntity() *WeaviateThingsPatchUnprocessableEntity {\n\treturn &WeaviateThingsPatchUnprocessableEntity{}\n}", "func NewCreatePackageUnprocessableEntity() *CreatePackageUnprocessableEntity {\n\treturn &CreatePackageUnprocessableEntity{}\n}", "func (client MultipleResponsesClient) Get202None204NoneDefaultNone202InvalidPreparer() (*http.Request, error) {\n preparer := autorest.CreatePreparer(\n autorest.AsGet(),\n autorest.WithBaseURL(client.BaseURI),\n autorest.WithPath(\"/http/payloads/202/none/204/none/default/none/response/202/invalid\"))\n return preparer.Prepare(&http.Request{})\n}", "func NewCreateMTOServiceItemUnprocessableEntity() *CreateMTOServiceItemUnprocessableEntity {\n\treturn &CreateMTOServiceItemUnprocessableEntity{}\n}", "func (client MultipleResponsesClient) GetDefaultNone400InvalidPreparer() (*http.Request, error) {\n preparer := autorest.CreatePreparer(\n autorest.AsGet(),\n autorest.WithBaseURL(client.BaseURI),\n autorest.WithPath(\"/http/payloads/default/none/response/400/invalid\"))\n return preparer.Prepare(&http.Request{})\n}", "func NewCreateProductUnprocessableEntity() *CreateProductUnprocessableEntity {\n\treturn &CreateProductUnprocessableEntity{}\n}", "func NewImportArchiveUnprocessableEntity() *ImportArchiveUnprocessableEntity {\n\treturn &ImportArchiveUnprocessableEntity{}\n}", "func NewCreateMergeQueryUnprocessableEntity() *CreateMergeQueryUnprocessableEntity {\n\treturn &CreateMergeQueryUnprocessableEntity{}\n}", "func (client MultipleResponsesClient) GetDefaultModelA400NonePreparer() (*http.Request, error) {\n preparer := autorest.CreatePreparer(\n autorest.AsGet(),\n autorest.WithBaseURL(client.BaseURI),\n autorest.WithPath(\"/http/payloads/default/A/response/400/none\"))\n return preparer.Prepare(&http.Request{})\n}", "func NewWeaviateSchemaThingsPropertiesAddUnprocessableEntity() *WeaviateSchemaThingsPropertiesAddUnprocessableEntity {\n\treturn &WeaviateSchemaThingsPropertiesAddUnprocessableEntity{}\n}", "func NewDeleteComponentUnprocessableEntity() *DeleteComponentUnprocessableEntity {\n\treturn &DeleteComponentUnprocessableEntity{}\n}", "func (client MultipleResponsesClient) GetDefaultNone200InvalidPreparer() (*http.Request, error) {\n preparer := autorest.CreatePreparer(\n autorest.AsGet(),\n autorest.WithBaseURL(client.BaseURI),\n 
autorest.WithPath(\"/http/payloads/default/none/response/200/invalid\"))\n return preparer.Prepare(&http.Request{})\n}", "func RenderUnprocessableEntity(w http.ResponseWriter, message ...interface{}) {\n\tRender(w, UnprocessableEntity(message...))\n}", "func NewSetRoleGroupsUnprocessableEntity() *SetRoleGroupsUnprocessableEntity {\n\treturn &SetRoleGroupsUnprocessableEntity{}\n}", "func NewDeleteDebugRequestUnprocessableEntity() *DeleteDebugRequestUnprocessableEntity {\n\treturn &DeleteDebugRequestUnprocessableEntity{}\n}", "func NewGraphqlPostUnprocessableEntity() *GraphqlPostUnprocessableEntity {\n\n\treturn &GraphqlPostUnprocessableEntity{}\n}", "func NewGetNodeNotFound() *GetNodeNotFound {\n\treturn &GetNodeNotFound{}\n}", "func NewCreateanewRtcSessionUnprocessableEntity() *CreateanewRtcSessionUnprocessableEntity {\n\treturn &CreateanewRtcSessionUnprocessableEntity{}\n}", "func NewWeaviateActionsPatchUnprocessableEntity() *WeaviateActionsPatchUnprocessableEntity {\n\n\treturn &WeaviateActionsPatchUnprocessableEntity{}\n}", "func NewObjectsPatchUnprocessableEntity() *ObjectsPatchUnprocessableEntity {\n\n\treturn &ObjectsPatchUnprocessableEntity{}\n}", "func (r *resolver) newNoopNode(name string, jobArgs map[string]interface{}) (*Node, error) {\n\tid, err := r.idGen.UID()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error making id for no-op job %s: %s\", name, err)\n\t}\n\tjid := job.NewIdWithRequestId(\"noop\", name, id, r.request.Id)\n\trj, err := r.jobFactory.Make(jid)\n\tif err != nil {\n\t\tswitch err {\n\t\tcase job.ErrUnknownJobType:\n\t\t\t// No custom noop job, use built-in default\n\t\t\trj = &noopJob{\n\t\t\t\tid: jid,\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"Error making no-op job %s: %s\", name, err)\n\t\t}\n\t}\n\tif err := rj.Create(jobArgs); err != nil {\n\t\treturn nil, fmt.Errorf(\"Error creating no-op job %s: %s\", name, err)\n\t}\n\tbytes, err := rj.Serialize()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error serializing no-op job %s: %s\", name, err)\n\t}\n\n\treturn &Node{\n\t\tName: name,\n\t\tId: id,\n\t\tSpec: &spec.NoopNode, // on the next refactor, we shouldn't need to set this ourselves\n\t\tJobBytes: bytes,\n\t}, nil\n}", "func NewCreateClaimUnprocessableEntity() *CreateClaimUnprocessableEntity {\n\treturn &CreateClaimUnprocessableEntity{}\n}", "func NodeGet(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\n\tif &node != nil {\n\t\tw.WriteHeader(200)\n\t\tif err := json.NewEncoder(w).Encode(node); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t} else {\n\t\tparamError := swl.ParamError{\n\t\t\tError: \"Node not found\",\n\t\t}\n\t\tw.WriteHeader(404)\n\t\tif err := json.NewEncoder(w).Encode(paramError); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}", "func (a *FileStorageApiService) GetNodeExecute(r ApiGetNodeRequest) (SingleNode, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue SingleNode\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.Ctx, \"FileStorageApiService.GetNode\")\n\tif localBasePath == \"/\" {\n\t localBasePath = \"\"\n\t}\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v2/file_storage/buckets/{bucket_id}/nodes/{node_id}\"\n\tlocalVarPath = 
strings.Replace(localVarPath, \"{\"+\"bucket_id\"+\"}\", _neturl.PathEscape(parameterToString(r.P_bucketId, \"\")) , -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node_id\"+\"}\", _neturl.PathEscape(parameterToString(r.P_nodeId, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif r.P_directorySize != nil {\n\t\tlocalVarQueryParams.Add(\"directory_size\", parameterToString(*r.P_directorySize, \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\treq, err := a.client.prepareRequest(r.Ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func NewCreateThemeUnprocessableEntity() *CreateThemeUnprocessableEntity {\n\treturn &CreateThemeUnprocessableEntity{}\n}", "func NewSetDefaultThemeUnprocessableEntity() *SetDefaultThemeUnprocessableEntity {\n\treturn &SetDefaultThemeUnprocessableEntity{}\n}", "func (client MultipleResponsesClient) GetDefaultModelA200NonePreparer() (*http.Request, error) {\n preparer := autorest.CreatePreparer(\n autorest.AsGet(),\n autorest.WithBaseURL(client.BaseURI),\n autorest.WithPath(\"/http/payloads/default/A/response/200/none\"))\n return preparer.Prepare(&http.Request{})\n}", "func NewGetFlagWorkflowsUnprocessableEntity() *GetFlagWorkflowsUnprocessableEntity {\n\treturn &GetFlagWorkflowsUnprocessableEntity{}\n}", "func NewObjectsCreateUnprocessableEntity() *ObjectsCreateUnprocessableEntity {\n\treturn &ObjectsCreateUnprocessableEntity{}\n}", "func (client MultipleResponsesClient) Get202None204NoneDefaultNone400NonePreparer() (*http.Request, error) {\n preparer := autorest.CreatePreparer(\n autorest.AsGet(),\n autorest.WithBaseURL(client.BaseURI),\n autorest.WithPath(\"/http/payloads/202/none/204/none/default/none/response/400/none\"))\n return 
preparer.Prepare(&http.Request{})\n}", "func NewGetAPIPublicV1TeamUnprocessableEntity() *GetAPIPublicV1TeamUnprocessableEntity {\n\treturn &GetAPIPublicV1TeamUnprocessableEntity{}\n}", "func NewCreateWidgetUnprocessableEntity() *CreateWidgetUnprocessableEntity {\n\treturn &CreateWidgetUnprocessableEntity{}\n}", "func (e Department) EntNew() ent.Ent { return &Department{} }", "func NewDeleteShipmentUnprocessableEntity() *DeleteShipmentUnprocessableEntity {\n\n\treturn &DeleteShipmentUnprocessableEntity{}\n}", "func (client MultipleResponsesClient) Get202None204NoneDefaultNone204NonePreparer() (*http.Request, error) {\n preparer := autorest.CreatePreparer(\n autorest.AsGet(),\n autorest.WithBaseURL(client.BaseURI),\n autorest.WithPath(\"/http/payloads/202/none/204/none/default/none/response/204/none\"))\n return preparer.Prepare(&http.Request{})\n}", "func NewGetNodeUpgradesUnauthorized() *GetNodeUpgradesUnauthorized {\n\treturn &GetNodeUpgradesUnauthorized{}\n}", "func NewDeleteLinkUnprocessableEntity() *DeleteLinkUnprocessableEntity {\n\treturn &DeleteLinkUnprocessableEntity{}\n}", "func NewAddProductUnprocessableEntity() *AddProductUnprocessableEntity {\n\treturn &AddProductUnprocessableEntity{}\n}", "func (client MultipleResponsesClient) GetDefaultNone400NonePreparer() (*http.Request, error) {\n preparer := autorest.CreatePreparer(\n autorest.AsGet(),\n autorest.WithBaseURL(client.BaseURI),\n autorest.WithPath(\"/http/payloads/default/none/response/400/none\"))\n return preparer.Prepare(&http.Request{})\n}", "func NewCreateCategoryUnprocessableEntity() *CreateCategoryUnprocessableEntity {\n\treturn &CreateCategoryUnprocessableEntity{}\n}", "func createNode(w http.ResponseWriter, r *http.Request) {\n\tvar newNode* Nodo\n\t//leemos el body de la petición\n\treqBody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"Datos Inválidos\")\n\t}\n\t//tomamos los valores del body y los colocamos en una variable de struct de Nodo\n\tjson.Unmarshal(reqBody, &newNode)\n\t//fmt.Printf(\"%d\",newNode.Carnet)\n\t//insertamos la raiz\n\traiz=crearNodo(raiz,newNode)\n\tescribir,err2:=json.Marshal(raiz)\n\tif err2 != nil {\n log.Fatal(err2)\n }\n\tdata := []byte(escribir)\n err = ioutil.WriteFile(\"persiste.json\", data, 0644)\n if err != nil {\n log.Fatal(err)\n }\n\tfmt.Println(\"----------------\")\n\t//preorden(raiz)\n\t//createDot(raiz)\n\t//Si todo ha salido bien, devolvemos un status code 201 y el arbol\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\trespuesta:= &Respuesta{Message:\"Alumno creado exitosamente\"}\n\tw.WriteHeader(http.StatusCreated)\n\tjson.NewEncoder(w).Encode(respuesta)\n\n}", "func NewKillQueryUnprocessableEntity() *KillQueryUnprocessableEntity {\n\treturn &KillQueryUnprocessableEntity{}\n}", "func getNode(ctx context.Context, client client.Interface, nodeName string) *libapi.Node {\n\tnode, err := client.Nodes().Get(ctx, nodeName, options.GetOptions{})\n\tif err != nil {\n\t\tif _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {\n\t\t\tlog.WithError(err).WithField(\"Name\", nodeName).Info(\"Unable to query node configuration\")\n\t\t\tlog.Warn(\"Unable to access datastore to query node configuration\")\n\t\t\tutils.Terminate()\n\t\t}\n\n\t\tlog.WithField(\"Name\", nodeName).Info(\"Building new node resource\")\n\t\tnode = libapi.NewNode()\n\t\tnode.Name = nodeName\n\t}\n\n\treturn node\n}", "func newEntityExtracter() *entityExtracter {\n\tvar mapping map[string]int\n\tvar weights []float64\n\tvar labels []string\n\n\tdec := 
getAsset(\"Maxent\", \"mapping.gob\")\n\tcheckError(dec.Decode(&mapping))\n\n\tdec = getAsset(\"Maxent\", \"weights.gob\")\n\tcheckError(dec.Decode(&weights))\n\n\tdec = getAsset(\"Maxent\", \"labels.gob\")\n\tcheckError(dec.Decode(&labels))\n\n\treturn &entityExtracter{model: newMaxentClassifier(weights, mapping, labels)}\n}", "func (client MultipleResponsesClient) Get202None204NoneDefaultNone400InvalidSender(req *http.Request) (*http.Response, error) {\n return autorest.SendWithSender(client, req)\n}", "func NewCreateMoveTaskOrderUnprocessableEntity() *CreateMoveTaskOrderUnprocessableEntity {\n\treturn &CreateMoveTaskOrderUnprocessableEntity{}\n}", "func NewSearchLogQueryUnprocessableEntity() *SearchLogQueryUnprocessableEntity {\n\treturn &SearchLogQueryUnprocessableEntity{}\n}", "func NewObjectsClassPutUnprocessableEntity() *ObjectsClassPutUnprocessableEntity {\n\n\treturn &ObjectsClassPutUnprocessableEntity{}\n}", "func DefaultEntityHandler(rsp http.ResponseWriter, req *Request, status int, content interface{}) error {\n switch e := content.(type) {\n \n case nil:\n rsp.WriteHeader(status)\n \n case Entity:\n rsp.Header().Add(\"Content-Type\", e.ContentType())\n rsp.WriteHeader(status)\n \n n, err := io.Copy(rsp, e)\n if err != nil {\n return fmt.Errorf(\"Could not write entity: %v\\nIn response to: %v %v\\nEntity: %d bytes written\", err, req.Method, req.URL, n)\n }\n \n case json.RawMessage:\n rsp.Header().Add(\"Content-Type\", \"application/json\")\n rsp.WriteHeader(status)\n \n _, err := rsp.Write([]byte(e))\n if err != nil {\n return fmt.Errorf(\"Could not write entity: %v\\nIn response to: %v %v\\nEntity: %d bytes\", err, req.Method, req.URL, len(e))\n }\n \n default:\n rsp.Header().Add(\"Content-Type\", \"application/json\")\n rsp.WriteHeader(status)\n \n data, err := json.Marshal(content)\n if err != nil {\n return fmt.Errorf(\"Could not marshal entity: %v\\nIn response to: %v %v\", err, req.Method, req.URL)\n }\n \n _, err = rsp.Write(data)\n if err != nil {\n return fmt.Errorf(\"Could not write entity: %v\\nIn response to: %v %v\\nEntity: %d bytes\", err, req.Method, req.URL, len(data))\n }\n \n }\n return nil\n}", "func (client MultipleResponsesClient) Get200ModelA400InvalidPreparer() (*http.Request, error) {\n preparer := autorest.CreatePreparer(\n autorest.AsGet(),\n autorest.WithBaseURL(client.BaseURI),\n autorest.WithPath(\"/http/payloads/200/A/response/400/invalid\"))\n return preparer.Prepare(&http.Request{})\n}", "func NewPatchReferenceEntityAttributesCodeUnprocessableEntity() *PatchReferenceEntityAttributesCodeUnprocessableEntity {\n\treturn &PatchReferenceEntityAttributesCodeUnprocessableEntity{}\n}", "func NewGetTagDefault(code int) *GetTagDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &GetTagDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewNoFileNode(data []byte, flags uint32) *NoFileNode {\n\treturn &NoFileNode{\n\t\tDataNode: NewDataNode(data),\n\t\tflags: flags,\n\t\tnopen: 0,\n\t}\n}", "func NewPatchAssetCodeUnprocessableEntity() *PatchAssetCodeUnprocessableEntity {\n\treturn &PatchAssetCodeUnprocessableEntity{}\n}", "func (client MultipleResponsesClient) Get200ModelA200InvalidPreparer() (*http.Request, error) {\n preparer := autorest.CreatePreparer(\n autorest.AsGet(),\n autorest.WithBaseURL(client.BaseURI),\n autorest.WithPath(\"/http/payloads/200/A/response/200/invalid\"))\n return preparer.Prepare(&http.Request{})\n}", "func NewGetModelRegistryNoContent() *GetModelRegistryNoContent {\n\treturn &GetModelRegistryNoContent{}\n}", "func 
NewObjectsListUnprocessableEntity() *ObjectsListUnprocessableEntity {\n\n\treturn &ObjectsListUnprocessableEntity{}\n}", "func (client MultipleResponsesClient) GetDefaultModelA400ValidPreparer() (*http.Request, error) {\n preparer := autorest.CreatePreparer(\n autorest.AsGet(),\n autorest.WithBaseURL(client.BaseURI),\n autorest.WithPath(\"/http/payloads/default/A/response/400/valid\"))\n return preparer.Prepare(&http.Request{})\n}", "func newNode(nodePath string) Node {\n\treturn &nodeImpl{nodePath: nodePath}\n}", "func NewGetNicsBadRequest() *GetNicsBadRequest {\n\treturn &GetNicsBadRequest{}\n}", "func ErrUnprocessableEntityf(format string, arguments ...interface{}) *Status {\n\treturn &Status{Code: http.StatusUnprocessableEntity, Text: fmt.Sprintf(format, arguments...)}\n}", "func newHandleGetOrHeader(\n\tcore core.Core,\n) handleGetOrHeader {\n\treturn _handleGetOrHeader{\n\t\tcore: core,\n\t\thttp: ihttp.New(),\n\t}\n}", "func (fs *Ipfs) createNode(ctx context.Context, repoPath string) (icore.CoreAPI, error) {\n\t// Open the repo\n\trepo, err := fsrepo.Open(repoPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Construct the node\n\tnodeOptions := &core.BuildCfg{\n\t\tOnline: true,\n\n\t\t// This option sets the node to be a full DHT node\n\t\t// (both fetching and storing DHT Records)\n\t\tRouting: libp2p.DHTOption,\n\n\t\t// Routing: libp2p.DHTClientOption,\n\t\t// This option sets the node to be a client DHT node (only fetching records)\n\n\t\tRepo: repo,\n\t}\n\n\tnode, err := core.NewNode(ctx, nodeOptions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfs.ipfsNode = node\n\n\t// Attach the Core API to the constructed node\n\treturn coreapi.NewCoreAPI(node)\n}", "func NewCreateanewRewriteRuleSetUnprocessableEntity() *CreateanewRewriteRuleSetUnprocessableEntity {\n\treturn &CreateanewRewriteRuleSetUnprocessableEntity{}\n}", "func NewPostProductModelProposalUnprocessableEntity() *PostProductModelProposalUnprocessableEntity {\n\treturn &PostProductModelProposalUnprocessableEntity{}\n}", "func fromPydioNodeObjectInfo(bucket string, node *tree.Node) minio.ObjectInfo {\n\n\tcType := \"application/octet-stream\"\n\tif c := node.GetStringMeta(common.MetaNamespaceMime); c != \"\" {\n\t\tcType = c\n\t}\n\tuserDefined := map[string]string{\n\t\t\"Content-Type\": cType,\n\t}\n\tvId := node.GetStringMeta(\"versionId\")\n\n\tnodePath := node.Path\n\tif node.Type == tree.NodeType_COLLECTION {\n\t\tnodePath += \"/\"\n\t}\n\treturn minio.ObjectInfo{\n\t\tBucket: bucket,\n\t\tName: nodePath,\n\t\tModTime: time.Unix(0, node.MTime*int64(time.Second)),\n\t\tSize: node.Size,\n\t\tETag: minio.CanonicalizeETag(node.Etag),\n\t\tUserDefined: userDefined,\n\t\tContentType: cType,\n\t\tContentEncoding: \"\",\n\t\tVersionID: vId,\n\t}\n}", "func NewNoopNode() *BaseNode {\n\treturn noop\n}", "func (client MultipleResponsesClient) Get200ModelA400NonePreparer() (*http.Request, error) {\n preparer := autorest.CreatePreparer(\n autorest.AsGet(),\n autorest.WithBaseURL(client.BaseURI),\n autorest.WithPath(\"/http/payloads/200/A/response/400/none\"))\n return preparer.Prepare(&http.Request{})\n}", "func (e Account) EntNew() ent.Ent { return &Account{} }", "func (e Account) EntNew() ent.Ent { return &Account{} }", "func (client ModelClient) DeleteHierarchicalEntityPreparer(ctx context.Context, appID uuid.UUID, versionID string, hEntityID uuid.UUID) (*http.Request, error) {\n\turlParameters := map[string]interface{}{\n\t\t\"Endpoint\": client.Endpoint,\n\t}\n\n\tpathParameters := 
map[string]interface{}{\n\t\t\"appId\": autorest.Encode(\"path\", appID),\n\t\t\"hEntityId\": autorest.Encode(\"path\", hEntityID),\n\t\t\"versionId\": autorest.Encode(\"path\", versionID),\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsDelete(),\n\t\tautorest.WithCustomBaseURL(\"{Endpoint}/luis/api/v2.0\", urlParameters),\n\t\tautorest.WithPathParameters(\"/apps/{appId}/versions/{versionId}/hierarchicalentities/{hEntityId}\", pathParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func (r ApiCreateHyperflexHxdpVersionRequest) IfNoneMatch(ifNoneMatch string) ApiCreateHyperflexHxdpVersionRequest {\n\tr.ifNoneMatch = &ifNoneMatch\n\treturn r\n}", "func CreateOrUpdate(ctx context.Context, client client.Interface, node *libapi.Node) (*libapi.Node, error) {\n\tif node.ResourceVersion != \"\" {\n\t\treturn client.Nodes().Update(ctx, node, options.SetOptions{})\n\t}\n\n\treturn client.Nodes().Create(ctx, node, options.SetOptions{})\n}", "func NewGetNodeUpgradesDefault(code int) *GetNodeUpgradesDefault {\n\treturn &GetNodeUpgradesDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewSendMailUnprocessableEntity() *SendMailUnprocessableEntity {\n\treturn &SendMailUnprocessableEntity{}\n}", "func (client *LROSADsClient) putNonRetry201Creating400InvalidJSONCreateRequest(ctx context.Context, product Product, options *LROSADsClientBeginPutNonRetry201Creating400InvalidJSONOptions) (*policy.Request, error) {\n\turlPath := \"/lro/nonretryerror/put/201/creating/400/invalidjson\"\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, product); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func newnode(id byte, name string, value string) *xmlx.Node {\n\tnode := xmlx.NewNode(id)\n\tif name != \"\" {\n\t\tnode.Name = xml.Name{\n\t\t\tLocal: name,\n\t\t}\n\t}\n\tif value != \"\" {\n\t\tnode.Value = value\n\t}\n\treturn node\n}", "func (cm *Docker) MustGetNode(id string) *entity.Node {\n\tn, ok := cm.GetNode(id)\n\tif !ok {\n\t\tcollector := collector.NewDocker(cm.client, id)\n\t\tn = entity.NewNode(id, collector)\n\t\tcm.lock.Lock()\n\t\tcm.nodes[id] = n\n\t\tcm.lock.Unlock()\n\t}\n\treturn n\n}", "func (client ModelClient) GetHierarchicalEntityPreparer(ctx context.Context, appID uuid.UUID, versionID string, hEntityID uuid.UUID) (*http.Request, error) {\n\turlParameters := map[string]interface{}{\n\t\t\"Endpoint\": client.Endpoint,\n\t}\n\n\tpathParameters := map[string]interface{}{\n\t\t\"appId\": autorest.Encode(\"path\", appID),\n\t\t\"hEntityId\": autorest.Encode(\"path\", hEntityID),\n\t\t\"versionId\": autorest.Encode(\"path\", versionID),\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsGet(),\n\t\tautorest.WithCustomBaseURL(\"{Endpoint}/luis/api/v2.0\", urlParameters),\n\t\tautorest.WithPathParameters(\"/apps/{appId}/versions/{versionId}/hierarchicalentities/{hEntityId}\", pathParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}" ]
[ "0.5806691", "0.54693794", "0.54425555", "0.53880847", "0.5306553", "0.5301339", "0.52171206", "0.51824343", "0.5155087", "0.5146561", "0.5083017", "0.50826794", "0.4999138", "0.4969035", "0.49644318", "0.4959862", "0.49523827", "0.4889642", "0.4889365", "0.48878306", "0.4878729", "0.4862437", "0.4855108", "0.48508266", "0.48466942", "0.48229748", "0.48228902", "0.4769739", "0.47686228", "0.4760551", "0.4751568", "0.47421166", "0.4740569", "0.4739544", "0.473671", "0.4735644", "0.47319683", "0.47245026", "0.46996576", "0.46909755", "0.46820444", "0.46703368", "0.4666905", "0.4658835", "0.46442556", "0.46318358", "0.46308595", "0.46170896", "0.46086594", "0.460579", "0.4593232", "0.45810577", "0.45686075", "0.45663503", "0.4556692", "0.45527813", "0.4547149", "0.45398095", "0.45364708", "0.45331594", "0.45247483", "0.45218712", "0.45203167", "0.4514753", "0.45061854", "0.45017636", "0.45001748", "0.44976005", "0.4487897", "0.44855827", "0.44816974", "0.44785258", "0.44549587", "0.44545594", "0.44488162", "0.44469777", "0.44464502", "0.44384614", "0.44305846", "0.44145814", "0.44085988", "0.4406755", "0.43906677", "0.43876666", "0.43773183", "0.43706098", "0.43703672", "0.436942", "0.43652564", "0.4365073", "0.4365073", "0.43642902", "0.4360404", "0.43556938", "0.43506676", "0.43489107", "0.4347012", "0.4342199", "0.43373907", "0.43301922" ]
0.712282
0
Parse returns the dummy data if Results contains a matching record.
func (p *Parser) Parse(ctx context.Context, url string, body io.Reader) (urls []string, errs []error) { b, err := ioutil.ReadAll(body) if err != nil { return nil, []error{err} } result, ok := p.Results[string(b)] if !ok { return nil, nil } for _, e := range result.Errs { errs = append(errs, errors.New(e)) } return result.Urls, errs }
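A runnable sketch of this stub parser in use; the Result struct and the Parser field declarations are reconstructed from the method body above and are assumptions about the original package:

package main

import (
	"context"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// Result and Parser as inferred from the method body.
type Result struct {
	Urls []string
	Errs []string
}

type Parser struct {
	Results map[string]Result
}

// Parse keys the canned Results map by the raw request body.
func (p *Parser) Parse(ctx context.Context, url string, body io.Reader) (urls []string, errs []error) {
	b, err := ioutil.ReadAll(body)
	if err != nil {
		return nil, []error{err}
	}
	result, ok := p.Results[string(b)]
	if !ok {
		return nil, nil
	}
	for _, e := range result.Errs {
		errs = append(errs, errors.New(e))
	}
	return result.Urls, errs
}

func main() {
	p := &Parser{Results: map[string]Result{
		"<html>seed</html>": {Urls: []string{"http://a", "http://b"}, Errs: []string{"boom"}},
	}}
	urls, errs := p.Parse(context.Background(), "http://seed", strings.NewReader("<html>seed</html>"))
	fmt.Println(urls, errs) // [http://a http://b] [boom]
}

Keying the canned results by the raw body makes the test double order-independent and easy to assert against in crawler tests, since each fake page maps directly to the URLs and errors it should yield.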
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Parse(output *bytes.Buffer) (*Result, error) {\n\tbuf := filter(output)\n\n\tset, err := parse.ParseSet(buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresult := &Result{}\n\n\tfor name, values := range set {\n\t\t(*result)[name] = NewTestResult(values[0])\n\t}\n\n\treturn result, nil\n}", "func (r Results) Parse() telebot.Results {\n\tparsedResults := make(telebot.Results, len(r))\n\n\tfor i, result := range r {\n\t\tparsedResults[i] = &telebot.ArticleResult{\n\t\t\tURL: result.SiteUrl,\n\t\t\tThumbURL: result.CoverImage.Medium,\n\t\t\tTitle: result.Title.UserPreferred,\n\t\t\tText: result.SiteUrl,\n\t\t\tDescription: result.Description,\n\t\t}\n\t\tparsedResults[i].SetResultID(strconv.Itoa(i))\n\t}\n\n\treturn parsedResults\n}", "func (h *HTTPerf) Parse() {\n h.Results = RawParser(h.Raw)\n}", "func (r *QueryResult) Parse(targets interface{}) error {\n\tbyts, _ := json.Marshal(r.Records)\n\treturn json.Unmarshal(byts, &targets)\n}", "func (r *Response) Parse(includePartialMatch bool, requireNumber bool) []Address {\n\tif r.Status != \"OK\" {\n\t\treturn nil\n\t}\n\n\tresult := make([]Address, 0, len(r.Results))\n\n\tfor _, re := range r.Results {\n\t\tif re.PartialMatch && !includePartialMatch {\n\t\t\tcontinue\n\t\t}\n\n\t\taddr := Address{}\n\n\t\taddr.AddrStr = re.Address\n\t\taddr.PlaceID = re.PlaceId\n\t\taddr.PartialMatch = re.PartialMatch\n\t\taddr.Lat = re.Geometry.Location.Lat\n\t\taddr.Lng = re.Geometry.Location.Lng\n\n\t\tfor _, part := range re.AddressParts {\n\t\t\tfor _, partType := range part.Types {\n\t\t\t\tswitch partType {\n\t\t\t\tcase \"premise\":\n\t\t\t\t\taddr.Premise = part.Name\n\t\t\t\tcase \"street_number\":\n\t\t\t\t\taddr.Number = part.Name\n\t\t\t\tcase \"route\":\n\t\t\t\t\taddr.Street = part.ShortName\n\t\t\t\tcase \"locality\":\n\t\t\t\t\taddr.City = part.Name\n\t\t\t\tcase \"postal_town\":\n\t\t\t\t\taddr.City = part.Name\n\t\t\t\tcase \"country\":\n\t\t\t\t\taddr.Country = part.ShortName\n\t\t\t\tcase \"postal_code\":\n\t\t\t\t\taddr.Postcode = part.Name\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif requireNumber && \"\" == addr.Number && \"\" == addr.Premise {\n\t\t\tcontinue\n\t\t}\n\n\t\tresult = append(result, addr)\n\t}\n\n\treturn result\n}", "func Parse(stdout []byte) []runtests.TestCaseResult {\n\tlines := bytes.Split(stdout, []byte{'\\n'})\n\tres := []*regexp.Regexp{\n\t\tctsTestPreamblePattern,\n\t\tdartSystemTestPreamblePattern,\n\t\ttrfTestPreamblePattern,\n\t\tgoogleTestPreamblePattern,\n\t\tgoTestPreamblePattern,\n\t\trustTestPreamblePattern,\n\t\tzirconUtestPreamblePattern,\n\t\tparseoutput.TestPreamblePattern,\n\t}\n\tremainingLines, match := firstMatch(lines, res)\n\n\tvar cases []runtests.TestCaseResult\n\tswitch match {\n\tcase ctsTestPreamblePattern:\n\t\tcases = parseVulkanCtsTest(remainingLines)\n\tcase dartSystemTestPreamblePattern:\n\t\tcases = parseDartSystemTest(remainingLines)\n\tcase trfTestPreamblePattern:\n\t\tcases = parseTrfTest(lines)\n\tcase googleTestPreamblePattern:\n\t\tcases = parseGoogleTest(remainingLines)\n\tcase goTestPreamblePattern:\n\t\tcases = parseGoTest(remainingLines)\n\tcase rustTestPreamblePattern:\n\t\tcases = parseRustTest(remainingLines)\n\tcase zirconUtestPreamblePattern:\n\t\tcases = parseZirconUtest(remainingLines)\n\tcase parseoutput.TestPreamblePattern:\n\t\tcases = parseNetworkConformanceTest(remainingLines)\n\t}\n\n\t// Ensure that an empty set of cases is serialized to JSON as an empty\n\t// array, not as null.\n\tif cases == nil {\n\t\tcases = []runtests.TestCaseResult{}\n\t}\n\treturn 
cases\n}", "func TestParse(t *testing.T) {\n\t/*\n\t\tf, err := os.Open(\"testdata.json\")\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tdefer f.Close()\n\n\t\tdec := json.NewDecoder(f)\n\t\tvar tr ResultData\n\t\terr = dec.Decode(&tr)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t*/\n\n\tf, err := ioutil.ReadFile(\"testdata/two-series.json\")\n\tif err != nil {\n\t\tt.Fatalf(\"Error reading test data file: %v\", err)\n\t}\n\n\ttr, err := parseData(f)\n\tif err != nil {\n\t\tt.Fatalf(\"Error parsing test data: %v\", err)\n\t}\n\n\texpStatus := \"REQUEST_SUCCEEDED\"\n\texpSeries := \"LEU0254555900\"\n\n\tstatus := tr.Status\n\tseries1 := tr.Results.Series[0].SeriesID\n\tcatalog := tr.Results.Series[0].Catalog\n\tif status != expStatus {\n\t\tt.Errorf(\"Expected status to be '%v', but got '%v'\", expStatus, status)\n\t}\n\tif series1 != expSeries {\n\t\tt.Errorf(\"Expected first seriesID to be '%v', but got '%v'\", expSeries, series1)\n\t}\n\tif catalog != nil {\n\t\tt.Error(\"Expected a nil value for catalog data, but got\", catalog)\n\t}\n\tif len(tr.Message) != 1 {\n\t\tt.Errorf(\"Expected to find 1 message, but found %v messages instead\", len(tr.Message))\n\t}\n}", "func (h *HSTIMParser) ParseResult(msg []byte) (HSTIMResult, error) {\n\tr := HSTIMResult{}\n\n\t// Verify the first character is `R`, which indicates it's a result record\n\t// Probably redundant\n\tif msg[0] != 'R' {\n\t\treturn r, &ParseError{\"Not a result record\"}\n\t}\n\n\tmsgStr := removeTrailingValues(msg)\n\tsplits := strings.Split(msgStr[2:], \"|\")\n\tif len(splits) != 12 {\n\t\treturn r, &ParseError{fmt.Sprintf(\"Expected 12 fields, got %d\", len(splits))}\n\t}\n\n\tseqNum, err := strconv.Atoi(splits[0])\n\tif err != nil {\n\t\treturn r, err\n\t}\n\tr.SequenceNumber = seqNum\n\tr.AnalyteName = splits[1]\n\tr.TestValue = splits[2]\n\tr.TestUnits = splits[3]\n\tr.ReferenceRange = splits[4]\n\tr.TestFlag = splits[5]\n\tr.TestResultType = splits[7]\n\n\t// Parse the time\n\tt, err := time.Parse(hstimTimeFmt, splits[11])\n\tif err != nil {\n\t\treturn r, &ParseError{fmt.Sprintf(\"Cannot parse `%s` as valid timestamp\", splits[11])}\n\t}\n\tr.Timestamp = t\n\n\treturn r, nil\n}", "func (cmd *batchCommandExists) parseRecordResults(ifc command, receiveSize int) (bool, error) {\n\t//Parse each message response and add it to the result array\n\tcmd.dataOffset = 0\n\n\tfor cmd.dataOffset < receiveSize {\n\t\tif err := cmd.readBytes(int(_MSG_REMAINING_HEADER_SIZE)); err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tresultCode := types.ResultCode(cmd.dataBuffer[5] & 0xFF)\n\n\t\t// The only valid server return codes are \"ok\" and \"not found\".\n\t\t// If other return codes are received, then abort the batch.\n\t\tif resultCode != 0 && resultCode != types.KEY_NOT_FOUND_ERROR {\n\t\t\tif resultCode == types.FILTERED_OUT {\n\t\t\t\tcmd.filteredOutCnt++\n\t\t\t} else {\n\t\t\t\treturn false, types.NewAerospikeError(resultCode)\n\t\t\t}\n\t\t}\n\n\t\tinfo3 := cmd.dataBuffer[3]\n\n\t\t// If cmd is the end marker of the response, do not proceed further\n\t\tif (int(info3) & _INFO3_LAST) == _INFO3_LAST {\n\t\t\treturn false, nil\n\t\t}\n\n\t\tbatchIndex := int(Buffer.BytesToUint32(cmd.dataBuffer, 14))\n\t\tfieldCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 18))\n\t\topCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 20))\n\n\t\tif opCount > 0 {\n\t\t\treturn false, types.NewAerospikeError(types.PARSE_ERROR, \"Received bins that were not requested!\")\n\t\t}\n\n\t\t_, err := cmd.parseKey(fieldCount)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\t// only set the 
results to true; as a result, no synchronization is needed\n\t\tcmd.existsArray[batchIndex] = resultCode == 0\n\t}\n\treturn true, nil\n}", "func parse(response string) (record *Record, err error) {\n\tfor _, line := range strings.Split(response, \"\\n\") {\n\t\tline = strings.TrimSpace(line)\n\t\tif line == \"\" || !strings.Contains(line, \":\") {\n\t\t\tcontinue\n\t\t}\n\t\tparts := strings.SplitN(line, \":\", 2)\n\t\tkey, value := strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1])\n\t\tif contains(CREATED_ON_KEYWORDS, strings.ToLower(key)) {\n\t\t\tif parsedDate, parseErr := now.Parse(value); parseErr != nil {\n\t\t\t\terr = parseErr\n\t\t\t} else {\n\t\t\t\trecord = &Record{CreatedOn: parsedDate, Registered: true}\n\t\t\t}\n\t\t}\n\t\t// Guard on record so an expiry line seen before a creation line cannot dereference nil\n\t\tif record != nil && contains(EXPIRED_ON_KEYWORDS, strings.ToLower(key)) {\n\t\t\tif parsedDate, parseErr := now.Parse(value); parseErr != nil {\n\t\t\t\terr = parseErr\n\t\t\t} else {\n\t\t\t\trecord.ExpiredOn = parsedDate\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n\treturn nil, errors.New(\"Unable to parse whois record\")\n}", "func (p *testOutputParser) Parse(input *bufio.Scanner) (*api.TestSuites, error) {\n\tsuites := &api.TestSuites{}\n\n\tvar testNameStack []string\n\tvar tests map[string]*api.TestCase\n\tvar output map[string][]string\n\tvar messages map[string][]string\n\tvar currentSuite *api.TestSuite\n\tvar state int\n\tvar count int\n\tvar orderedTests []string\n\n\tfor input.Scan() {\n\t\tline := input.Text()\n\t\tcount++\n\n\t\tlog(\"Line %03d: %d: %s\\n\", count, state, line)\n\n\t\tswitch state {\n\n\t\tcase stateBegin:\n\t\t\t// this is the first state\n\t\t\tname, ok := ExtractRun(line)\n\t\t\tif !ok {\n\t\t\t\t// A test that defines a test.M handler can write output prior to test execution. We will drop this because\n\t\t\t\t// we have no place to put it, although the first test case *could* use it in the future.\n\t\t\t\tlog(\" ignored output outside of suite\\n\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog(\" found run command %s\\n\", name)\n\n\t\t\tcurrentSuite = &api.TestSuite{}\n\t\t\ttests = make(map[string]*api.TestCase)\n\t\t\toutput = make(map[string][]string)\n\t\t\tmessages = make(map[string][]string)\n\n\t\t\torderedTests = []string{name}\n\t\t\ttestNameStack = []string{name}\n\t\t\ttests[testNameStack[0]] = &api.TestCase{\n\t\t\t\tName: name,\n\t\t\t}\n\n\t\t\tstate = stateOutput\n\n\t\tcase stateOutput:\n\t\t\t// open a new test for gathering output\n\t\t\tif name, ok := ExtractRun(line); ok {\n\t\t\t\tlog(\" found run command %s\\n\", name)\n\t\t\t\ttest, ok := tests[name]\n\t\t\t\tif !ok {\n\t\t\t\t\ttest = &api.TestCase{\n\t\t\t\t\t\tName: name,\n\t\t\t\t\t}\n\t\t\t\t\ttests[name] = test\n\t\t\t\t}\n\t\t\t\torderedTests = append(orderedTests, name)\n\t\t\t\ttestNameStack = []string{name}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// transition to result mode ONLY if it matches a result at the top level\n\t\t\tif result, name, depth, duration, ok := ExtractResult(line); ok && tests[name] != nil && depth == 0 {\n\t\t\t\ttest := tests[name]\n\t\t\t\tlog(\" found result %s %s %s\\n\", result, name, duration)\n\t\t\t\tswitch result {\n\t\t\t\tcase api.TestResultPass:\n\t\t\t\tcase api.TestResultFail:\n\t\t\t\t\ttest.FailureOutput = &api.FailureOutput{}\n\t\t\t\tcase api.TestResultSkip:\n\t\t\t\t\ttest.SkipMessage = &api.SkipMessage{}\n\t\t\t\t}\n\t\t\t\tif err := test.SetDuration(duration); err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"unexpected duration on line %d: %s\", count, duration)\n\t\t\t\t}\n\t\t\t\ttestNameStack = 
[]string{name}\n\t\t\t\tstate = stateResults\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// in output mode, turn output lines into output on the particular test\n\t\t\tif _, _, ok := ExtractOutput(line); ok {\n\t\t\t\tlog(\" found output\\n\")\n\t\t\t\toutput[testNameStack[0]] = append(output[testNameStack[0]], line)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog(\" fallthrough\\n\")\n\n\t\tcase stateResults:\n\t\t\toutput, depth, ok := ExtractOutput(line)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"unexpected output on line %d, can't grab results\", count)\n\t\t\t}\n\n\t\t\t// we're back to the root, we expect either a new RUN, a test suite end, or this is just an\n\t\t\t// output line that was chopped up\n\t\t\tif depth == 0 {\n\t\t\t\tif name, ok := ExtractRun(line); ok {\n\t\t\t\t\tlog(\" found run %s\\n\", name)\n\t\t\t\t\t// starting a new set of runs\n\t\t\t\t\torderedTests = append(orderedTests, name)\n\t\t\t\t\ttestNameStack = []string{name}\n\t\t\t\t\ttests[testNameStack[0]] = &api.TestCase{\n\t\t\t\t\t\tName: name,\n\t\t\t\t\t}\n\t\t\t\t\tstate = stateOutput\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tswitch {\n\t\t\t\tcase line == \"PASS\", line == \"FAIL\":\n\t\t\t\t\tlog(\" found end of suite\\n\")\n\t\t\t\t\t// at the end of the suite\n\t\t\t\t\tstate = stateComplete\n\t\t\t\tdefault:\n\t\t\t\t\t// a broken output line that was not indented\n\t\t\t\t\tlog(\" found message\\n\")\n\t\t\t\t\tname := testNameStack[len(testNameStack)-1]\n\t\t\t\t\ttest := tests[name]\n\t\t\t\t\tswitch {\n\t\t\t\t\tcase test.FailureOutput != nil, test.SkipMessage != nil:\n\t\t\t\t\t\tmessages[name] = append(messages[name], output)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// if this is a result AND we have already declared this as a test, parse it\n\t\t\tif result, name, _, duration, ok := ExtractResult(output); ok && tests[name] != nil {\n\t\t\t\tlog(\" found result %s %s (%d)\\n\", result, name, depth)\n\t\t\t\ttest := tests[name]\n\t\t\t\tswitch result {\n\t\t\t\tcase api.TestResultPass:\n\t\t\t\tcase api.TestResultFail:\n\t\t\t\t\ttest.FailureOutput = &api.FailureOutput{}\n\t\t\t\tcase api.TestResultSkip:\n\t\t\t\t\ttest.SkipMessage = &api.SkipMessage{}\n\t\t\t\t}\n\t\t\t\tif err := test.SetDuration(duration); err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"unexpected duration on line %d: %s\", count, duration)\n\t\t\t\t}\n\t\t\t\tswitch {\n\t\t\t\tcase depth >= len(testNameStack):\n\t\t\t\t\t// we found a new, more deeply nested test\n\t\t\t\t\ttestNameStack = append(testNameStack, name)\n\t\t\t\tdefault:\n\t\t\t\t\tif depth < len(testNameStack)-1 {\n\t\t\t\t\t\t// the current result is less indented than our current test, so remove the deepest\n\t\t\t\t\t\t// items from the stack\n\t\t\t\t\t\ttestNameStack = testNameStack[:depth]\n\t\t\t\t\t}\n\t\t\t\t\ttestNameStack[len(testNameStack)-1] = name\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// treat as regular output at the appropriate depth\n\t\t\tlog(\" found message line %d %v\\n\", depth, testNameStack)\n\t\t\t// BUG: in go test, double nested output is double indented for some reason\n\t\t\tif depth >= len(testNameStack) {\n\t\t\t\tdepth = len(testNameStack) - 1\n\t\t\t}\n\t\t\tname := testNameStack[depth]\n\t\t\tlog(\" name %s\\n\", name)\n\t\t\tif test, ok := tests[name]; ok {\n\t\t\t\tswitch {\n\t\t\t\tcase test.FailureOutput != nil, test.SkipMessage != nil:\n\t\t\t\t\tmessages[name] = append(messages[name], output)\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase stateComplete:\n\t\t\t// suite exit line\n\t\t\tif name, duration, coverage, ok := 
ExtractPackage(line); ok {\n\t\t\t\tcurrentSuite.Name = name\n\t\t\t\tif props, ok := ExtractProperties(coverage); ok {\n\t\t\t\t\tfor k, v := range props {\n\t\t\t\t\t\tcurrentSuite.AddProperty(k, v)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor _, name := range orderedTests {\n\t\t\t\t\ttest := tests[name]\n\t\t\t\t\tmessageLines := messages[name]\n\t\t\t\t\tvar extraOutput []string\n\t\t\t\t\tfor i, s := range messageLines {\n\t\t\t\t\t\tif s == \"=== OUTPUT\" {\n\t\t\t\t\t\t\tlog(\"test %s has OUTPUT section, %d %d\\n\", name, i, len(messageLines))\n\t\t\t\t\t\t\tif i < len(messageLines) {\n\t\t\t\t\t\t\t\tlog(\" test %s add lines: %d\\n\", name, len(messageLines[i+1:]))\n\t\t\t\t\t\t\t\textraOutput = messageLines[i+1:]\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tmessageLines = messageLines[:i]\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tswitch {\n\t\t\t\t\tcase test.FailureOutput != nil:\n\t\t\t\t\t\ttest.FailureOutput.Output = strings.Join(messageLines, \"\\n\")\n\n\t\t\t\t\t\tlines := append(output[name], extraOutput...)\n\t\t\t\t\t\ttest.SystemOut = strings.Join(lines, \"\\n\")\n\n\t\t\t\t\tcase test.SkipMessage != nil:\n\t\t\t\t\t\ttest.SkipMessage.Message = strings.Join(messageLines, \"\\n\")\n\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tlines := append(output[name], extraOutput...)\n\t\t\t\t\t\ttest.SystemOut = strings.Join(lines, \"\\n\")\n\t\t\t\t\t}\n\n\t\t\t\t\tcurrentSuite.AddTestCase(test)\n\t\t\t\t}\n\t\t\t\tif err := currentSuite.SetDuration(duration); err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"unexpected duration on line %d: %s\", count, duration)\n\t\t\t\t}\n\t\t\t\tsuites.Suites = append(suites.Suites, currentSuite)\n\n\t\t\t\tstate = stateBegin\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// coverage only line\n\t\t\tif props, ok := ExtractProperties(line); ok {\n\t\t\t\tfor k, v := range props {\n\t\t\t\t\tcurrentSuite.AddProperty(k, v)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn suites, nil\n}", "func resultParser(response *http.Response, shortURL string) ([]Structure, error) {\r\n\tdoc, err := goquery.NewDocumentFromResponse(response)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\tresults := []Structure{}\r\n\t//if statement can change what to scrape depending on website chosen (fill in this case)\r\n\tif shortURL == \"fill\" {\r\n\t\tvar item = doc.Find(\"div.CopyProtected\")\r\n\r\n\t\t//the information we want\r\n\t\tclasses := [4]string{\"div.manga_canon\", \"div.mixed_canon/filler\", \"div.filler\", \"div.anime_canon\"}\r\n\r\n\t\tfor _, search := range classes {\r\n\r\n\t\t\t//searches for information\r\n\t\t\tselection := item.Find(search)\r\n\r\n\t\t\t//saves it as text\r\n\t\t\tepisodes := selection.Find(\"span.Episodes\")\r\n\t\t\tepisodestxt := episodes.Text()\r\n\r\n\t\t\tif episodestxt == \"\" {\r\n\t\t\t\tepisodestxt = \"0\"\r\n\t\t\t}\r\n\t\t\t//saves result in the structure struct\r\n\t\t\t//the %//DEVIDER//% is used for splitting the\r\n\t\t\t//information later\r\n\t\t\tresult := Structure{\r\n\t\t\t\t\"%//DEVIDER//%\",\r\n\t\t\t\tepisodestxt,\r\n\t\t\t}\r\n\t\t\t//append the results together\r\n\t\t\tresults = append(results, result)\r\n\t\t}\r\n\r\n\t}\r\n\treturn results, err\r\n}", "func (p *Person) Parse(contents []byte, url string) (*Result, error) {\n\tperson := extractPerson(contents)\n\tres := Result{}\n\tmatches := guessPersonRe.FindAllSubmatch(contents, -1)\n\tfor _, m := range matches {\n\t\tres.Requests = append(res.Requests, Request{\n\t\t\tUrl: string(m[1]),\n\t\t\tParser: &Person{},\n\t\t})\n\t}\n\tres.Items = append(res.Items, 
Item{\n\t\tUrl: url,\n\t\tId: extractString([]byte(url), idRe),\n\t\tPayload: person,\n\t})\n\n\treturn &res, nil\n}", "func parseResults(byteVal []byte) []NavResults {\n\tvar navResults NavResults\n\tvar navResultsSlice []NavResults\n\n\tvar output FfufOutput\n\t// Check the unmarshal error instead of silently working with a zero-value struct\n\tif err := json.Unmarshal(byteVal, &output); err != nil {\n\t\tif DEBUGMODE {\n\t\t\tfmt.Printf(\"[debug] failed to unmarshal ffuf output: %v\\n\", err)\n\t\t}\n\t\treturn navResultsSlice\n\t}\n\n\tif DEBUGMODE {\n\t\tfmt.Printf(\"[debug] command line: %v\\n\", output.CommandLine)\n\t\tfmt.Printf(\"[debug] target: %v\\n\", output.Config.URL)\n\t\tfmt.Printf(\"[debug] method: %v\\n\", output.Config.Method)\n\t\tif len(output.Config.InputProviders) > 0 {\n\t\t\tfmt.Printf(\"[debug] wordlist: %v\\n\", output.Config.InputProviders[0].Value)\n\t\t}\n\t\tfmt.Printf(\"[debug] outputfile: %v\\n\", output.Config.Outputfile)\n\t\tfmt.Printf(\"[debug] time: %v\\n\", output.Time)\n\t\t// Guard the index access so an empty result set cannot panic under debug output\n\t\tif len(output.Results) > 0 {\n\t\t\tfmt.Printf(\"[debug] url: %v\\n\", output.Results[0].URL)\n\t\t}\n\t}\n\n\tnavResults.URL = output.Config.URL\n\tnavResults.Outputfile = output.Config.Outputfile\n\tif len(output.Config.InputProviders) > 0 {\n\t\tnavResults.Wordlist = output.Config.InputProviders[0].Value\n\t}\n\n\tif len(output.Results) == 0 {\n\t\t// handle storage of metadata for output file with no results\n\t\tnavResultsSlice = append(navResultsSlice, navResults)\n\t} else {\n\t\tfor i := 0; i < len(output.Results); i++ {\n\t\t\tnavResults.Endpoint = output.Results[i].URL\n\t\t\tnavResults.Status = output.Results[i].Status\n\t\t\tnavResults.Length = output.Results[i].Length\n\t\t\tnavResults.Words = output.Results[i].Words\n\t\t\tnavResults.Lines = output.Results[i].Lines\n\n\t\t\tif DEBUGMODE {\n\t\t\t\tfmt.Printf(\"%v [Status: %v, Size: %v, Words: %v, Lines: %v]\\n\", navResults.Endpoint, navResults.Status, navResults.Length, navResults.Words, navResults.Lines)\n\t\t\t}\n\n\t\t\tnavResultsSlice = append(navResultsSlice, navResults)\n\t\t}\n\t}\n\n\treturn navResultsSlice\n}", "func (p defaultStdErrParser) parseResults(b []byte) (r DefaultStdErrResults) {\n\t// Split on =\n\tvar k string\n\tfor idx, i := range bytes.Split(b, []byte(\"=\")) {\n\t\t// Split on space\n\t\tvar items = bytes.Split(bytes.TrimSpace(i), []byte(\" \"))\n\n\t\t// Parse key/value\n\t\tif len(items[0]) > 0 && len(k) > 0 {\n\t\t\tv := string(items[0])\n\t\t\tswitch k {\n\t\t\tcase \"bitrate\":\n\t\t\t\t// There may be other suffixes, but we only support this one for now\n\t\t\t\tv = strings.TrimSuffix(v, \"kbits/s\")\n\t\t\t\tif p, err := strconv.ParseFloat(v, 64); err == nil {\n\t\t\t\t\tr.Bitrate = astikit.Float64Ptr(p * 1000)\n\t\t\t\t}\n\t\t\tcase \"frame\":\n\t\t\t\tif p, err := strconv.Atoi(v); err == nil {\n\t\t\t\t\tr.Frame = astikit.IntPtr(p)\n\t\t\t\t}\n\t\t\tcase \"fps\":\n\t\t\t\tif p, err := strconv.ParseFloat(v, 64); err == nil {\n\t\t\t\t\tr.FPS = astikit.IntPtr(int(p))\n\t\t\t\t}\n\t\t\tcase \"q\":\n\t\t\t\tif p, err := strconv.ParseFloat(v, 64); err == nil {\n\t\t\t\t\tr.Q = astikit.Float64Ptr(p)\n\t\t\t\t}\n\t\t\tcase \"size\":\n\t\t\t\tif n, err := numberFromString(v); err == nil {\n\t\t\t\t\tr.Size = astikit.IntPtr(int(n.float64()))\n\t\t\t\t}\n\t\t\tcase \"speed\":\n\t\t\t\tif p, err := strconv.ParseFloat(strings.TrimSuffix(v, \"x\"), 64); err == nil {\n\t\t\t\t\tr.Speed = astikit.Float64Ptr(p)\n\t\t\t\t}\n\t\t\tcase \"time\":\n\t\t\t\t// Split on .\n\t\t\t\tvar d time.Duration\n\t\t\t\tps := strings.Split(v, \".\")\n\t\t\t\tif len(ps) > 1 {\n\t\t\t\t\tif p, err := strconv.Atoi(ps[1]); err == nil {\n\t\t\t\t\t\t// For now we make the assumption that milliseconds are in this format \".99\" and not \".999\"\n\t\t\t\t\t\td += time.Duration(p*10) * time.Millisecond\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Split on :\n\t\t\t\tps = strings.Split(ps[0], \":\")\n\t\t\t\tif len(ps) 
>= 3 {\n\t\t\t\t\tif p, err := strconv.Atoi(ps[0]); err == nil {\n\t\t\t\t\t\td += time.Duration(p) * time.Hour\n\t\t\t\t\t}\n\t\t\t\t\tif p, err := strconv.Atoi(ps[1]); err == nil {\n\t\t\t\t\t\td += time.Duration(p) * time.Minute\n\t\t\t\t\t}\n\t\t\t\t\tif p, err := strconv.Atoi(ps[2]); err == nil {\n\t\t\t\t\t\td += time.Duration(p) * time.Second\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tr.Time = astikit.DurationPtr(d)\n\t\t\t}\n\t\t}\n\n\t\t// Get key\n\t\tif len(items) > 1 && len(items[1]) > 0 {\n\t\t\tk = string(items[1])\n\t\t} else if len(items) == 1 && idx == 0 {\n\t\t\tk = string(items[0])\n\t\t}\n\t}\n\treturn\n}", "func (r LastpassResponse) Parse() (logs []LastpassLog) {\n\tfor _, log := range r.Data {\n\t\tlogs = append(logs, log.ToLog())\n\t}\n\treturn\n}", "func RawParser(raw string) Results {\n results := Results{}\n results.Command = toS(find(raw, \"^(httperf .*)\"))\n results.MaxConnectBurstLength = toI(find(raw, \"Maximum connect burst length: ([0-9]*?\\\\.?[0-9]+)$\"))\n results.TotalConnections = toI(find(raw, \"^Total: connections ([0-9]*?\\\\.?[0-9]+) \"))\n results.TotalRequests = toI(find(raw, \"^Total: connections .+ requests ([0-9]*?\\\\.?[0-9]+) \"))\n results.TotalReplies = toI(find(raw, \"^Total: connections .+ replies ([0-9]*?\\\\.?[0-9]+) \"))\n results.TotalTestDuration = toF(find(raw, \"^Total: connections .+ test-duration ([0-9]*?\\\\.?[0-9]+) \"))\n results.ConnectionRatePerSec = toF(find(raw, \"^Connection rate: ([0-9]*?\\\\.?[0-9]+) \"))\n results.ConnectionRateMsConn = toF(find(raw, \"^Connection rate: .+ \\\\(([0-9]*?\\\\.?[0-9]+) ms\"))\n results.ConnectionTimeMin = toF(find(raw, \"^Connection time \\\\[ms\\\\]: min ([0-9]*?\\\\.?[0-9]+) \"))\n results.ConnectionTimeAvg = toF(find(raw, \"^Connection time \\\\[ms\\\\]: min .+ avg ([0-9]*?\\\\.?[0-9]+) \"))\n results.ConnectionTimeMax = toF(find(raw, \"^Connection time \\\\[ms\\\\]: min .+ max ([0-9]*?\\\\.?[0-9]+) \"))\n results.ConnectionTimeMedian = toF(find(raw, \"^Connection time \\\\[ms\\\\]: min .+ median ([0-9]*?\\\\.?[0-9]+) \"))\n results.ConnectionTimeStddev = toF(find(raw, \"^Connection time \\\\[ms\\\\]: min .+ stddev ([0-9]*?\\\\.?[0-9]+)$\"))\n results.ConnectionTimeConnect = toF(find(raw, \"^Connection time \\\\[ms\\\\]: connect ([0-9]*?\\\\.?[0-9]+)$\"))\n results.ConnectionLength = toF(find(raw, \"^Connection length \\\\[replies\\\\/conn\\\\]: ([0-9]*?\\\\.?[0-9]+)$\"))\n results.RequestRatePerSec = toF(find(raw, \"^Request rate: ([0-9]*?\\\\.?[0-9]+) req\"))\n results.RequestRateMsRequest = toF(find(raw, \"^Request rate: .+ \\\\(([0-9]*?\\\\.?[0-9]+) ms\"))\n results.RequestSize = toF(find(raw, \"^Request size \\\\[B\\\\]: ([0-9]*?\\\\.?[0-9]+)$\"))\n results.ReplyRateMin = toF(find(raw, \"^Reply rate \\\\[replies\\\\/s\\\\]: min ([0-9]*?\\\\.?[0-9]+) \"))\n results.ReplyRateAvg = toF(find(raw, \"^Reply rate \\\\[replies\\\\/s\\\\]: min .+ avg ([0-9]*?\\\\.?[0-9]+) \"))\n results.ReplyRateMax = toF(find(raw, \"^Reply rate \\\\[replies\\\\/s\\\\]: min .+ max ([0-9]*?\\\\.?[0-9]+) \"))\n results.ReplyRateStddev = toF(find(raw, \"^Reply rate \\\\[replies\\\\/s\\\\]: min .+ stddev ([0-9]*?\\\\.?[0-9]+) \"))\n results.ReplyRateSamples = toI(find(raw, \"^Reply rate \\\\[replies\\\\/s\\\\]: min .+ \\\\(([0-9]*?\\\\.?[0-9]+) samples\"))\n results.ReplyTimeResponse = toF(find(raw, \"^Reply time \\\\[ms\\\\]: response ([0-9]*?\\\\.?[0-9]+) \"))\n results.ReplyTimeTransfer = toF(find(raw, \"^Reply time \\\\[ms\\\\]: response .+ transfer ([0-9]*?\\\\.?[0-9]+)$\"))\n results.ReplySizeHeader = 
toF(find(raw, \"^Reply size \\\\[B\\\\]: header ([0-9]*?\\\\.?[0-9]+) \"))\n results.ReplySizeContent = toF(find(raw, \"^Reply size \\\\[B\\\\]: header .+ content ([0-9]*?\\\\.?[0-9]+) \"))\n results.ReplySizeFooter = toF(find(raw, \"^Reply size \\\\[B\\\\]: header .+ footer ([0-9]*?\\\\.?[0-9]+) \"))\n results.ReplySizeTotal = toF(find(raw, \"^Reply size \\\\[B\\\\]: header .+ \\\\(total ([0-9]*?\\\\.?[0-9]+)\\\\)\"))\n results.ReplyStatus1xx = toI(find(raw, \"^Reply status: 1xx=([0-9]*?\\\\.?[0-9]+) \"))\n results.ReplyStatus2xx = toI(find(raw, \"^Reply status: .+ 2xx=([0-9]*?\\\\.?[0-9]+) \"))\n results.ReplyStatus3xx = toI(find(raw, \"^Reply status: .+ 3xx=([0-9]*?\\\\.?[0-9]+) \"))\n results.ReplyStatus4xx = toI(find(raw, \"^Reply status: .+ 4xx=([0-9]*?\\\\.?[0-9]+) \"))\n results.ReplyStatus5xx = toI(find(raw, \"^Reply status: .+ 5xx=([0-9]*?\\\\.?[0-9]+)\"))\n results.CPUTimeUserSec = toF(find(raw, \"^CPU time \\\\[s\\\\]: user ([0-9]*?\\\\.?[0-9]+) \"))\n results.CPUTimeUserPct = toF(find(raw, \"^CPU time \\\\[s\\\\]: .+ \\\\(user ([0-9]*?\\\\.?[0-9]+)\\\\% \"))\n results.CPUTimeSystemSec = toF(find(raw, \"^CPU time \\\\[s\\\\]: .+ system ([0-9]*?\\\\.?[0-9]+) \"))\n results.CPUTimeSystemPct = toF(find(raw, \"^CPU time \\\\[s\\\\]: user .+ system .+ system ([0-9]*?\\\\.?[0-9]+)\\\\% \"))\n results.CPUTimeTotalPct = toF(find(raw, \"^CPU time \\\\[s\\\\]: user .+ total ([0-9]*?\\\\.?[0-9]+)\\\\%\"))\n results.NetIoKbSec = toF(find(raw, \"^Net I\\\\/O: ([0-9]*?\\\\.?[0-9]+) KB\"))\n results.NetIoBps = toS(find(raw, \"^Net I\\\\/O: .+ \\\\((.+) bps\\\\)\"))\n results.ErrorsTotal = toI(find(raw, \"^Errors: total ([0-9]*?\\\\.?[0-9]+) \"))\n results.ErrorsClientTimeout = toI(find(raw, \"^Errors: total .+ client-timo ([0-9]*?\\\\.?[0-9]+) \"))\n results.ErrorsSocketTimeout = toI(find(raw, \"^Errors: total .+ socket-timo ([0-9]*?\\\\.?[0-9]+) \"))\n results.ErrorsConnRefused = toI(find(raw, \"^Errors: total .+ connrefused ([0-9]*?\\\\.?[0-9]+) \"))\n results.ErrorsConnReset = toI(find(raw, \"^Errors: total .+ connreset ([0-9]*?\\\\.?[0-9]+)\"))\n results.ErrorsFdUnavail = toI(find(raw, \"^Errors: fd-unavail ([0-9]*?\\\\.?[0-9]+) \"))\n results.ErrorsAddrUnavail = toI(find(raw, \"^Errors: fd-unavail .+ addrunavail ([0-9]*?\\\\.?[0-9]+) \"))\n results.ErrorsFtabFull = toI(find(raw, \"^Errors: fd-unavail .+ ftab-full ([0-9]*?\\\\.?[0-9]+) \"))\n results.ErrorsOther = toI(find(raw, \"^Errors: fd-unavail .+ other ([0-9]*?\\\\.?[0-9]+)\"))\n results.ConnectionTimes = findConnectionTimes(raw)\n results.calculatePercentiles()\n\n return results\n}", "func TestParseAndPrint(t *testing.T) {\n\tresponse := Response{\n\t\tArtists: searchItem{\n\t\t\tItem: []ResultItem{\n\t\t\t\t{\n\t\t\t\t\tName: \"Artist\",\n\t\t\t\t\tURI: \"ArtistURI\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tTrack: searchItem{\n\t\t\tItem: []ResultItem{\n\t\t\t\t{\n\t\t\t\t\tName: \"Track\",\n\t\t\t\t\tURI: \"TrackURI\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tkinds := []string{\"artist\", \"track\"}\n\n\tsearchResult := response.ParseAndPrint(kinds)\n\n\tassert.Equal(t, searchResult[0].Name, \"Artist\")\n\tassert.Equal(t, searchResult[0].URI, \"ArtistURI\")\n\n\tassert.Equal(t, searchResult[1].Name, \"Track\")\n\tassert.Equal(t, searchResult[1].URI, \"TrackURI\")\n}", "func TestResultsFromFile(t *testing.T) {\n\trf, err := ResultsFromReader(strings.NewReader(`401 Q0 LA110990-0013 0 13.74717580250855 BB2c1.0\n401 Q0 FBIS3-18833 1 13.662447072667604 BB2c1.0\n401 Q0 FBIS3-39117 2 13.640016012221363 BB2c1.0\n401 Q0 FT941-230 3 
13.4799521334611 BB2c1.0\n401 Q0 FT924-1346 4 13.418277205894087 BB2c1.0\n401 Q0 FT941-4640 5 13.32332784351334 BB2c1.0\n401 Q0 LA122190-0057 6 13.278646892401042 BB2c1.0\n401 Q0 FBIS3-18916 7 13.00539383125854 BB2c1.0\n401 Q0 LA030690-0168 8 12.870710238224662 BB2c1.0\n401 Q0 FBIS3-17077 9 12.806848508228754 BB2c1.0\n`))\n\tresults, ok := rf.Results[\"401\"]\n\tif !ok {\n\t\tt.Error(\"Topic list missing for topic 401\")\n\t}\n\tif err != nil {\n\t\tt.Error(\"Expected no error, but got\", err)\n\t}\n\tif len(results) != 10 {\n\t\tt.Error(\"Expected 10 results, but got\", len(results))\n\t}\n\n\tCheckResult(results[0], \"401\", \"Q0\", \"LA110990-0013\", 0, 13.74717580250855, \"BB2c1.0\", \"401 Q0 LA110990-0013 0 13.74717580250855 BB2c1.0\", t)\n\tCheckResult(results[1], \"401\", \"Q0\", \"FBIS3-18833\", 1, 13.662447072667604, \"BB2c1.0\", \"401 Q0 FBIS3-18833 1 13.662447072667604 BB2c1.0\", t)\n\tCheckResult(results[2], \"401\", \"Q0\", \"FBIS3-39117\", 2, 13.640016012221363, \"BB2c1.0\", \"401 Q0 FBIS3-39117 2 13.640016012221363 BB2c1.0\", t)\n\tCheckResult(results[3], \"401\", \"Q0\", \"FT941-230\", 3, 13.4799521334611, \"BB2c1.0\", \"401 Q0 FT941-230 3 13.4799521334611 BB2c1.0\", t)\n\tCheckResult(results[4], \"401\", \"Q0\", \"FT924-1346\", 4, 13.418277205894087, \"BB2c1.0\", \"401 Q0 FT924-1346 4 13.418277205894087 BB2c1.0\", t)\n\tCheckResult(results[5], \"401\", \"Q0\", \"FT941-4640\", 5, 13.32332784351334, \"BB2c1.0\", \"401 Q0 FT941-4640 5 13.32332784351334 BB2c1.0\", t)\n\tCheckResult(results[6], \"401\", \"Q0\", \"LA122190-0057\", 6, 13.278646892401042, \"BB2c1.0\", \"401 Q0 LA122190-0057 6 13.278646892401042 BB2c1.0\", t)\n\tCheckResult(results[7], \"401\", \"Q0\", \"FBIS3-18916\", 7, 13.00539383125854, \"BB2c1.0\", \"401 Q0 FBIS3-18916 7 13.00539383125854 BB2c1.0\", t)\n\tCheckResult(results[8], \"401\", \"Q0\", \"LA030690-0168\", 8, 12.870710238224662, \"BB2c1.0\", \"401 Q0 LA030690-0168 8 12.870710238224662 BB2c1.0\", t)\n\tCheckResult(results[9], \"401\", \"Q0\", \"FBIS3-17077\", 9, 12.806848508228754, \"BB2c1.0\", \"401 Q0 FBIS3-17077 9 12.806848508228754 BB2c1.0\", t)\n}", "func (msg *InformResponse) Parse(doc *string) {\n\t\n\t//TODO\n}", "func TestParseResponse(t *testing.T) {\r\n\tcreds := GetTestCreds()\r\n\r\n\ttype Person struct {\r\n\t\tName string `sql:\"name\"`\r\n\t\tAge int32 `sql:\"age\"`\r\n\t\tMass float32 `sql:\"mass\"`\r\n\t}\r\n\r\n\tadam := Person{\"Adam\", 10, 242.0}\r\n\tbrad := Person{\"Brad\", 20, 199.9}\r\n\tchad := Person{\"Chad\", 30, 206.9}\r\n\r\n\ttests := []struct {\r\n\t\tquery string\r\n\t\twantPeople []Person\r\n\t}{\r\n\t\t{\r\n\t\t\t`SELECT * FROM People WHERE name = 'Duke'`,\r\n\t\t\t[]Person{},\r\n\t\t}, {\r\n\t\t\t`SELECT * FROM People WHERE name = 'Adam'`,\r\n\t\t\t[]Person{adam},\r\n\t\t}, {\r\n\t\t\t`SELECT * FROM People WHERE age >= 20`,\r\n\t\t\t[]Person{brad, chad},\r\n\t\t},\r\n\t}\r\n\r\n\t// Create a suitable table in the test database.\r\n\tconn, err := Connect(creds)\r\n\tif err != nil {\r\n\t\tt.Fatalf(\"Failed to connect to database: %v.\", err)\r\n\t}\r\n\tif _, err := conn.exec(`CREATE TABLE People (name TEXT, age INT, mass FLOAT4);`); err != nil {\r\n\t\tt.Fatalf(\"Failed to create table: %v.\", err)\r\n\t}\r\n\tdefer func() {\r\n\t\tconn.exec(`DROP TABLE People;`)\r\n\t\tconn.Close()\r\n\t}()\r\n\r\n\t// Add Adam, Brad, and Chad to the database.\r\n\tfor _, person := range []Person{adam, brad, chad} {\r\n\t\tcmd := fmt.Sprintf(\"INSERT INTO People (name, age, mass) VALUES ('%s', %d, %f);\", person.Name, person.Age, 
person.Mass)\r\n\t\tif _, err := conn.exec(cmd); err != nil {\r\n\t\t\tt.Fatalf(\"Failed to insert Person %q: %v.\", person.Name, err)\r\n\t\t}\r\n\t}\r\n\r\n\tfor i, test := range tests {\r\n\t\trows, err := conn.query(test.query)\r\n\t\tif err != nil {\r\n\t\t\tt.Errorf(\"TestParseResponse()[%d] - failed to execute query: %v.\", i, err)\r\n\t\t\tcontinue\r\n\t\t}\r\n\r\n\t\thavePeople, err := parseResponse(rows, Person{})\r\n\t\tif err != nil {\r\n\t\t\tt.Errorf(\"TestParseResponse()[%d] - failed to parse response: %v.\", i, err)\r\n\t\t\tcontinue\r\n\t\t}\r\n\r\n\t\tif len(havePeople) != len(test.wantPeople) {\r\n\t\t\tt.Errorf(\"TestParseResponse()[%d] = %d, want %d people.\", i, len(havePeople), len(test.wantPeople))\r\n\t\t\tcontinue\r\n\t\t}\r\n\t\tfor j, havePerson := range havePeople {\r\n\t\t\twantPerson := test.wantPeople[j]\r\n\t\t\tif !reflect.DeepEqual(havePerson, wantPerson) {\r\n\t\t\t\tt.Errorf(\"TestParseResponse()[%d][%d] = %v, want Person %v.\", i, j, havePerson, wantPerson)\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n}", "func (op *OutputParserV1) Parse(result api.ExecResult, req messages.ExecutionRequest, err error) *messages.ExecutionResponse {\n\tresp := &messages.ExecutionResponse{}\n\tresp.Status = \"ok\"\n\tif err != nil {\n\t\tresp.Status = \"error\"\n\t\tresp.StatusMessage = fmt.Sprintf(\"%s\", err)\n\t\treturn resp\n\t}\n\tretained := []string{}\n\tif len(result.Stdout) > 0 {\n\t\tlines := strings.Split(strings.TrimSuffix(string(result.Stdout), \"\\n\"), \"\\n\")\n\t\tfor _, line := range lines {\n\t\t\tmatched := false\n\t\t\tif resp.IsJSON == false {\n\t\t\t\tfor re, cb := range op.matchers {\n\t\t\t\t\tif re.MatchString(line) {\n\t\t\t\t\t\tlines := re.FindStringSubmatch(line)\n\t\t\t\t\t\t// Drop the first match (which is the full\n\t\t\t\t\t\t// string); we're after the submatches. 
This\n\t\t\t\t\t\t// also implies that all the regexes capture\n\t\t\t\t\t\t// subgroups.\n\t\t\t\t\t\tcb(lines[1:], resp, req)\n\t\t\t\t\t\tmatched = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif matched == false {\n\t\t\t\t\tretained = append(retained, line)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tretained = append(retained, line)\n\t\t\t}\n\t\t}\n\t}\n\tif !result.GetSuccess() {\n\t\tresp.Status = \"error\"\n\t\tresp.StatusMessage = string(result.Stderr)\n\t\treturn resp\n\t}\n\n\tif resp.IsJSON == true {\n\t\tjsonBody := interface{}(nil)\n\t\tremaining := []byte(strings.Join(retained, \"\\n\"))\n\n\t\td := util.NewJSONDecoder(bytes.NewReader(remaining))\n\t\tif err := d.Decode(&jsonBody); err != nil {\n\t\t\tresp.Status = \"error\"\n\t\t\tresp.StatusMessage = \"Command returned invalid JSON.\"\n\t\t} else {\n\t\t\tresp.Body = jsonBody\n\t\t}\n\t} else {\n\t\tif len(retained) > 0 {\n\t\t\tresp.Body = []map[string][]string{\n\t\t\t\tmap[string][]string{\n\t\t\t\t\t\"body\": retained,\n\t\t\t\t},\n\t\t\t}\n\t\t}\n\t}\n\tif resp.Status == \"ok\" && resp.Aborted == true {\n\t\tresp.Status = \"abort\"\n\t}\n\treturn resp\n}", "func Parse(ua string) Device {\n\tms := matchers(ua)\n\tfor _, d := range ms {\n\t\tif d.match() {\n\t\t\treturn Device{d}\n\t\t}\n\t}\n\treturn Device{\n\t\t&core{ua: ua},\n\t}\n}", "func (g getOutputFilesRequestType) ParseResult(rawString string) []string {\n\treturn splitOrEmpty(rawString, \", \")\n}", "func (rec *RFIDrecord) Parse(f []string, rfi *RFIDinfo) bool {\n\n\tvar err error\n\n\trec.Ping, err = strconv.ParseUint(f[0], 10, 64)\n\tif err != nil {\n\t\trfi.InvalidPing++\n\t\treturn false\n\t}\n\n\t// Get the IP address as a numeric code\n\tc, ok := IPcode[f[1]]\n\tif !ok {\n\t\t// Not a known IP address\n\t\trfi.InvalidIP++\n\t\treturn false\n\t}\n\trec.IP = c\n\n\tswitch len(f[2]) {\n\tcase 24:\n\t\t// Provider record\n\t\tif !rec.parseProvider(f[2], rfi) {\n\t\t\treturn false\n\t\t}\n\tcase 32:\n\t\t// Patient record\n\t\tif !rec.parsePatient(f[2], rfi) {\n\t\t\treturn false\n\t\t}\n\tdefault:\n\t\trfi.InvalidTagLength++\n\t\treturn false\n\t}\n\n\ttm := []byte(f[3])\n\ttm[10] = 'T'\n\ttm = append(tm, 'Z')\n\trec.TimeStamp, err = time.Parse(time.RFC3339, string(tm))\n\tif err != nil {\n\t\tfmt.Printf(\"%v\\n\", err)\n\t\trfi.InvalidTimeStamp++\n\t\treturn false\n\t}\n\n\ts, err := strconv.ParseFloat(f[4], 64)\n\tif err != nil {\n\t\trfi.InvalidSignal++\n\t\treturn false\n\t}\n\trec.Signal = float32(s)\n\n\tr, err := strconv.Atoi(f[5])\n\tif err != nil {\n\t\trfi.InvalidReadCount++\n\t\treturn false\n\t}\n\trec.Reads = uint16(r)\n\n\treturn true\n}", "func Parse(pth string) (*ActionsInvocationRecord, []ActionTestPlanRunSummaries, error) {\n\tvar r ActionsInvocationRecord\n\tif err := xcresulttoolGet(pth, \"\", &r); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar summaries []ActionTestPlanRunSummaries\n\tfor _, action := range r.Actions.Values {\n\t\trefID := action.ActionResult.TestsRef.ID.Value\n\t\tvar s ActionTestPlanRunSummaries\n\t\tif err := xcresulttoolGet(pth, refID, &s); err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tsummaries = append(summaries, s)\n\t}\n\treturn &r, summaries, nil\n}", "func (p *LogHandler) Parse(b []byte) bool {\n\tp.Result = map[string]interface{}{}\n\tp.Raw = b\n\tp.Parser.Parse(p.Raw, &p.Result)\n\tif len(p.Result) == 0 {\n\t\treturn false\n\t}\n\treturn true\n}", "func (me TAttlistAbstractTextNlmCategory) IsResults() bool { return me.String() == \"RESULTS\" }", "func (omad 
*OutputMessageAccountabilityData) Parse(record string) {\n\tomad.tag = record[:6]\n\tomad.OutputCycleDate = omad.parseStringField(record[6:14])\n\tomad.OutputDestinationID = omad.parseStringField(record[14:22])\n\tomad.OutputSequenceNumber = omad.parseStringField(record[22:28])\n\tomad.OutputDate = omad.parseStringField(record[28:32])\n\tomad.OutputTime = omad.parseStringField(record[32:36])\n\tomad.OutputFRBApplicationIdentification = omad.parseStringField(record[36:40])\n}", "func (b *BotParserAbstract) Parse(ua string) *BotMatchResult {\n\tif b.PreMatch(ua) {\n\t\tif b.discardDetails {\n\t\t\treturn EmptyBotMatchResult\n\t\t}\n\t\tfor _, regex := range b.Regexes {\n\t\t\tmatches := regex.MatchUserAgent(ua)\n\t\t\tif len(matches) > 0 {\n\t\t\t\treturn &regex.BotMatchResult\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (f *recordingSource) parseRecord(recordNum int) *record {\n\tr, ok := f.recordDecls[recordNum]\n\tif !ok {\n\t\tpanicf(\"record with number %d must exist\", recordNum)\n\t}\n\n\t// Record fields are separated by tabs, with the first field being the name\n\t// of the driver method.\n\tfields := splitString(r, \"\\t\")\n\trecType, ok := strToRecType[fields[0]]\n\tif !ok {\n\t\tpanicf(\"record type %v is not recognized\", fields[0])\n\t}\n\n\t// Remaining fields are record arguments in \"<dataType>:<formattedValue>\"\n\t// format.\n\trec := &record{Typ: recType}\n\tfor i := 1; i < len(fields); i++ {\n\t\tval, err := parseValueWithType(fields[i])\n\t\tif err != nil {\n\t\t\tpanicf(\"error parsing %s: %v\", fields[i], err)\n\t\t}\n\t\trec.Args = append(rec.Args, val)\n\t}\n\treturn rec\n}", "func (f *Filter) Match(res *benchfmt.Result) (Match, error) {\n\tm, x := f.match(res)\n\treturn Match{len(res.Values), m, x}, nil\n}", "func Parse(data interface{}) error {\n\treturn ParseWithExternal(data, &emptyExt{})\n}", "func (handler *SQLLiteTableHandler) ParseRows(rows *sql.Rows) per.IQueryResult {\n\thandler.Parent.LogDebug(\"ParseRows\", \"Returing empty results - was this function replaced\")\n\treturn NewDataQueryResult(false, []per.IDataItem{})\n}", "func TestParser(t *testing.T) {\n\n\ttype testPair struct {\n\t\tinput string\n\t\ttext string\n\t\terror error\n\t}\n\n\tvar tests = []testPair{\n\t\t{\"test/test.html\", `<meta http-equiv=\"content-type\" content=\"text/html; charset=UTF-8\"><html lang=\"en\"><head><title>AWR Report for DB: ZP, Inst: k10szp_1, Snaps: 111755-111758</title> <style type=\"text/css\"> body.awr {font:bold 10pt Arial,Helvetica,Geneva,sans-serif;color:black; background:White;} pre.awr {font:8pt Courier;color:black; background:White;} .hidden {position:absolute;left:-10000px;top:auto;width:1px;height:1px;overflow:hidden;} .pad {margin-left:17px;} .doublepad {margin-left:34px;} </style></head><body class=\"awr\"><h1 class=\"awr\"> WORKLOAD REPOSITORY report for</h1><p /><table border=\"0\" width=\"600\" class=\"tdiff\" summary=\"This table displays database instance information\"> <tr><th class=\"awrbg\" scope=\"col\">DB Name</th><th class=\"awrbg\" scope=\"col\">DB Id</th><th class=\"awrbg\" scope=\"col\">Instance</th><th class=\"awrbg\" scope=\"col\">Inst num</th><th class=\"awrbg\" scope=\"col\">Startup Time</th><th class=\"awrbg\" scope=\"col\">Release</th><th class=\"awrbg\" scope=\"col\">RAC</th></tr> <tr><td scope=\"row\" class='awrnc'>ZP</td><td align=\"right\" class='awrnc'>2966226569</td><td class='awrnc'>sdf_test1</td><td align=\"right\" class='awrnc'>1</td><td class='awrnc'>08-Ноя-17 16:11</td><td class='awrnc'>12.1.0.2.0</td><td 
class='awrnc'>NO</td></tr></table><p /><p /><table border=\"0\" width=\"600\" class=\"tdiff\" summary=\"This table displays host information\"> <tr><th class=\"awrbg\" scope=\"col\">Host Name</th><th class=\"awrbg\" scope=\"col\">Platform</th><th class=\"awrbg\" scope=\"col\">CPUs</th><th class=\"awrbg\" scope=\"col\">Cores</th><th class=\"awrbg\" scope=\"col\">Sockets</th><th class=\"awrbg\" scope=\"col\">Memory (GB)</th></tr> <tr><td scope=\"row\" class='awrnc'>sdfeede</td><td class='awrnc'>Solaris[tm] OE (32-bit)</td><td align=\"right\" class='awrnc'> 256</td><td align=\"right\" class='awrnc'> 32</td><td align=\"right\" class='awrnc'> 1</td><td align=\"right\" class='awrnc'> 478.50</td></tr></table><p /><table border=\"0\" width=\"600\" class=\"tdiff\" summary=\"This table displays snapshot information\"> <tr><th class=\"awrnobg\" scope=\"col\"></th><th class=\"awrbg\" scope=\"col\">Snap Id</th><th class=\"awrbg\" scope=\"col\">Snap Time</th><th class=\"awrbg\" scope=\"col\">Sessions</th><th class=\"awrbg\" scope=\"col\">Cursors/Session</th></tr> <tr><td scope=\"row\" class='awrnc'>Begin Snap:</td><td align=\"right\" class='awrnc'>111755</td><td align=\"center\" class='awrnc'>10-Дек-17 13:30:55</td><td align=\"right\" class='awrnc'>951</td><td align=\"right\" class='awrnc'> 12.0</td></tr> <tr><td scope=\"row\" class='awrc'>End Snap:</td><td align=\"right\" class='awrc'>111758</td><td align=\"center\" class='awrc'>10-Дек-17 14:00:00</td><td align=\"right\" class='awrc'>1130</td><td align=\"right\" class='awrc'> 13.0</td></tr> <tr><td scope=\"row\" class='awrnc'>Elapsed:</td><td class='awrnc'>&#160;</td><td align=\"center\" class='awrnc'> 29.09 (mins)</td><td class='awrnc'>&#160;</td><td class='awrnc'>&#160;</td></tr> <tr><td scope=\"row\" class='awrc'>DB Time:</td><td class='awrc'>&#160;</td><td align=\"center\" class='awrc'> 204.95 (mins)</td><td class='awrc'>&#160;</td><td class='awrc'>&#160;</td></tr></table><p /><h3 class=\"awr\"><a class=\"awr\" name=\"99999\"></a>Report Summary</h3><p />Top ADDM Findings by Average Active Sessions<p /><ul></ul><table border=\"0\" width=\"600\" class=\"tdiff\" summary=\"This table displays top ADDM findings by average active sessions\"><tr><th class=\"awrbg\" scope=\"col\">Finding Name</th><th class=\"awrbg\" scope=\"col\">Avg active sessions of the task</th><th class=\"awrbg\" scope=\"col\">Percent active sessions of finding</th><th class=\"awrbg\" scope=\"col\">Task Name</th><th class=\"awrbg\" scope=\"col\">Begin Snap Time</th><th class=\"awrbg\" scope=\"col\">End Snap Time</th></tr> <tr><td class='awrc'>Фиксации и откаты</td><td align=\"right\" class='awrc'>8.08</td><td align=\"right\" class='awrc'>50.71</td><td scope=\"row\" class='awrc'>ADDM:2926569_1_111758</td><td class='awrc'>10-Дек-17 13:50</td><td class='awrc'>10-Дек-17 14:00</td></tr> <tr><td class='awrnc'>Фиксации и откаты</td><td align=\"right\" class='awrnc'>7.07</td><td align=\"right\" class='awrnc'>57.14</td><td scope=\"row\" class='awrnc'>ADDM:2926569_1_111758</td><td class='awrnc'>10-Дек-17 13:40</td><td class='awrnc'>10-Дек-17 13:50</td></tr> <tr><td class='awrc'>Фиксации и откаты</td><td align=\"right\" class='awrc'>6.09</td><td align=\"right\" class='awrc'>48.90</td><td scope=\"row\" class='awrc'>ADDM:2926569_1_111758</td><td class='awrc'>10-Дек-17 13:30</td><td class='awrc'>10-Дек-17 13:40</td></tr> <tr><td class='awrnc'>Наиболее часто используемые операторы SQL</td><td align=\"right\" class='awrnc'>8.08</td><td align=\"right\" class='awrnc'>31.41</td><td scope=\"row\" 
class='awrnc'>ADDM:2966226569_1_111758</td><td class='awrnc'>10-Дек-17 13:50</td><td class='awrnc'>10-Дек-17 14:00</td></tr> <tr><td class='awrc'>Наиболее часто используемые операторы SQL</td><td align=\"right\" class='awrc'>7.07</td><td align=\"right\" class='awrc'>25.59</td><td scope=\"row\" class='awrc'>ADDM:2966226569_1_111757</td><td class='awrc'>10-Дек-17 13:40</td><td class='awrc'>10-Дек-17 13:50</td></tr></table><p /><p />Load Profile<p /><table border=\"0\" width=\"600\" class=\"tdiff\" summary=\"This table displays load profile\"> <tr><th class=\"awrnobg\" scope=\"col\"></th><th class=\"awrbg\" scope=\"col\">Per Second</th><th class=\"awrbg\" scope=\"col\">Per Transaction</th><th class=\"awrbg\" scope=\"col\">Per Exec</th><th class=\"awrbg\" scope=\"col\">Per Call</th></tr> <tr><td scope=\"row\" class='awrc'>DB Time(s):</td><td align=\"right\" class='awrc'> 7.1</td><td align=\"right\" class='awrc'> 0.0</td><td align=\"right\" class='awrc'> 0.00</td><td align=\"right\" class='awrc'> 0.00</td></tr> <tr><td scope=\"row\" class='awrnc'>DB CPU(s):</td><td align=\"right\" class='awrnc'> 2.8</td><td align=\"right\" class='awrnc'> 0.0</td><td align=\"right\" class='awrnc'> 0.00</td><td align=\"right\" class='awrnc'> 0.00</td></tr> <tr><td scope=\"row\" class='awrc'>Background CPU(s):</td><td align=\"right\" class='awrc'> 0.4</td><td align=\"right\" class='awrc'> 0.0</td><td align=\"right\" class='awrc'> 0.00</td><td align=\"right\" class='awrc'> 0.00</td></tr> <tr><td scope=\"row\" class='awrnc'>Redo size (bytes):</td><td align=\"right\" class='awrnc'> 5,048,931.5</td><td align=\"right\" class='awrnc'> 8,538.4</td><td class='awrnc'>&#160;</td><td class='awrnc'>&#160;</td></tr> <tr><td scope=\"row\" class='awrc'>L</tr></table></p><h3 class=\"awr\">Background Wait Events</h3><ul> <li class=\"awr\"> ordered by wait time desc, waits desc (idle events last) </li> <li class=\"awr\"> Only events with Total Wait Time (s) &gt;= .001 are shown </li> <li class=\"awr\"> %Timeouts: value of 0 indicates value was &lt; .5%. 
Value of null is truly 0</li></ul><table border=\"0\" class=\"tdiff\" summary=\"This table displays background wait events statistics\"><tr><th class=\"awrbg\" scope=\"col\">Event</th><th class=\"awrbg\" scope=\"col\">Waits</th><th class=\"awrbg\" scope=\"col\">%Time -outs</th><th class=\"awrbg\" scope=\"col\">Total Wait Time (s)</th><th class=\"awrbg\" scope=\"col\">Avg wait (ms)</th><th class=\"awrbg\" scope=\"col\">Waits /txn</th><th class=\"awrbg\" scope=\"col\">% bg time</th></tr> <tr><td scope=\"row\" class='awrc'>log file parallel write</td><td align=\"right\" class='awrc'>1,085,250</td><td align=\"right\" class='awrc'>0</td><td align=\"right\" class='awrc'>482</td><td align=\"right\" class='awrc'>0.44</td><td align=\"right\" class='awrc'>1.05</td><td align=\"right\" class='awrc'>39.78</td></tr> <tr><td scope=\"row\" class='awrnc'>target log write size</td><td align=\"right\" class='awrnc'>498,563</td><td align=\"right\" class='awrnc'>0</td><td align=\"right\" class='awrnc'>164</td><td align=\"right\" class='awrnc'>0.33</td><td align=\"right\" class='awrnc'>0.48</td><td align=\"right\" class='awrnc'>13.53</td></tr> <tr><td scope=\"row\" class='awrc'>LGWR worker group ordering</td><td align=\"right\" class='awrc'>202</td><td align=\"right\" class='awrc'>0</td><td align=\"right\" class='awrc'>88</td><td align=\"right\" class='awrc'>437.54</td><td align=\"right\" class='awrc'>0.00</td><td align=\"right\" class='awrc'>7.29</td></tr> <tr><td scope=\"row\" class='awrnc'>LGWR any worker group</td><td align=\"right\" class='awrnc'>199</td><td align=\"right\" class='awrnc'>0</td><td align=\"right\" class='awrnc'>86</td><td align=\"right\" class='awrnc'>432.82</td><td align=\"right\" class='awrnc'>0.00</td><td align=\"right\" class='awrnc'>7.11</td></tr> <tr><td scope=\"row\" class='awrc'>db file parallel write</td><td align=\"right\" class='awrc'>617,149</td><td align=\"right\" class='awrc'>0</td><td align=\"right\" class='awrc'>82</td><td align=\"right\" class='awrc'>0.13</td><td align=\"right\" class='awrc'>0.60</td><td align=\"right\" class='awrc'>6.80</td></tr> <tr><td scope=\"row\" class='awrnc'>oracle thread bootstrap</td><td align=\"right\" class='awrnc'>103</td><td align=\"right\" class='awrnc'>0</td><td align=\"right\" class='awrnc'>4</td><td align=\"right\" class='awrnc'>40.41</td><td align=\"right\" class='awrnc'>0.00</td><td align=\"right\" class='awrnc'>0.34</td></tr> <tr><td scope=\"row\" class='awrc'>control file sequential read</td><td align=\"right\" class='awrc'>5,960</td><td align=\"right\" class='awrc'>0</td><td align=\"right\" class='awrc'>2</td><td align=\"right\" class='awrc'>0.28</td><td align=\"right\" class='awrc'>0.01</td><td align=\"right\" class='awrc'>0.14</td></tr> <tr><td scope=\"row\" class='awrnc'>cell single block physical read</td><td align=\"right\" class='awrnc'>1,188</td><td align=\"right\" class='awrnc'>0</td><td align=\"right\" class='awrnc'>1</td><td align=\"right\" class='awrnc'>1.05</td><td align=\"right\" class='awrnc'>0.00</td><td align=\"right\" class='awrnc'>0.10</td></tr> <tr><td scope=\"row\" class='awrc'>os thread creation</td><td align=\"right\" class='awrc'>103</td><td align=\"right\" class='awrc'>0</td><td align=\"right\" class='awrc'>1</td><td align=\"right\" class='awrc'>9.57</td><td align=\"right\" class='awrc'>0.00</td><td align=\"right\" class='awrc'>0.08</td></tr> <tr><td scope=\"row\" class='awrnc'>control file parallel write</td><td align=\"right\" class='awrnc'>1,043</td><td align=\"right\" class='awrnc'>0</td><td align=\"right\" 
class='awrnc'>1</td><td align=\"right\" class='awrnc'>0.57</td><td align=\"right\" class='awrnc'>0.00</td><td align=\"right\" class='awrnc'>0.05</td></tr> <tr><td scope=\"row\" class='awrc'>Disk file Mirror Read</td><td align=\"right\" class='awrc'>1,283</td><td align=\"right\" class='awrc'>0</td><td align=\"right\" class='awrc'>1</td><td align=\"right\" class='awrc'>0.44</td><td align=\"right\" class='awrc'>0.00</td><td align=\"right\" class='awrc'>0.05</td></tr> <tr><td scope=\"row\" class='awrnc'>reliable message</td><td align=\"right\" class='awrnc'>2,330</td><td align=\"right\" class='awrnc'>0</td><td align=\"right\" class='awrnc'>0</td><td align=\"right\" class='awrnc'>0.13</td><td align=\"right\" class='awrnc'>0.00</td><td align=\"right\" class='awrnc'>0.02</td></tr> <tr><td scope=\"row\" class='awrc'>ASM file metadata operation</td><td align=\"right\" class='awrc'>541</td><td align=\"right\" class='awrc'>0</td><td align=\"right\" class='awrc'>0</td><td align=\"right\" class='awrc'>0.53</td><td align=\"right\" class='awrc'>0.00</td><td align=\"right\" class='awrc'>0.02</td></tr> <tr><td scope=\"row\" class='awrnc'>CGS wait for IPC msg</td><td align=\"right\" class='awrnc'>16,988</td><td align=\"right\" class='awrnc'>100</td><td align=\"right\" class='awrnc'>0</td><td align=\"right\" class='awrnc'>0.01</td><td align=\"right\" class='awrnc'>0.02</td><td align=\"right\" class='awrnc'>0.02</td></tr> <tr><td scope=\"row\" class='awrc'>db file async I/O submit</td><td align=\"right\" class='awrc'>528,782</td><td align=\"right\" class='awrc'>0</td><td align=\"right\" class='awrc'>0</td><td align=\"right\" class='awrc'>0.00</td><td align=\"right\" class='awrc'>0.51</td><td align=\"right\" class='awrc'>0.02</td></tr> <tr><td scope=\"row\" class='awrnc'>log file sync</td><td align=\"right\" class='awrnc'>8</td><td align=\"right\" class='awrnc'>0</td><td align=\"right\" class='awrnc'>0</td><td align=\"right\" class='awrnc'>18.77</td><td align=\"right\" class='awrnc'>0.00</td><td align=\"right\" class='awrnc'>0.01</td></tr> <tr><td scope=\"row\" class='awrc'>latch free</td><td align=\"right\" class='awrc'>207</td><td align=\"right\" class='awrc'>0</td><td align=\"right\" class='awrc'>0</td><td align=\"right\" class='awrc'>0.62</td><td align=\"right\" class='awrc'>0.00</td><td align=\"right\" class='awrc'>0.01</td></tr> <tr><td scope=\"row\" class='awrnc'>log file sequential read</td><td align=\"right\" class='awrnc'>24</td><td align=\"right\" class='awrnc'>0</td><td align=\"right\" class='awrnc'>0</td><td align=\"right\" class='awrnc'>5.17</td><td align=\"right\" class='awrnc'>0.00</td><td align=\"right\" class='awrnc'>0.01</td></tr> <tr><td scope=\"row\" class='awrc'>cell statistics gather</td><td align=\"right\" class='awrc'>288</td><td align=\"right\" class='awrc'>0</td><td align=\"right\" class='awrc'>0</td><td align=\"right\" class='awrc'>0.41</td><td align=\"right\" class='awrc'>0.00</td><td align=\"right\" class='awrc'>0.01</td></tr> <tr><td scope=\"row\" class='awrnc'>LGWR wait for redo copy</td><td align=\"right\" class='awrnc'>1,941</td><td align=\"right\" class='awrnc'>0</td><td align=\"right\" class='awrnc'>0</td><td align=\"right\" class='awrnc'>0.04</td><td align=\"right\" class='awrnc'>0.00</td><td align=\"right\" class='awrnc'>0.01</td></tr> <tr><td scope=\"row\" class='awrc'>undo segment extension</td><td align=\"right\" class='awrc'>7</td><td align=\"right\" class='awrc'>86</td><td align=\"right\" class='awrc'>0</td><td align=\"right\" class='awrc'>5.74</td><td align=\"right\" 
class='awrc'>0.00</td><td align=\"right\" class='awrc'>0.00</td></tr> [… several dozen additional AWR wait-event rows elided; each row lists Waits, %Time-outs, Total Wait Time (s), Avg wait (ms), Waits /txn and % DB time for one event (CSS operations, buffer busy waits, direct path read/write, rdbms ipc message, Space Manager/EMON/DIAG slave idle waits, pmon/smon/lreg timers, Streams AQ waits, and similar) …] <tr><td scope=\"row\" class='awrnc'>SQL*Net message from client</td><td align=\"right\" class='awrnc'>3,400</td><td align=\"right\" class='awrnc'>0</td><td align=\"right\" class='awrnc'>4</td><td align=\"right\" class='awrnc'>1.18</td><td align=\"right\" class='awrnc'>0.00</td><td align=\"right\" 
class='awrnc'>&#160;</td></tr></table><p /><hr align=\"left\" width=\"20%\" /><p /><a class=\"awr\" href=\"#21\">Back to Wait Events Statistics</a><br /><a class=\"awr\" href=\"#top\">Back to Top</a><p /><a class=\"exa\" href=\"#CELL_TOPDB\">Back to Exadata Top Database Consumers</a><br/><a class=\"exa\" href=\"#CELL_STATISTICS\">Back to Exadata Statistics</a></div><br /><a class=\"awr\" href=\"#top\">Back to Top</a><p /><p />End of Report</body></html>`, nil},\n\t\t//\t{\"fdsfsdf\", \"\", errors.New(` open fdsfsdf: The system cannot find the file specified. `)},\n\t}\n\n\tfor _, pair := range tests {\n\t\tv, err := readFile(pair.input)\n\t\tif (v != pair.text) || (err != pair.error) {\n\t\t\tt.Error(\n\t\t\t\t\"For\", pair.input,\n\t\t\t\t\"\\n expected:\", pair.text,\n\t\t\t\t\"\\n got text:\", v,\n\t\t\t\t\"\\n got err:\", err,\n\t\t\t)\n\t\t}\n\t}\n\n}", "func checkResults(r []byte) error {\n\tif len(r) != 2 {\n\t\treturn nil\n\t}\n\n\tif r[0] == openBracketASCII && r[1] == closedBracketASCII {\n\t\treturn ErrNoResults\n\t}\n\n\treturn nil\n}", "func (r *Reader) parseRecord(raw string, id uint64) (*Task, error) {\n\n\tvar err error\n\ttask := Task{}\n\n\ttask.Raw = raw\n\ttask.Todo = raw\n\ttask.Id = id\n\n\t// TODO: check for data completed date\n\n\t// TODO: check for priority\n\n\t// TODO: check for created date\n\n\t// TODO: check for contexts and projects\n\t// Set the split function for a Scanner that returns each token inside the\n\t// line of text previously scanned\n\tscanner := bufio.NewScanner(strings.NewReader(raw))\n\tscanner.Split(bufio.ScanWords)\n\tfor scanner.Scan() {\n\t\ttoken := scanner.Text()\n\n\t\tif strings.IndexRune(token, '@') == 0 {\n\t\t\ttask.Contexts = append(task.Contexts, token)\n\t\t}\n\t\tif strings.IndexRune(token, '+') == 0 {\n\t\t\ttask.Projects = append(task.Projects, token)\n\t\t}\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"reading input:\", err)\n\t}\n\t//fmt.Printf(\"task: %s (contexts: %d) (projects: %d)\\n\", raw, contexts, projects)\n\n\t// TODO: check for additional tags\n\n\t// trim any remaining white spaces\n\ttask.Todo = strings.TrimSpace(task.Todo)\n\t//fmt.Println(\"task: \", raw)\n\n\treturn &task, err\n}", "func handleResults(result hooks.InternalMessage) {\n\tres, ok := result.Results.(TableRow)\n\tmanager.Status.AddCurrentScans(-1)\n\n\tif !ok {\n\t\tmanager.Logger.Errorf(\"Couldn't assert type of result for %v\", result.Domain.DomainName)\n\t\tres = TableRow{}\n\t\tresult.StatusCode = hooks.InternalFatalError\n\t}\n\n\tswitch result.StatusCode {\n\tcase hooks.InternalFatalError:\n\t\tres.ScanStatus = hooks.StatusError\n\t\tmanager.Status.AddFatalErrorScans(1)\n\t\tmanager.Logger.Infof(\"Assessment of %v failed ultimately\", result.Domain.DomainName)\n\tcase hooks.InternalSuccess:\n\t\tres.ScanStatus = hooks.StatusDone\n\t\tmanager.Logger.Debugf(\"Assessment of %v was successful\", result.Domain.DomainName)\n\t\tmanager.Status.AddFinishedScans(1)\n\t}\n\twhere := hooks.ScanWhereCond{\n\t\tDomainID: result.Domain.DomainID,\n\t\tScanID: manager.ScanID,\n\t\tTestWithSSL: result.Domain.TestWithSSL}\n\terr := backend.SaveResults(manager.GetTableName(), structs.New(where), structs.New(res))\n\tif err != nil {\n\t\tmanager.Logger.Errorf(\"Couldn't save results for %v: %v\", result.Domain.DomainName, err)\n\t\treturn\n\t}\n\tmanager.Logger.Debugf(\"Results for %v saved\", result.Domain.DomainName)\n\n}", "func (p *LogParser) Parse(address string, b []byte) {\n\t//p.Result = map[string]interface{}{}\n\tp.Raw 
= b\n\tvar r Parser\n\tvar err error\n\n\tif format := p.formatByAddress[address]; format != nil {\n\t\tr = format()\n\t} else {\n\t\tr = CreateParser(p.fmt)\n\t}\n\tp.Result, err = r.Parse(b)\n\tif err != nil {\n\t\tp.Result = map[string]interface{}{\n\t\t\t\"priority\": 0,\n\t\t\t\"facility\": 0,\n\t\t\t\"severity\": 0,\n\t\t\t\"version\": NO_VERSION,\n\t\t\t\"timestamp\": time.Now(),\n\t\t\t\"message\": string(b),\n\t\t}\n\t}\n}", "func (t *TTFB) ParseResponse(res io.Reader, unique string) (Result, error) {\n\tvar data Result\n\n\tif err := json.NewDecoder(res).Decode(&data); err != nil {\n\t\treturn t.BasicResult(unique), err\n\t}\n\n\tif data.Status == 0 {\n\t\treturn t.BasicResult(unique), errors.New(unique + \":\\x20\" + data.Message)\n\t}\n\n\treturn data, nil\n}", "func (p *Parser) Parse(sql string) (*Result, []error, error) {\n\tv := NewVisitor()\n\n\tstmtNodes, warns, err := p.Parser.Parse(sql, constant.EmptyString, constant.EmptyString)\n\tif warns != nil || err != nil {\n\t\treturn nil, warns, err\n\t}\n\n\tfor _, stmtNode := range stmtNodes {\n\t\tstmtNode.Accept(v)\n\t}\n\n\treturn v.Result, nil, nil\n}", "func (f *recordingSource) Parse() error {\n\trecordDecls := make(map[int]string)\n\trecordingDecls := make(map[string]string)\n\n\tdata, err := f.source.ReadAll()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tscanner := bufio.NewScanner(bytes.NewReader(data))\n\tscanner.Buffer(nil, MaxRecordingSize)\n\tfor scanner.Scan() {\n\t\ttext := scanner.Text()\n\t\tif len(text) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tif text[0] != '\"' {\n\t\t\t// Split the line on the first equal sign:\n\t\t\t// 1=DriverOpen 3:nil\n\t\t\tindex := strings.Index(text, \"=\")\n\t\t\tif index == -1 {\n\t\t\t\treturn fmt.Errorf(\"expected equals: %s\", text)\n\t\t\t}\n\n\t\t\trecordNum, err := strconv.Atoi(text[:index])\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"expected record number: %s\", text)\n\t\t\t}\n\n\t\t\trecordDecls[recordNum-1] = text[index+1:]\n\t\t} else {\n\t\t\t// Split the line on the last equal sign:\n\t\t\t// \"some:name\":1,2,3,4\n\t\t\tindex := strings.LastIndex(text, \"=\")\n\t\t\tif index == -1 {\n\t\t\t\treturn fmt.Errorf(\"expected equals: %s\", text)\n\t\t\t}\n\t\t\trecordingName, err := strconv.Unquote(text[:index])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\trecordingDecl := text[index+1:]\n\t\t\trecordingDecls[recordingName] = recordingDecl\n\t\t}\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\tif err == bufio.ErrTooLong {\n\t\t\terr = errors.New(\"recording exceeds copyist.MaxRecordingSize and cannot be read\")\n\t\t}\n\t\treturn err\n\t}\n\n\tf.recordDecls = recordDecls\n\tf.recordingDecls = recordingDecls\n\treturn nil\n}", "func (g getPythonBinaryRequestType) ParseResult(rawString string) string {\n\treturn rawString\n}", "func (p *Parser) Parse(r io.Reader) (doc *Document, err error) {\n\tp.doc = NewDocument()\n\tscanner := bufio.NewScanner(r)\n\tfor scanner.Scan() {\n\t\tline := []rune(scanner.Text())\n\t\tregCode := string(line[:2])\n\t\tswitch regCode {\n\t\tcase \"01\":\n\t\t\terr = p.parseInitiatingParty(line)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase \"02\":\n\t\t\terr = p.parsePaymentHeader(line)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase \"03\":\n\t\t\terr = p.parseDebitTransaction(line)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase \"04\":\n\t\t\terr = p.parsePaymentTotals(line)\n\t\t\tif err != nil 
{\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase \"05\":\n\t\t\terr = p.parseCreditorTotals(line)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase \"99\":\n\t\t\terr = p.parseTotals(line)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\tdoc = p.doc\n\treturn\n}", "func (sc *Scanner) parseRecord() (record map[string]string, err error) {\n\trecord = make(map[string]string)\n\tfor {\n\t\tf, delim, err := sc.parseFieldName()\n\t\tif err != nil {\n\t\t\tif len(record) == 0 {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tif delim == '\\n' {\n\t\t\tcontinue\n\t\t}\n\t\tif delim == '%' {\n\t\t\tbreak\n\t\t}\n\t\tv, end := sc.parseFieldValue()\n\t\tif len(f) > 0 && len(v) > 0 {\n\t\t\tif _, dup := record[f]; dup {\n\t\t\t\treturn nil, errors.Errorf(\"line: %d: duplicated field %q\", sc.line, f)\n\t\t\t}\n\t\t\trecord[f] = v\n\t\t\tif !sc.fok[f] {\n\t\t\t\tsc.fok[f] = true\n\t\t\t\tsc.fields = append(sc.fields, f)\n\t\t\t}\n\t\t}\n\t\tif end {\n\t\t\tbreak\n\t\t}\n\t}\n\tif len(record) == 0 {\n\t\treturn nil, nil\n\t}\n\treturn record, nil\n}", "func parseRecord(table string, r *Record) error {\n\t// it's ok if some records don't return a value\n\tif len(r.Value) == 0 {\n\t\treturn nil\n\t}\n\tif r.Table == \"\" {\n\t\tr.Table = table\n\t} else {\n\t\t// TODO: probably never happens\n\t\tpanicIf(r.Table != table)\n\t}\n\n\t// set Block/Space etc. based on TableView type\n\tvar pRawJSON *map[string]interface{}\n\tvar obj interface{}\n\tswitch table {\n\tcase TableActivity:\n\t\tr.Activity = &Activity{}\n\t\tobj = r.Activity\n\t\tpRawJSON = &r.Activity.RawJSON\n\tcase TableBlock:\n\t\tr.Block = &Block{}\n\t\tobj = r.Block\n\t\tpRawJSON = &r.Block.RawJSON\n\tcase TableUser:\n\t\tr.User = &User{}\n\t\tobj = r.User\n\t\tpRawJSON = &r.User.RawJSON\n\tcase TableSpace:\n\t\tr.Space = &Space{}\n\t\tobj = r.Space\n\t\tpRawJSON = &r.Space.RawJSON\n\tcase TableCollection:\n\t\tr.Collection = &Collection{}\n\t\tobj = r.Collection\n\t\tpRawJSON = &r.Collection.RawJSON\n\tcase TableCollectionView:\n\t\tr.CollectionView = &CollectionView{}\n\t\tobj = r.CollectionView\n\t\tpRawJSON = &r.CollectionView.RawJSON\n\tcase TableDiscussion:\n\t\tr.Discussion = &Discussion{}\n\t\tobj = r.Discussion\n\t\tpRawJSON = &r.Discussion.RawJSON\n\tcase TableComment:\n\t\tr.Comment = &Comment{}\n\t\tobj = r.Comment\n\t\tpRawJSON = &r.Comment.RawJSON\n\t}\n\tif obj == nil {\n\t\treturn fmt.Errorf(\"unsupported table '%s'\", r.Table)\n\t}\n\tif err := jsonit.Unmarshal(r.Value, pRawJSON); err != nil {\n\t\treturn err\n\t}\n\tid := (*pRawJSON)[\"id\"]\n\tif id != nil {\n\t\tr.ID = id.(string)\n\t}\n\tif err := jsonit.Unmarshal(r.Value, &obj); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func Parse(r io.Reader, pkgName string) (*Report, error) {\n\treader := bufio.NewReader(r)\n\n\treport := &Report{make([]Package, 0)}\n\n\t// keep track of tests we find\n\tvar tests []*Test\n\n\t// sum of tests' time, use this if current test has no result line (when it is compiled test)\n\ttestsTime := 0\n\n\t// current test\n\tvar cur string\n\n\t// coverage percentage report for current package\n\tvar coveragePct string\n\n\t// parse lines\n\tfor {\n\t\tl, _, err := reader.ReadLine()\n\t\tif err != nil && err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tline := string(l)\n\n\t\tif strings.HasPrefix(line, \"=== RUN \") {\n\t\t\t// new test\n\t\t\tcur = strings.TrimSpace(line[8:])\n\t\t\ttests = 
append(tests, &Test{\n\t\t\t\tName:\tcur,\n\t\t\t\tResult:\tFAIL,\n\t\t\t\tOutput:\tmake([]string, 0),\n\t\t\t})\n\t\t} else if matches := regexResult.FindStringSubmatch(line); len(matches) == 5 {\n\t\t\tif matches[4] != \"\" {\n\t\t\t\tcoveragePct = matches[4]\n\t\t\t}\n\n\t\t\t// all tests in this package are finished\n\t\t\treport.Packages = append(report.Packages, Package{\n\t\t\t\tName:\t\tmatches[2],\n\t\t\t\tTime:\t\tparseTime(matches[3]),\n\t\t\t\tTests:\t\ttests,\n\t\t\t\tCoveragePct:\tcoveragePct,\n\t\t\t})\n\n\t\t\ttests = make([]*Test, 0)\n\t\t\tcoveragePct = \"\"\n\t\t\tcur = \"\"\n\t\t\ttestsTime = 0\n\t\t} else if matches := regexStatus.FindStringSubmatch(line); len(matches) == 4 {\n\t\t\tcur = matches[2]\n\t\t\ttest := findTest(tests, cur)\n\t\t\tif test == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// test status\n\t\t\tif matches[1] == \"PASS\" {\n\t\t\t\ttest.Result = PASS\n\t\t\t} else if matches[1] == \"SKIP\" {\n\t\t\t\ttest.Result = SKIP\n\t\t\t} else {\n\t\t\t\ttest.Result = FAIL\n\t\t\t}\n\n\t\t\ttest.Name = matches[2]\n\t\t\ttestTime := parseTime(matches[3]) * 10\n\t\t\ttest.Time = testTime\n\t\t\ttestsTime += testTime\n\t\t} else if matches := regexCoverage.FindStringSubmatch(line); len(matches) == 2 {\n\t\t\tcoveragePct = matches[1]\n\t\t} else if matches := regexOutput.FindStringSubmatch(line); len(matches) == 3 {\n\t\t\t// Sub-tests start with one or more series of 4-space indents, followed by a hard tab,\n\t\t\t// followed by the test output\n\t\t\t// Top-level tests start with a hard tab.\n\t\t\ttest := findTest(tests, cur)\n\t\t\tif test == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttest.Output = append(test.Output, matches[2])\n\t\t}\n\t}\n\n\tif len(tests) > 0 {\n\t\t// no result line found\n\t\treport.Packages = append(report.Packages, Package{\n\t\t\tName:\t\tpkgName,\n\t\t\tTime:\t\ttestsTime,\n\t\t\tTests:\t\ttests,\n\t\t\tCoveragePct:\tcoveragePct,\n\t\t})\n\t}\n\n\treturn report, nil\n}", "func (a *API) Parse(t string, id int) (err error) {\n\tswitch t {\n\tcase constant.AnimeType:\n\t\terr = a.parseAnime(id)\n\tcase constant.MangaType:\n\t\terr = a.parseManga(id)\n\tcase constant.CharacterType:\n\t\terr = a.parseCharacter(id)\n\tcase constant.PeopleType:\n\t\terr = a.parsePeople(id)\n\tdefault:\n\t\terr = fmt.Errorf(\"invalid type [%s:%v]\", t, id)\n\t}\n\n\tif err != nil {\n\t\t// Re-queue if error.\n\t\tif errQ := a.enqueue(t, id); errQ != nil {\n\t\t\ta.logger.Error(errQ.Error())\n\t\t}\n\t}\n\n\tif a.es == nil {\n\t\treturn err\n\t}\n\n\tif errEs := a.es.Send(\"mal-db-parse\", queueLog{\n\t\tType: t,\n\t\tID: id,\n\t\tError: err,\n\t\tCreatedAt: time.Now(),\n\t}); errEs != nil {\n\t\ta.logger.Error(errEs.Error())\n\t}\n\n\treturn err\n}", "func (up *UnitMapParser) Parse(line string, store Store) error {\n\tm := util.MapRegex(line, up.regex)\n\terr := util.ValidateRoman(m[\"roman\"])\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = store.Add(\"unit\", m[\"intergalactic\"], m[\"roman\"])\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (da *DefaultAdvisor) parseResult(result string) (string, string, error) {\n\tvar (\n\t\tadvice string\n\t\tmessage string\n\t\terrMsg string\n\t)\n\n\tisLogMsg := true\n\tregExp, err := regexp.Compile(logExpression)\n\tif err != nil {\n\t\treturn constant.EmptyString, constant.EmptyString, err\n\t}\n\n\tlines := strings.Split(result, constant.CRLFString)\n\tfor _, line := range lines {\n\t\tif isLogMsg {\n\t\t\tisLogMsg = regExp.Match([]byte(line))\n\t\t}\n\n\t\tif isLogMsg {\n\t\t\tmessage += line + 
constant.CRLFString\n\t\t\tstringList := strings.Split(line, constant.SpaceString)\n\t\t\tif len(stringList) >= 3 {\n\t\t\t\tlogLevel := string(stringList[2][1])\n\t\t\t\tif logLevel == \"E\" || logLevel == \"F\" {\n\t\t\t\t\terrMsg += line + constant.CRLFString\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\tadvice += line\n\t}\n\n\tif errMsg != constant.EmptyString {\n\t\treturn constant.EmptyString, constant.EmptyString, errors.New(fmt.Sprintf(\"parse result failed. error:\\n%s\", errMsg))\n\t}\n\n\treturn advice, message, nil\n}", "func (l *LinkHeaderPager) Parse(resp *http.Response) Paging {\n\tpaging := Paging{}\n\theader := resp.Header.Get(\"Link\")\n\tif header != \"\" {\n\t\tlinks := linkheader.Parse(header)\n\n\t\tfor _, link := range links.FilterByRel(\"next\") {\n\t\t\tpaging.Next = link.URL\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn paging\n}", "func Parse(reader io.Reader) (Result, error) {\n\tabort := make(chan bool, 1)\n\n\tlexer, stream := newLexer(reader, abort)\n\tparser, success, failure := newParser(stream, abort)\n\n\tgo lexer.run()\n\tgo parser.run()\n\n\tselect {\n\tcase result := <-success:\n\t\treturn result, nil\n\tcase err := <-failure:\n\t\treturn Result{}, err\n\t}\n}", "func (fa *FindAndCountAll) Result(records interface{}, count int) {\n\tfa.Arguments[1] = mock.AnythingOfType(fmt.Sprintf(\"*%T\", records))\n\n\tfa.Run(func(args mock.Arguments) {\n\t\treflect.ValueOf(args[1]).Elem().Set(reflect.ValueOf(records))\n\t}).Return(count, nil)\n}", "func TestParseResponseData(t *testing.T) {\n\tdata, err := os.ReadFile(responseFile)\n\tif err != nil {\n\t\tt.Fatalf(\"cannot read from file: %v\", err)\n\t}\n\n\tgot, err := parseResponseData(data)\n\tif err != nil {\n\t\tt.Fatalf(\"error parsing response data: %v\", err)\n\t}\n\n\twant := Batch{\n\t\tID: \"MC0x\",\n\t\tLogs: []Log{\n\t\t\t{SequenceNum: 1, RequestStart: 1601645172164667, Stream: \"stdout\", RequestID: \"44a1eedd-5831-49fe-b094-7435908ba1fb\", Message: \"1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\"},\n\t\t\t{SequenceNum: 2, RequestStart: 1601645172164667, Stream: \"stdout\", RequestID: \"44a1eedd-5831-49fe-b094-7435908ba1fb\", Message: \"2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2\"},\n\t\t\t{SequenceNum: 3, RequestStart: 1601645172164667, Stream: \"stdout\", RequestID: \"44a1eedd-5831-49fe-b094-7435908ba1fb\", Message: \"3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3\"},\n\t\t\t{SequenceNum: 4, RequestStart: 1601645172164667, Stream: \"stdout\", RequestID: \"44a1eedd-5831-49fe-b094-7435908ba1fb\", Message: \"4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4\"},\n\t\t\t{SequenceNum: 5, RequestStart: 1601645172164667, Stream: \"stderr\", RequestID: \"44a1eedd-5831-49fe-b094-7435908ba1fb\", Message: \"5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5\"},\n\t\t\t{SequenceNum: 6, RequestStart: 1601645172164667, Stream: \"stderr\", RequestID: \"44a1eedd-5831-49fe-b094-7435908ba1fb\", Message: \"6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6\"},\n\t\t\t{SequenceNum: 7, RequestStart: 1601645172164667, Stream: \"stdout\", RequestID: \"44a1eedd-5831-49fe-b094-7435908ba1fb\", Message: \"7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7\"},\n\t\t\t{SequenceNum: 8, RequestStart: 1601645172164667, Stream: \"stdout\", RequestID: \"44a1eedd-5831-49fe-b094-7435908ba1fb\", Message: \"8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8\"},\n\t\t\t{SequenceNum: 9, RequestStart: 1601645172164667, Stream: \"stderr\", RequestID: \"44a1eedd-5831-49fe-b094-7435908ba1fb\", Message: \"9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9\"},\n\t\t\t{SequenceNum: 10, RequestStart: 
1601645172164667, Stream: \"stdout\", RequestID: \"44a1eedd-5831-49fe-b094-7435908ba1fb\", Message: \"10 10 10 10 10 10 10 10 10 10 10 10 10 10\"},\n\t\t},\n\t}\n\n\tif diff := cmp.Diff(want, got); diff != \"\" {\n\t\tt.Errorf(\"parseResponseData mismatch (-want +got):\\n%s\", diff)\n\t}\n}", "func (up *UnitParser) Parse(line string, store Store) error {\n\tm := util.MapRegex(line, up.regex)\n\tvar u Unit\n\tutil.Unmarshal(m, &u)\n\terr := util.ValidateRoman(u.Roman)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = store.Add(\"unit\", u.Intergalactic, u.Roman)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (m *Match) Apply(res *benchfmt.Result) bool {\n\tif m.All() {\n\t\treturn true\n\t}\n\tif !m.Any() {\n\t\tres.Values = res.Values[:0]\n\t\treturn false\n\t}\n\n\tj := 0\n\tfor i, val := range res.Values {\n\t\tif m.Test(i) {\n\t\t\tres.Values[j] = val\n\t\t\tj++\n\t\t}\n\t}\n\tres.Values = res.Values[:j]\n\treturn j > 0\n}", "func (p *Parser) Parse(input string) (criteria List) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tglog.Errorf(\"%v: %q\\n\", r, input)\n\t\t\tcriteria = NewList()\n\t\t}\n\t}()\n\tp.lexer = NewLexer(input)\n\tp.tokens = make([]*Token, 0)\n\tcriteria = p.parseSegment(tokenEOF)\n\tcriteria.TrimItems()\n\treturn\n}", "func (a Analyzer) Parse(re *syntax.Regexp) *Node {\n\treturn analyze(re, true)\n}", "func (p RowData) Parse(f []*Field, binary bool) ([]interface{}, error) {\n\tif binary {\n\t\treturn p.ParseBinary(f)\n\t}\n\treturn p.ParseText(f)\n}", "func (r *PAFFile) ParseRecords() {\n\tr.Records = []PAFRecord{}\n\tfh := mustOpen(r.PafFile)\n\n\tlog.Noticef(\"Parse paffile `%s`\", r.PafFile)\n\treader := bufio.NewReader(fh)\n\tvar rec PAFRecord\n\n\tfor {\n\t\trow, err := reader.ReadString('\\n')\n\t\trow = strings.TrimSpace(row)\n\t\tif row == \"\" && err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\twords := strings.Split(row, \"\\t\")\n\t\tif len(words) < 12 || len(words[4]) < 1 {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Parse the first 12 columns\n\t\trec.Query = words[0]\n\t\trec.QueryLength, _ = strconv.Atoi(words[1])\n\t\trec.QueryStart, _ = strconv.Atoi(words[2])\n\t\trec.QueryEnd, _ = strconv.Atoi(words[3])\n\t\trec.RelativeStrand = words[4][0]\n\t\trec.Target = words[5]\n\t\trec.TargetLength, _ = strconv.Atoi(words[6])\n\t\trec.TargetStart, _ = strconv.Atoi(words[7])\n\t\trec.TargetEnd, _ = strconv.Atoi(words[8])\n\t\trec.NumMatches, _ = strconv.Atoi(words[9])\n\t\trec.AlignmentLength, _ = strconv.Atoi(words[10])\n\t\tmappingQuality, _ := strconv.Atoi(words[11])\n\t\trec.MappingQuality = uint8(mappingQuality)\n\t\trec.Tags = map[string]Tag{}\n\t\tvar tag Tag\n\n\t\t// Parse columns 12+\n\t\tfor i := 12; i < len(words); i++ {\n\t\t\ttokens := strings.Split(words[i], \":\")\n\t\t\tif len(tokens) < 3 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttagName := tokens[0]\n\t\t\tvalue := tokens[2]\n\t\t\tswitch tokens[1] {\n\t\t\tcase \"i\":\n\t\t\t\ttag, _ = strconv.Atoi(value)\n\t\t\tcase \"f\":\n\t\t\t\ttag, _ = strconv.ParseFloat(value, 32)\n\t\t\tdefault:\n\t\t\t\ttag = value\n\t\t\t}\n\t\t\trec.Tags[tagName] = tag\n\t\t}\n\n\t\tr.Records = append(r.Records, rec)\n\t}\n}", "func (e *CzechNationalBankDataSource) Parse(c *core.Context, content []byte) (*models.LatestExchangeRateResponse, error) {\n\tlines := strings.Split(string(content), \"\\n\")\n\n\tif len(lines) < 3 {\n\t\tlog.ErrorfWithRequestId(c, \"[czech_national_bank_datasource.Parse] content is invalid, content is %s\", string(content))\n\t\treturn nil, 
errs.ErrFailedToRequestRemoteApi\n\t}\n\n\theaderLineItems := strings.Split(lines[0], \"#\")\n\n\tif len(headerLineItems) != 2 {\n\t\tlog.ErrorfWithRequestId(c, \"[czech_national_bank_datasource.Parse] first line of content is invalid, content is %s\", lines[0])\n\t\treturn nil, errs.ErrFailedToRequestRemoteApi\n\t}\n\n\tupdateDate := strings.TrimSpace(headerLineItems[0])\n\n\ttitleLineItems := strings.Split(lines[1], \"|\")\n\ttitleItemMap := make(map[string]int)\n\n\tfor i := 0; i < len(titleLineItems); i++ {\n\t\ttitleItemMap[titleLineItems[i]] = i\n\t}\n\n\tcurrencyCodeColumnIndex, exists := titleItemMap[\"Code\"]\n\n\tif !exists {\n\t\tlog.ErrorfWithRequestId(c, \"[czech_national_bank_datasource.Parse] missing currency code column in title line, title line is %s\", lines[1])\n\t\treturn nil, errs.ErrFailedToRequestRemoteApi\n\t}\n\n\tamountColumnIndex, exists := titleItemMap[\"Amount\"]\n\n\tif !exists {\n\t\tlog.ErrorfWithRequestId(c, \"[czech_national_bank_datasource.Parse] missing amount column in title line, title line is %s\", lines[1])\n\t\treturn nil, errs.ErrFailedToRequestRemoteApi\n\t}\n\n\trateColumnIndex, exists := titleItemMap[\"Rate\"]\n\n\tif !exists {\n\t\tlog.ErrorfWithRequestId(c, \"[czech_national_bank_datasource.Parse] missing rate column in title line, title line is %s\", lines[1])\n\t\treturn nil, errs.ErrFailedToRequestRemoteApi\n\t}\n\n\texchangeRates := make(models.LatestExchangeRateSlice, 0, len(lines)-2)\n\n\tfor i := 2; i < len(lines); i++ {\n\t\tline := strings.TrimSpace(lines[i])\n\t\texchangeRate := e.parseExchangeRate(c, line, currencyCodeColumnIndex, amountColumnIndex, rateColumnIndex)\n\n\t\tif exchangeRate != nil {\n\t\t\texchangeRates = append(exchangeRates, exchangeRate)\n\t\t}\n\t}\n\n\ttimezone, err := time.LoadLocation(czechNationalBankDataUpdateDateTimezone)\n\n\tif err != nil {\n\t\tlog.ErrorfWithRequestId(c, \"[czech_national_bank_datasource.Parse] failed to get timezone, timezone name is %s\", czechNationalBankDataUpdateDateTimezone)\n\t\treturn nil, errs.ErrFailedToRequestRemoteApi\n\t}\n\n\tupdateDateTime := updateDate + \" 14:30\" // Exchange rates of commonly traded currencies are declared every working day after 2.30 p.m.\n\tupdateTime, err := time.ParseInLocation(czechNationalBankDataUpdateDateFormat, updateDateTime, timezone)\n\n\tif err != nil {\n\t\tlog.ErrorfWithRequestId(c, \"[czech_national_bank_datasource.Parse] failed to parse update date, datetime is %s\", updateDateTime)\n\t\treturn nil, errs.ErrFailedToRequestRemoteApi\n\t}\n\n\tlatestExchangeRateResp := &models.LatestExchangeRateResponse{\n\t\tDataSource: czechNationalBankDataSource,\n\t\tReferenceUrl: czechNationalBankExchangeRateReferenceUrl,\n\t\tUpdateTime: updateTime.Unix(),\n\t\tBaseCurrency: czechNationalBankBaseCurrency,\n\t\tExchangeRates: exchangeRates,\n\t}\n\n\treturn latestExchangeRateResp, nil\n}", "func TestParser_Parse(t *testing.T) {\n\tp := &Parser{}\n\n\tdoc, err := p.Parse(\"test.json\", []byte(have))\n\trequire.NoError(t, err)\n\trequire.Len(t, doc, 1)\n\trequire.Contains(t, doc[0], \"martin\")\n}", "func ParseOne(data []byte) (redis.Reply, error) {\n\tch := make(chan *Payload)\n\treader := bytes.NewReader(data)\n\tgo parse0(reader, ch)\n\tpayload := <-ch // parse0 will close the channel\n\tif payload == nil {\n\t\treturn nil, errors.New(\"no reply\")\n\t}\n\treturn payload.Data, payload.Err\n}", "func (p *Plain) Parse(data []byte) (T, error) {\n\tt := newPlainT(p.archetypes)\n\n\t//convert to string and remove spaces according to unicode\n\tstr := 
strings.TrimRightFunc(string(data), unicode.IsSpace)\n\n\t//create element\n\te := NewElement(str)\n\n\t//set it as single table value\n\tt.Set(\".0\", e)\n\n\treturn t, nil\n}", "func ParseDataSet(r *bufio.Reader, bcc *Bcc) (*DataSet, error) {\n\t// read chars til Front boundary.\n\tvar b byte\n\tvar err error\n\tvar va [100]byte\n\tvar v = va[:0]\n\tres := &DataSet{}\n\n\t// Read the address till FrontBoundaryChar == (\n\tif verbose {\n\t\tlog.Println(\"Starting ParseDataSet\")\n\t}\n\n\tif verbose {\n\t\tlog.Println(\"Scanning for Address\")\n\t}\nScanAddress:\n\tfor {\n\t\tb, err = r.ReadByte()\n\t\tif err != nil {\n\t\t\treturn nil, ErrFormatNoChars\n\t\t}\n\t\tswitch b {\n\t\tcase CR, LF:\n\t\t\tr.UnreadByte()\n\t\t\treturn nil, ErrCRFound\n\t\tcase FrontBoundaryChar:\n\t\t\tbcc.Digest(b)\n\t\t\tbreak ScanAddress\n\t\tdefault:\n\t\t\tbcc.Digest(b)\n\t\t\tif !ValidAddressChar(b) {\n\t\t\t\treturn nil, ErrFormatError\n\t\t\t}\n\t\t\tv = append(v, b)\n\t\t\tif len(v) > 16 {\n\t\t\t\treturn nil, ErrAddressTooLong\n\t\t\t}\n\t\t}\n\t}\n\t// Address read.\n\tres.Address = string(v)\n\tv = v[:0]\n\n\t// Scan for value till * or )\n\tif verbose {\n\t\tlog.Println(\"Scanning for Value\")\n\t}\nScanValue:\n\tfor {\n\t\tb, err = r.ReadByte()\n\t\tif err != nil {\n\t\t\treturn nil, ErrFormatError\n\t\t}\n\t\tbcc.Digest(b)\n\t\tswitch b {\n\t\tcase RearBoundaryChar, UnitSeparator:\n\t\t\tbreak ScanValue\n\t\tdefault:\n\t\t\tif !ValidValueChar(b) {\n\t\t\t\treturn nil, ErrFormatError\n\t\t\t}\n\t\t\tv = append(v, b)\n\t\t\tif len(v) > 32 {\n\t\t\t\treturn nil, ErrValueTooLong\n\t\t\t}\n\t\t}\n\t}\n\tres.Value = string(v)\n\tif b == RearBoundaryChar {\n\t\tres.Unit = \"\"\n\t\treturn res, nil\n\t}\n\tv = v[:0]\n\n\tif verbose {\n\t\tlog.Println(\"Scanning for Unit\")\n\t}\nScanUnit:\n\tfor {\n\t\tb, err = r.ReadByte()\n\t\tif err != nil {\n\t\t\treturn nil, ErrFormatError\n\t\t}\n\t\tbcc.Digest(b)\n\t\tswitch b {\n\t\tcase RearBoundaryChar:\n\t\t\tbreak ScanUnit\n\t\tdefault:\n\t\t\tif !ValidValueChar(b) {\n\t\t\t\treturn nil, ErrFormatError\n\t\t\t}\n\t\t\tv = append(v, b)\n\t\t\tif len(v) > 16 {\n\t\t\t\treturn nil, ErrUnitTooLong\n\t\t\t}\n\t\t}\n\t}\n\tres.Unit = string(v)\n\treturn res, nil\n}", "func (m *MappingInfo) ParseRecord(record []string, mystructptr interface{}, dateFormat string) bool {\n\n\tif reflect.TypeOf(mystructptr) != reflect.PtrTo(m.typ) {\n\t\tlog.Fatalf(\"Expected type %s but got type %s\\n\", reflect.PtrTo(m.typ), reflect.TypeOf(mystructptr))\n\n\t\treturn false\n\t}\n\n\t// Cast to the structure to update\n\tmystruct := reflect.ValueOf(mystructptr).Elem()\n\n\t// Run over all the expected mappings\n\n\tfor _, f := range m.mappings {\n\n\t\tif f.name != \"\" && record[f.index] != \"\" {\n\n\t\t\tfield := mystruct.FieldByName(f.name)\n\n\t\t\tswitch field.Type().Kind() {\n\t\t\tcase reflect.String:\n\t\t\t\tfield.SetString(record[f.index])\n\t\t\tcase reflect.Int64:\n\t\t\t\ti, err := strconv.ParseInt(record[f.index], 10, 64)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(\"Bad int value \" + record[f.index])\n\t\t\t\t\tos.Exit(-1)\n\t\t\t\t}\n\t\t\t\tfield.SetInt(i)\n\t\t\tcase reflect.Float64:\n\t\t\t\tfp, err := strconv.ParseFloat(record[f.index], 64)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(\"Bad float value \" + record[f.index])\n\t\t\t\t\tos.Exit(-1)\n\t\t\t\t}\n\t\t\t\tfield.SetFloat(fp)\n\t\t\tcase reflect.Struct:\n\t\t\t\tif field.Type().Name() == \"Time\" {\n\n\t\t\t\t\ttheTime, err := time.Parse(dateFormat, record[f.index])\n\t\t\t\t\tif err == nil 
{\n\n\t\t\t\t\t\tfield.Set(reflect.ValueOf(theTime))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfmt.Printf(\"Bad time %s\\n\", record[f.index])\n\t\t\t\t\t\tlog.Fatal(\"Bad time \\n\" + record[f.index])\n\t\t\t\t\t\tos.Exit(-1)\n\t\t\t\t\t}\n\t\t\t\t\t//\thandle times\n\t\t\t\t} else {\n\t\t\t\t\tlog.Fatal(\"Unexpected struct type \" + field.Type().Name())\n\t\t\t\t\tos.Exit(-1)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tlog.Fatal(\"Unexpected field type \" + field.Type().Kind().String())\n\t\t\t\tos.Exit(-1)\n\t\t\t}\n\n\t\t}\n\t}\n\n\treturn true\n}", "func (p *Parser) Parse() error {\n\tchunks, err := p.chunks()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp.data = chunks\n\treturn nil\n}", "func (rdAddendumB *ReturnDetailAddendumB) Parse(record string) {\n\tif utf8.RuneCountInString(record) < 80 {\n\t\treturn // line too short\n\t}\n\n\t// Character position 1-2, Always \"33\"\n\trdAddendumB.setRecordType()\n\t// 03-20\n\trdAddendumB.PayorBankName = rdAddendumB.parseStringField(record[2:20])\n\t// 21-35\n\trdAddendumB.AuxiliaryOnUs = rdAddendumB.parseStringField(record[20:35])\n\t// 36-50\n\trdAddendumB.PayorBankSequenceNumber = rdAddendumB.parseStringField(record[35:50])\n\t// 51-58\n\trdAddendumB.PayorBankBusinessDate = rdAddendumB.parseYYYYMMDDDate(record[50:58])\n\t// 59-80\n\trdAddendumB.PayorAccountName = rdAddendumB.parseStringField(record[58:80])\n}", "func (p *DefaultParser) Parse(fields []string, b []byte) (*bytes.Buffer, error) {\n\tregexps, err := p.CompileRegex(fields)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trecords := [][]string{p.CSVHeader(regexps)}\n\tvar f []string\n\tfor _, r := range regexps {\n\t\tm := r.FindSubmatch(b)\n\t\tif len(m) != 2 {\n\t\t\tlog.Printf(\"Could not match regex %s\\n\", r.String())\n\t\t\tf = append(f, \"\\\"\\\"\")\n\t\t\tcontinue\n\t\t}\n\t\tlog.Printf(\"Matched %#v\", strings.TrimSpace(string(m[1])))\n\t\tf = append(f, strings.TrimSpace(string(m[1])))\n\t}\n\trecords = append(records, f)\n\n\tvar buf bytes.Buffer\n\t// This makes sure records are parsable CSV.\n\tw := csv.NewWriter(&buf)\n\tw.WriteAll(records)\n\tif err := w.Error(); err != nil {\n\t\tlog.Println(err)\n\t}\n\n\treturn &buf, nil\n}", "func (pq *PatientQuery) Parse(u *url.URL) {\n\tq := u.Query()\n\tpq.ID = q.Get(\"id\")\n\tpq.BirthDate = q.Get(\"birth_date\")\n}", "func tryEmptyResponse(data []byte) (interface{}, error) {\n\ttype emptyResponse struct {\n\t\tList []Article\n\t\tStatus int\n\t\tComplete int\n\t\tSince int\n\t}\n\n\tvar er emptyResponse\n\n\tif err := json.Unmarshal(data, &er); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &RetrieveResult{\n\t\tStatus: er.Status,\n\t\tComplete: er.Complete,\n\t\tSince: er.Since,\n\t\tList: map[string]Article{},\n\t}, nil\n}", "func (o StringObj) Parse() ([][]string, error) {\n\treturn [][]string{\n\t\t{string(*o.Prefix)},\n\t\t{o.Val},\n\t}, nil\n}", "func Parse(matcher []byte) (gomegatypes.GomegaMatcher, error) {\n\tvar m interface{}\n\tif err := json.Unmarshal(matcher, &m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn generateMatcher(m)\n}", "func (parser CsvParser) Parse() (interface{}, error) {\n\n\tio, err := parser.CsvReader.Reader()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar csvReader = csv.NewReader(io)\n\tcsvReader.Comma = parser.CsvSeparator\n\tcsvRows, err := csvReader.ReadAll()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar resultType = GetMetaType(parser.BindObject)\n\tif !checkType(resultType) {\n\t\treturn nil, errors.New(fmt.Sprintf(\"type %v not supported\", resultType.Name()))\n\t}\n\tresults := 
reflect.New(reflect.SliceOf(reflect.PtrTo(resultType)))\n\n\theaders := csvRows[0]\n\tbody := csvRows[1:]\n\tvar csvField = make(map[string]int)\n\tfor _, col := range headers {\n\t\tfor j := 0; j < resultType.NumField(); j+=1 {\n\t\t\tfield := resultType.Field(j)\n\t\t\ttag := field.Tag.Get(\"csv\")\n\t\t\tif col == tag {\n\t\t\t\tcsvField[col] = j\n\t\t\t}\n\t\t}\n\t}\n\n\n\tfor _, csvRow := range body {\n\t\tobj := reflect.New(resultType)\n\t\tfor j, csvCol := range csvRow {\n\t\t\tcolName := headers[j]\n\t\t\tidx, ok := csvField[colName]\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcurrentField := obj.Elem().Field(idx)\n\t\t\tif parser.Setter != nil && parser.Setter(currentField, colName, csvCol) {\n\t\t\t\tcontinue\n\t\t\t}else {\n\t\t\t\tsetField(currentField, csvCol, true)\n\t\t\t}\n\t\t}\n\t\tele := reflect.Append(results.Elem(), obj)\n\t\tresults.Elem().Set(ele)\n\t}\n\treturn results.Interface(), err\n}", "func NilParser([]byte) ParseResult {\n\treturn ParseResult{}\n}", "func loadTableTestResults(filename string, srcData *mockstore.Snapshot) (resultsVerifier, error) {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\ts := bufio.NewScanner(bufio.NewReader(f))\n\trow := 0\n\texp := ResultChunk{\n\t\tColumns: make(exec.Columns, 0, 4),\n\t}\n\tfor s.Scan() {\n\t\tline := strings.TrimSpace(s.Text())\n\t\trow++\n\t\tif len(line) == 0 || line[0] == '#' {\n\t\t\tcontinue\n\t\t}\n\t\tif len(exp.Columns) == 0 {\n\t\t\t// column headers. The Fields func splits the input on unicode.isSpace boundaries\n\t\t\tfor _, c := range strings.Fields(line) {\n\t\t\t\texp.Columns = append(exp.Columns, &plandef.Variable{Name: c})\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tcells := strings.Fields(line)\n\t\tif len(cells) != len(exp.Columns) {\n\t\t\treturn nil, fmt.Errorf(\"row %d has %d columns, but expecting %d\", row, len(cells), len(exp.Columns))\n\t\t}\n\t\tfor i, cell := range cells {\n\t\t\tparsed, err := parser.ParseTerm(cell)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"row %d column %d unable to parse '%s' into a valid term: %v\", row, i, cell, err)\n\t\t\t}\n\t\t\tval := exec.Value{}\n\n\t\t\tresolveUnitLanguageID := func(xid string) uint64 {\n\t\t\t\tif len(xid) > 0 {\n\t\t\t\t\treturn srcData.ResolveXID(xid)\n\t\t\t\t}\n\t\t\t\treturn uint64(0)\n\t\t\t}\n\t\t\tswitch t := parsed.(type) {\n\t\t\tcase *parser.Variable:\n\t\t\t\treturn nil, fmt.Errorf(\"row %d column %d unexpected variable %s found in results\", row, i, t)\n\t\t\tcase *parser.LiteralID:\n\t\t\t\treturn nil, fmt.Errorf(\"row %d column %d unexpected literal ID %s found in results\", row, i, t)\n\t\t\tcase *parser.LiteralBool:\n\t\t\t\tval.KGObject = rpc.ABool(t.Value, resolveUnitLanguageID(t.Unit.Value))\n\t\t\t\tif len(t.Unit.Value) > 0 {\n\t\t\t\t\tval.SetUnitExtID(t.Unit.Value)\n\t\t\t\t}\n\t\t\tcase *parser.LiteralFloat:\n\t\t\t\tval.KGObject = rpc.AFloat64(t.Value, resolveUnitLanguageID(t.Unit.Value))\n\t\t\t\tif len(t.Unit.Value) > 0 {\n\t\t\t\t\tval.SetUnitExtID(t.Unit.Value)\n\t\t\t\t}\n\t\t\tcase *parser.LiteralInt:\n\t\t\t\tval.KGObject = rpc.AInt64(t.Value, resolveUnitLanguageID(t.Unit.Value))\n\t\t\t\tif len(t.Unit.Value) > 0 {\n\t\t\t\t\tval.SetUnitExtID(t.Unit.Value)\n\t\t\t\t}\n\t\t\tcase *parser.LiteralTime:\n\t\t\t\tval.KGObject = rpc.ATimestamp(t.Value,\n\t\t\t\t\tlogentry.TimestampPrecision(t.Precision),\n\t\t\t\t\tresolveUnitLanguageID(t.Unit.Value))\n\t\t\t\tif len(t.Unit.Value) > 0 {\n\t\t\t\t\tval.SetUnitExtID(t.Unit.Value)\n\t\t\t\t}\n\t\t\tcase 
*parser.LiteralString:\n\t\t\t\tval.KGObject = rpc.AString(t.Value, resolveUnitLanguageID(t.Language.Value))\n\t\t\t\tval.SetLangExtID(t.Language.Value)\n\t\t\tcase *parser.QName:\n\t\t\t\tval.KGObject = rpc.AKID(srcData.ResolveXID(t.Value))\n\t\t\t\tval.ExtID = t.String()\n\t\t\tcase *parser.Entity:\n\t\t\t\tval.KGObject = rpc.AKID(srcData.ResolveXID(t.Value))\n\t\t\t\tval.ExtID = t.String()\n\t\t\tdefault:\n\t\t\t\treturn nil, fmt.Errorf(\"row %d column %d unexpected value type %T %s in results\", row, i, t, t)\n\t\t\t}\n\t\t\texp.Values = append(exp.Values, val)\n\t\t}\n\t}\n\tformat := func(r ResultChunk) []string {\n\t\tformatted := make([]string, r.NumRows()+1)\n\t\tformatted[0] = r.Columns.String()\n\t\tfor i := 0; i < r.NumRows(); i++ {\n\t\t\tformatted[i+1] = fmt.Sprintf(\"%v\", r.Row(i))\n\t\t}\n\t\treturn formatted\n\t}\n\tverify := func(t *testing.T, queryError error, actual ResultChunk) {\n\t\tassert.NoError(t, queryError)\n\t\tassert.Equal(t, format(exp), format(actual))\n\t}\n\treturn verify, nil\n}", "func Parse(input string) (tree *ParseResult, err error) {\n\tprotobufTree, err := parser.ParseToProtobuf(input)\n\tif err != nil {\n\t\treturn\n\t}\n\n\ttree = &ParseResult{}\n\terr = proto.Unmarshal(protobufTree, tree)\n\treturn\n}", "func (p *defaultParser) Parse(ctx context.Context, header http.Header) (*kit.Kit, error) {\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\n\tkt := &kit.Kit{\n\t\tCtx: ctx,\n\t\tUser: header.Get(constant.UserKey),\n\t\tRid: header.Get(constant.RidKey),\n\t\tAppCode: header.Get(constant.AppCodeKey),\n\t}\n\n\tif err := kt.Validate(); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"validate kit\")\n\t}\n\n\treturn kt, nil\n}", "func (m *Measurement) Result(name string) *Result {\n\tfor _, x := range m.Results {\n\t\tif x.Name == name {\n\t\t\treturn x\n\t\t}\n\t}\n\n\tr := &Result{}\n\tr.Name = name\n\tm.Results = append(m.Results, r)\n\treturn r\n}", "func (c *TITLE) Parse() string {\n\tEmpty := TITLE{}\n\tif *c != Empty {\n\t\treturn fmt.Sprintf(`\"title\": \"%v\"`, c.Title)\n\t}\n\treturn \"\"\n}", "func ParseAnalysis(content string) (*AnalysisResult, error) {\n\tdec := json.NewDecoder(strings.NewReader(content))\n\tres := &AnalysisResult{}\n\terr := dec.Decode(res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn res, nil\n}", "func (fmp *FMPXMLResult) PopulateRecords() error {\n\tfmp.populateFieldEncoders()\n\n\t// Empty out our record destination, and allocate it in a single go\n\tfmp.Records = make([]Record, len(fmp.ResultSet.Rows))\n\n\tfor i, row := range fmp.ResultSet.Rows {\n\t\trecord := Record{}\n\n\t\tif fmp.RecordIDField != \"\" {\n\t\t\trecordID, _ := json.Marshal(row.RecordID) // You can never fail to marshal a string\n\t\t\trecord[fmp.RecordIDField] = recordID\n\t\t}\n\n\t\tif fmp.ModIDField != \"\" {\n\t\t\tmodID, _ := json.Marshal(row.ModID) // You can never fail to marshal a string\n\t\t\trecord[fmp.ModIDField] = modID\n\t\t}\n\n\t\tif len(row.Cols) != len(fmp.positionalColumnData) {\n\t\t\treturn fmt.Errorf(\"Row %d column count mismatch: have %d, expect %d\", i, len(row.Cols), len(fmp.positionalColumnData))\n\t\t}\n\n\t\tfor j, col := range row.Cols {\n\t\t\tpositionalItem := fmp.positionalColumnData[j]\n\n\t\t\tencoder := positionalItem.encoder\n\t\t\tname := positionalItem.name\n\n\t\t\tencoded, err := encoder(col.Data)\n\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Unable to encode row %d column %d: %v\", i, j, err)\n\t\t\t}\n\n\t\t\trecord[name] = encoded\n\t\t}\n\n\t\tfmp.Records[i] = 
record\n\t}\n\n\treturn nil\n}", "func Parse(REPORT_FILE *string) (*nmaprun, error) {\n\tREPORT := nmaprun{}\n\txmlContent, _ := ioutil.ReadFile(*REPORT_FILE)\n\terr := xml.Unmarshal(xmlContent, &REPORT)\n\tif err != nil {\n\t\treturn &nmaprun{}, err\n\t}\n\treturn &REPORT, nil\n}", "func ParseReport(message string) *JobReport {\n\tcontents := strings.Split(message, \"\\n\")\n\tstart := 0\n\tisReport := false\n\tfor start < len(contents) {\n\t\tif isHeaderMessageLine(contents[start]) {\n\t\t\tisReport = true\n\t\t\tbreak\n\t\t}\n\t\tstart++\n\t}\n\tif !isReport {\n\t\treturn nil\n\t}\n\tvar report JobReport\n\treport.Header = contents[start] + \"\\n\"\n\tfor i := start + 1; i < len(contents); i++ {\n\t\tif contents[i] == \"\" || isErrorMessageLine(contents[i]) {\n\t\t\tcontinue\n\t\t}\n\t\tvar j Job\n\t\tif err := deserialize(contents[i], &j); err != nil {\n\t\t\tlogrus.Warn(err)\n\t\t\tcontinue\n\t\t}\n\t\treport.Total++\n\t\tif j.State == v1.SuccessState {\n\t\t\treport.Success++\n\t\t}\n\t\treport.Jobs = append(report.Jobs, j)\n\t}\n\treport.Message = strings.TrimPrefix(message, report.Header+\"\\n\")\n\treturn &report\n}", "func Parse(r io.Reader, pkgName string) (*Report, error) {\n\treader := bufio.NewReader(r)\n\n\treport := &Report{make([]Package, 0)}\n\n\t// keep track of tests we find\n\tvar tests []*Test\n\n\t// sum of tests' time, use this if current test has no result line (when it is compiled test)\n\tvar testsTime time.Duration\n\n\t// current test\n\tvar cur string\n\n\t// coverage percentage report for current package\n\tvar coveragePct string\n\n\t// stores mapping between package name and output of build failures\n\tvar packageCaptures = map[string][]string{}\n\n\t// the name of the package which it's build failure output is being captured\n\tvar capturedPackage string\n\n\t// capture any non-test output\n\tvar buffers = map[string][]string{}\n\n\t// parse lines\n\tlogContinuing := false\n\tfor {\n\t\tl, _, err := reader.ReadLine()\n\t\tif err != nil && err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tline := string(l)\n\n\t\twasOutput := false\n\t\tif strings.HasPrefix(line, \"=== RUN \") {\n\t\t\t// new test\n\t\t\tcur = strings.TrimSpace(line[8:])\n\t\t\ttests = append(tests, &Test{\n\t\t\t\tName: cur,\n\t\t\t\tResult: FAIL,\n\t\t\t\tOutput: make([]string, 0),\n\t\t\t})\n\n\t\t\t// clear the current build package, so output lines won't be added to that build\n\t\t\tcapturedPackage = \"\"\n\t\t} else if matches := regexBenchmark.FindStringSubmatch(line); len(matches) > 0 {\n\t\t\tif test := findTest(tests, cur); test != nil &&\n\t\t\t\tlen(test.Output) >= 3 &&\n\t\t\t\tstrings.HasPrefix(test.Output[len(test.Output)-3], \"goos: \") &&\n\t\t\t\tstrings.HasPrefix(test.Output[len(test.Output)-2], \"goarch: \") &&\n\t\t\t\tstrings.HasPrefix(test.Output[len(test.Output)-1], \"pkg: \") {\n\t\t\t\t// benchmarks header was interpreted as test output for the last test; discard it\n\t\t\t\ttest.Output = test.Output[:len(test.Output)-3]\n\t\t\t}\n\t\t\tcur = matches[1]\n\n\t\t\t//bytes, _ := strconv.Atoi(matches[4])\n\t\t\t//allocs, _ := strconv.Atoi(matches[5])\n\n\t\t\ttest := findTest(tests, cur)\n\t\t\tif test == nil {\n\t\t\t\t// first execution of this benchmark\n\t\t\t\ttest = &Test{\n\t\t\t\t\tName: cur,\n\t\t\t\t\tResult: PASS,\n\t\t\t\t\tDuration: parseNanoseconds(matches[3]),\n\t\t\t\t\tOutput: make([]string, 0),\n\t\t\t\t}\n\t\t\t\ttests = append(tests, test)\n\t\t\t} else {\n\t\t\t\t// repeated execution of the same 
benchmark with different N\n\t\t\t\ttest.Duration = parseNanoseconds(matches[3])\n\t\t\t}\n\t\t\ttest.Output = append(test.Output, line)\n\t\t} else if strings.HasPrefix(line, \"=== PAUSE \") {\n\t\t\tcontinue\n\t\t} else if strings.HasPrefix(line, \"=== CONT \") {\n\t\t\tcur = strings.TrimSpace(line[8:])\n\t\t\tcontinue\n\t\t} else if matches := regexResult.FindStringSubmatch(line); len(matches) == 6 {\n\t\t\tif matches[5] != \"\" {\n\t\t\t\tcoveragePct = matches[5]\n\t\t\t}\n\t\t\tif strings.HasSuffix(matches[4], \"failed]\") {\n\t\t\t\t// the build of the package failed, inject a test error into the package\n\t\t\t\t// which indicate about the error and contain the error description.\n\t\t\t\ttests = append(tests, &Test{\n\t\t\t\t\tName: matches[4],\n\t\t\t\t\tResult: ERROR,\n\t\t\t\t\tOutput: packageCaptures[matches[2]],\n\t\t\t\t})\n\t\t\t} else if matches[1] == \"FAIL\" && !containsFailures(tests) && len(buffers[cur]) > 0 {\n\t\t\t\t// This package didn't have any failing tests, but still it\n\t\t\t\t// failed with some output. Create a dummy test with the\n\t\t\t\t// output.\n\t\t\t\ttests = append(tests, &Test{\n\t\t\t\t\tName: \"Error\",\n\t\t\t\t\tResult: ERROR,\n\t\t\t\t\tOutput: buffers[cur],\n\t\t\t\t})\n\t\t\t\tbuffers[cur] = buffers[cur][0:0]\n\t\t\t}\n\n\t\t\t// all tests in this package are finished\n\t\t\treport.Packages = append(report.Packages, Package{\n\t\t\t\tName: matches[2],\n\t\t\t\tDuration: parseSeconds(matches[3]),\n\t\t\t\tTests: tests,\n\t\t\t\tCoveragePct: coveragePct,\n\n\t\t\t\tTime: int(parseSeconds(matches[3]) / time.Millisecond), // deprecated\n\t\t\t})\n\n\t\t\tbuffers[cur] = buffers[cur][0:0]\n\t\t\ttests = make([]*Test, 0)\n\t\t\tcoveragePct = \"\"\n\t\t\tcur = \"\"\n\t\t\ttestsTime = 0\n\t\t} else if matches := regexStatus.FindStringSubmatch(line); len(matches) == 4 {\n\t\t\tcur = matches[2]\n\t\t\ttest := findTest(tests, cur)\n\t\t\tif test == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// test status\n\t\t\tif matches[1] == \"PASS\" {\n\t\t\t\ttest.Result = PASS\n\t\t\t} else if matches[1] == \"SKIP\" {\n\t\t\t\ttest.Result = SKIP\n\t\t\t} else {\n\t\t\t\ttest.Result = FAIL\n\t\t\t}\n\n\t\t\tif matches := regexIndent.FindStringSubmatch(line); len(matches) == 2 {\n\t\t\t\ttest.SubtestIndent = countIndent(matches[1])\n\t\t\t}\n\n\t\t\ttest.Output = append(test.Output, buffers[cur]...)\n\n\t\t\ttest.Name = matches[2]\n\t\t\ttest.Duration = parseSeconds(matches[3])\n\t\t\ttestsTime += test.Duration\n\n\t\t\ttest.Time = int(test.Duration / time.Millisecond) // deprecated\n\t\t} else if matches := regexCoverage.FindStringSubmatch(line); len(matches) == 2 {\n\t\t\tcoveragePct = matches[1]\n\t\t} else if strings.HasPrefix(line, \"# \") {\n\t\t\t// indicates a capture of build output of a package. 
set the current build package.\n\n\t\t\tline = strings.TrimPrefix(line, \"# \")\n\t\t\t// when go test -cover is run, a build error looks different\n\t\t\t// e.g.: \"# cover package/name\"\n\t\t\tline = strings.TrimPrefix(line, \"cover \")\n\n\t\t\tpackageWithTestBinary := regexPackageWithTest.FindStringSubmatch(line)\n\t\t\tif packageWithTestBinary != nil {\n\t\t\t\t// Sometimes, the text after \"# \" shows the name of the test binary\n\t\t\t\t// (\"<package>.test\") in addition to the package\n\t\t\t\t// e.g.: \"# package/name [package/name.test]\"\n\t\t\t\tcapturedPackage = packageWithTestBinary[1]\n\t\t\t} else {\n\t\t\t\tcapturedPackage = line\n\t\t\t}\n\t\t} else if capturedPackage != \"\" {\n\t\t\t// current line is build failure capture for the current built package\n\t\t\tpackageCaptures[capturedPackage] = append(packageCaptures[capturedPackage], line)\n\t\t} else if regexSummary.MatchString(line) {\n\t\t\t// unset current test name so any additional output after the\n\t\t\t// summary is captured separately.\n\t\t\tcur = \"\"\n\t\t} else {\n\t\t\t// if we have a current test, append to its output\n\t\t\ttest := findTest(tests, cur)\n\n\t\t\tif test != nil && regexLog.MatchString(line) {\n\t\t\t\t// strip the correct amount of indentation\n\t\t\t\tline = stripIndent(line, test.SubtestIndent+1)\n\t\t\t\tlogContinuing = true\n\t\t\t} else if logContinuing && countIndent(line) >= test.SubtestIndent+2 {\n\t\t\t\t// continuation of the previous log line\n\t\t\t\tline = stripIndent(line, test.SubtestIndent+1)\n\t\t\t} else {\n\t\t\t\tlogContinuing = false\n\t\t\t}\n\n\t\t\tif test != nil {\n\t\t\t\ttest.Output = append(test.Output, line)\n\t\t\t} else {\n\t\t\t\t// buffer anything else that we didn't recognize\n\t\t\t\tbuffers[cur] = append(buffers[cur], line)\n\t\t\t}\n\t\t\twasOutput = true\n\t\t}\n\t\tif !wasOutput {\n\t\t\tlogContinuing = false\n\t\t}\n\t}\n\n\tif len(tests) > 0 {\n\t\t// no result line found\n\t\treport.Packages = append(report.Packages, Package{\n\t\t\tName: pkgName,\n\t\t\tDuration: testsTime,\n\t\t\tTime: int(testsTime / time.Millisecond),\n\t\t\tTests: tests,\n\t\t\tCoveragePct: coveragePct,\n\t\t})\n\t}\n\n\treturn report, nil\n}", "func (o *Output) Record(result testrunner.TestResult) {\n\tif o.Summary != nil {\n\t\to.Summary.Record(result)\n\t}\n\tif o.TAP != nil {\n\t\to.TAP.Record(result)\n\t}\n\tif o.Tar != nil {\n\t\to.Tar.Record(result)\n\t}\n}", "func TestSimpleParse(t *testing.T) {\n\tp := &Parser{\n\t\tPatterns: []string{\"%{TESTLOG}\"},\n\t\tCustomPatterns: `\n\t\t\tTESTLOG %{NUMBER:num:long} %{WORD:client}\n\t\t`,\n\t}\n\tassert.NoError(t, p.compile())\n\n\tm, err := p.parse(`142 bot`)\n\tassert.Nil(t, err)\n\trequire.NotNil(t, m)\n\n\tassert.Equal(t,\n\t\tData{\n\t\t\t\"num\": int64(142),\n\t\t\t\"client\": \"bot\",\n\t\t},\n\t\tm)\n\n\tp.labels = GetGrokLabels([]string{\"app logkit\", \"client pandora\"}, make(map[string]struct{}))\n\tm, err = p.parse(`142 bot`)\n\tassert.Nil(t, err)\n\tassert.EqualValues(t, Data{\"client\": \"bot\", \"app\": \"logkit\", \"num\": int64(142)}, m)\n\n\tp = &Parser{\n\t\tPatterns: []string{\"%{TESTLOG}\"},\n\t\tCustomPatterns: `\n\t\t\tTESTLOG %{NUMBER:num:date} %{WORD:client}\n\t\t`,\n\t}\n\tm, err = p.parse(`142 bot`)\n\tassert.NotNil(t, err)\n\tassert.EqualValues(t, Data(nil), m)\n}", "func (o *DataMatrix) HasResults() bool {\n\tif o != nil && o.Results != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (m *MockParser) Parse(arg0 string) ([]*srtfix.Caption, error) {\n\tret := m.ctrl.Call(m, \"Parse\", 
arg0)\n\tret0, _ := ret[0].([]*srtfix.Caption)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (e cfxManager) ParseTestResponse(data []byte) error {\n\tif e.p == subscriber.RPC {\n\t\tvar msg JsonrpcMessage\n\t\tif err := json.Unmarshal(data, &msg); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvar res string\n\t\tif err := json.Unmarshal(msg.Result, &res); err != nil {\n\t\t\treturn err\n\t\t}\n\t\te.fq.FromEpoch = res\n\t}\n\n\treturn nil\n}", "func ProcessRecords(records [][]string, headers []string, headerMap map[string]int) (Result, error) {\n\tresult := Result{}\n\tfield := models.Fields{}\n\tvalidRecords := [][]string{}\n\tinValidRecords := [][]string{}\n\temailMap := map[string]string{}\n\tfor _, record := range records {\n\t\terr := field.FormData(record, headerMap)\n\t\tif err != nil {\n\t\t\tinValidRecords = append(inValidRecords, record)\n\t\t\tfmt.Println(fmt.Sprintf(\"error processing record: %v, error: %v\", record, err))\n\t\t\tcontinue\n\t\t}\n\t\temployee := field.GetData()\n\t\tif emailMap[employee.Email] == \"\" {\n\t\t\tvalidRecords = append(validRecords, employee.ToSlice())\n\t\t\temailMap[employee.Email] = employee.Id\n\t\t\tcontinue\n\t\t}\n\t\tinValidRecords = append(inValidRecords, record)\n\t\tfmt.Println(fmt.Sprintf(\"error processing record: %v, error: %v\", record, \"email already exist\"))\n\t}\n\tresult.InValidRecords = inValidRecords\n\tresult.InValidRecordsHeader = headers\n\tresult.ValidRecords = validRecords\n\tresult.ValidRecordsHeader = field.GetHeader()\n\treturn result, nil\n}", "func TestGetDataAndParseResponse(t *testing.T) {\n\telectricCount, boogalooCount := getDataAndParseResponse()\n\tif electricCount < 1 {\n\t\tt.Errorf(\"expected more than one name 'Electric', recieved: %d\", electricCount)\n\t}\n\tif boogalooCount < 1 {\n\t\tt.Errorf(\"expected more than one name 'Boogaloo', recieved: %d\", boogalooCount)\n\t}\n}", "func (s *IndexQuery) Parse(req *http.Request) error {\n\tswitch req.Header.Get(contentType) {\n\tcase mimeJSON:\n\t\tbody, err := ioutil.ReadAll(req.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := json.Unmarshal(body, s); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase mimeForm:\n\t\tif err := req.ParseForm(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor key, val := range req.Form {\n\t\t\tswitch key {\n\t\t\tcase \"jsonp\":\n\t\t\t\ts.JSONP = val[0]\n\t\t\t}\n\t\t}\n\t}\n\n\tparams := req.URL.Query()\n\tif len(params.Get(\"jsonp\")) != 0 {\n\t\ts.JSONP = params.Get(\"jsonp\")\n\t}\n\n\treturn nil\n}", "func (dr *Templates) MatchParsed(r map[string]interface{}, templateid string) bool {\n\ttpl := dr.GetParsed(templateid)\n\treturn (strings.Compare(tpl[\"CODE\"].(string), r[\"CODE\"].(string)) == 0 &&\n\t\tstrings.Compare(tpl[\"DESCRIPTION\"].(string), r[\"DESCRIPTION\"].(string)) == 0)\n}", "func (p *Parser) Parse() int {\n\tp.rootNode = nil\n\n\treturn yyParse(p)\n}", "func parse(scanner *bufio.Scanner) (data.MEntries, error) {\n\tvar e data.MEntries\n\tseen := make(map[string]bool)\n\n\ts := result.MEntry{}\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tlog.Println(\"Line: %s\", line)\n\t\tm := titleRe.FindStringSubmatch(line)\n\t\tif len(m) > 0 && line == strings.ToUpper(line) {\n\t\t\tlog.Println(\"Title: %s\", m[1])\n\t\t\t// Clear the previous entry.\n\t\t\tif s.Name != \"\" && s.SRating > 0 {\n\t\t\t\ts.Key = key(s.Name, s.Locale)\n\t\t\t\tif _, exists := seen[s.Key]; exists {\n\t\t\t\t\tlog.Println(\"Ignoring duplicate: %s (its ok)\", 
s.Key)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tseen[s.Key] = true\n\t\t\t\te.Entries = append(e.Entries, s)\n\t\t\t}\n\t\t\ts = result.MEntry{Name: m[1]}\n\t\t\tcontinue\n\t\t}\n\t\tm = sRatingRe.FindStringSubmatch(line)\n\t\tif len(m) > 0 {\n\t\t\tlog.Println(\"SRating: %s\", m[1])\n\t\t\ts.SRating, _ = strconv.Atoi(m[1])\n\t\t\tcontinue\n\t\t}\n\t\tm = LocaleRe.FindStringSubmatch(line)\n\t\tif s.SRating > 0 && len(m) > 0 {\n\t\t\tlog.Println(\"Locale: %s\", m[1])\n\t\t\ts.Locale = m[1]\n\t\t}\n\n\t\tm = DescRe.FindStringSubmatch(line)\n\t\tif s.SRating > 0 && len(m) > 0 {\n\t\t\tlog.Println(\"Desc: %s\", m[1])\n\t\t\tif s.Desc == \"\" {\n\t\t\t\ts.Desc = m[1]\n\t\t\t} else if len(strings.Split(s.Desc, \" \")) < MaxDescWords {\n\t\t\t\ts.Desc = s.Desc + \" \" + m[1]\n\t\t\t}\n\t\t\twords := strings.Split(s.Desc, \" \")\n\t\t\tif len(words) > MaxDescWords {\n\t\t\t\ts.Desc = strings.Join(words[0:MaxDescWords], \" \") + \" ...\"\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t}\n\ts.Key = key(s.Name, s.Locale)\n\tif _, exists := seen[s.Key]; exists {\n\t\treturn e, fmt.Errorf(\"%s was already seen\", s.Key)\n\t}\n\tseen[s.Key] = true\n\te.Entries = append(e.Entries, s)\n\treturn e, nil\n}", "func TestMyParser(t *testing.T) {\n\ttables := []struct {\n\t\tmyQuery string\n\t\terr error\n\t\treqCols []string\n\t\talias string\n\t\tmyLimit int\n\t\taggFuncs []string\n\t\theader []string\n\t}{\n\t\t{\"SELECT * FROM S3OBJECT\", nil, []string{\"*\"}, \"S3OBJECT\", 0, make([]string, 1), []string{\"name1\", \"name2\", \"name3\", \"name4\"}},\n\t\t{\"SELECT * FROM S3OBJECT AS A\", nil, []string{\"*\"}, \"A\", 0, make([]string, 1), []string{\"name1\", \"name2\", \"name3\", \"name4\"}},\n\t\t{\"SELECT col_name FROM S3OBJECT AS A\", nil, []string{\"col_name\"}, \"A\", 0, make([]string, 1), []string{\"col_name\", \"name2\", \"name3\", \"name4\"}},\n\t\t{\"SELECT col_name,col_other FROM S3OBJECT AS A LIMIT 5\", nil, []string{\"col_name\", \"col_other\"}, \"A\", 5, make([]string, 2), []string{\"col_name\", \"col_other\", \"name3\", \"name4\"}},\n\t\t{\"SELECT col_name,col_other FROM S3OBJECT AS A WHERE col_name = 'Name' LIMIT 5\", nil, []string{\"col_name\", \"col_other\"}, \"A\", 5, make([]string, 2), []string{\"col_name\", \"col_other\", \"name3\", \"name4\"}},\n\t\t{\"SELECT col_name,col_other FROM S3OBJECT AS A WHERE col_name = 'Name LIMIT 5\", ErrLexerInvalidChar, nil, \"\", 0, nil, []string{\"col_name\", \"col_other\", \"name3\", \"name4\"}},\n\t\t{\"SELECT count(*) FROM S3OBJECT AS A WHERE col_name = 'Name' LIMIT 5\", nil, []string{\"*\"}, \"A\", 5, []string{\"count\"}, []string{\"col_name\", \"col_other\", \"name3\", \"name4\"}},\n\t\t{\"SELECT sum(col_name),sum(col_other) FROM S3OBJECT AS A WHERE col_name = 'Name' LIMIT 5\", nil, []string{\"col_name\", \"col_other\"}, \"A\", 5, []string{\"sum\", \"sum\"}, []string{\"col_name\", \"col_other\"}},\n\t\t{\"SELECT A.col_name FROM S3OBJECT AS A\", nil, []string{\"col_name\"}, \"A\", 0, make([]string, 1), []string{\"col_name\", \"col_other\", \"name3\", \"name4\"}},\n\t\t{\"SELECT A._col_name FROM S3OBJECT AS A\", nil, []string{\"col_name\"}, \"A\", 0, make([]string, 1), []string{\"col_name\", \"col_other\", \"name3\", \"name4\"}},\n\t\t{\"SELECT A._col_name FROM S3OBJECT AS A WHERE randomname > 5\", ErrMissingHeaders, nil, \"\", 0, nil, []string{\"col_name\", \"col_other\", \"name3\", \"name4\"}},\n\t\t{\"SELECT A._col_name FROM S3OBJECT AS A WHERE A._11 > 5\", ErrInvalidColumnIndex, nil, \"\", 0, nil, []string{\"col_name\", \"col_other\", \"name3\", 
\"name4\"}},\n\t\t{\"SELECT COALESCE(col_name,col_other) FROM S3OBJECT AS A WHERE A._3 > 5\", nil, []string{\"\"}, \"A\", 0, []string{\"\"}, []string{\"col_name\", \"col_other\", \"name3\", \"name4\"}},\n\t\t{\"SELECT COALESCE(col_name,col_other),COALESCE(col_name,col_other) FROM S3OBJECT AS A WHERE A._3 > 5\", nil, []string{\"\", \"\"}, \"A\", 0, []string{\"\", \"\"}, []string{\"col_name\", \"col_other\", \"name3\", \"name4\"}},\n\t\t{\"SELECT COALESCE(col_name,col_other) ,col_name , COALESCE(col_name,col_other) FROM S3OBJECT AS A WHERE col_name > 5\", nil, []string{\"\", \"col_name\", \"\"}, \"A\", 0, []string{\"\", \"\", \"\"}, []string{\"col_name\", \"col_other\", \"name3\", \"name4\"}},\n\t\t{\"SELECT NULLIF(col_name,col_other) ,col_name , COALESCE(col_name,col_other) FROM S3OBJECT AS A WHERE col_name > 5\", nil, []string{\"\", \"col_name\", \"\"}, \"A\", 0, []string{\"\", \"\", \"\"}, []string{\"col_name\", \"col_other\", \"name3\", \"name4\"}},\n\t\t{\"SELECT NULLIF(col_name,col_other) FROM S3OBJECT AS A WHERE col_name > 5\", nil, []string{\"\"}, \"A\", 0, []string{\"\"}, []string{\"col_name\", \"col_other\", \"name3\", \"name4\"}},\n\t\t{\"SELECT NULLIF(randomname,col_other) FROM S3OBJECT AS A WHERE col_name > 5\", ErrMissingHeaders, nil, \"\", 0, nil, []string{\"col_name\", \"col_other\", \"name3\", \"name4\"}},\n\t\t{\"SELECT col_name FROM S3OBJECT AS A WHERE COALESCE(random,5) > 5\", ErrMissingHeaders, nil, \"\", 0, nil, []string{\"col_name\", \"col_other\", \"name3\", \"name4\"}},\n\t\t{\"SELECT col_name FROM S3OBJECT AS A WHERE NULLIF(random,5) > 5\", ErrMissingHeaders, nil, \"\", 0, nil, []string{\"col_name\", \"col_other\", \"name3\", \"name4\"}},\n\t\t{\"SELECT col_name FROM S3OBJECT AS A WHERE LOWER(col_name) BETWEEN 5 AND 7\", nil, []string{\"col_name\"}, \"A\", 0, []string{\"\"}, []string{\"col_name\", \"col_other\", \"name3\", \"name4\"}},\n\t\t{\"SELECT UPPER(col_name) FROM S3OBJECT AS A WHERE LOWER(col_name) BETWEEN 5 AND 7\", nil, []string{\"\"}, \"A\", 0, []string{\"\"}, []string{\"col_name\", \"col_other\", \"name3\", \"name4\"}},\n\t\t{\"SELECT UPPER(*) FROM S3OBJECT AS A WHERE LOWER(col_name) BETWEEN 5 AND 7\", ErrParseUnsupportedCallWithStar, nil, \"\", 0, nil, []string{\"col_name\", \"col_other\", \"name3\", \"name4\"}},\n\t\t{\"SELECT NULLIF(col_name,col_name) FROM S3OBJECT AS A WHERE NULLIF(LOWER(col_name),col_name) BETWEEN 5 AND 7\", nil, []string{\"\"}, \"A\", 0, []string{\"\"}, []string{\"col_name\", \"col_other\", \"name3\", \"name4\"}},\n\t\t{\"SELECT COALESCE(col_name,col_name) FROM S3OBJECT AS A WHERE NULLIF(LOWER(col_name),col_name) BETWEEN 5 AND 7\", nil, []string{\"\"}, \"A\", 0, []string{\"\"}, []string{\"col_name\", \"col_other\", \"name3\", \"name4\"}},\n\t}\n\tfor _, table := range tables {\n\t\toptions := &Options{\n\t\t\tHasHeader: false,\n\t\t\tRecordDelimiter: \"\\n\",\n\t\t\tFieldDelimiter: \",\",\n\t\t\tComments: \"\",\n\t\t\tName: \"S3Object\", // Default table name for all objects\n\t\t\tReadFrom: bytes.NewReader([]byte(\"name1,name2,name3,name4\" + \"\\n\" + \"5,is,a,string\" + \"\\n\" + \"random,random,stuff,stuff\")),\n\t\t\tCompressed: \"\",\n\t\t\tExpression: \"\",\n\t\t\tOutputFieldDelimiter: \",\",\n\t\t\tStreamSize: 20,\n\t\t\tHeaderOpt: true,\n\t\t}\n\t\ts3s, err := NewInput(options)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\ts3s.header = table.header\n\t\treqCols, alias, myLimit, _, aggFunctionNames, _, err := s3s.ParseSelect(table.myQuery)\n\t\tif table.err != err {\n\t\t\tt.Error()\n\t\t}\n\t\tif 
!reflect.DeepEqual(reqCols, table.reqCols) {\n\t\t\tt.Error()\n\t\t}\n\t\tif alias != table.alias {\n\t\t\tt.Error()\n\t\t}\n\t\tif myLimit != int64(table.myLimit) {\n\t\t\tt.Error()\n\t\t}\n\t\tif !reflect.DeepEqual(table.aggFuncs, aggFunctionNames) {\n\t\t\tt.Error()\n\t\t}\n\t}\n}", "func (h *Header) Parse(ctx context.Context, r io.ReaderAt) error {\n\tif err := h.loadArenas(ctx, r); err != nil {\n\t\treturn fmt.Errorf(\"rpm: failed to parse header: %w\", err)\n\t}\n\tvar isBDB bool\n\tswitch err := h.verifyRegion(ctx); {\n\tcase errors.Is(err, nil):\n\tcase errors.Is(err, errNoRegion):\n\t\tisBDB = true\n\tdefault:\n\t\treturn fmt.Errorf(\"rpm: failed to parse header: %w\", err)\n\t}\n\tif err := h.verifyInfo(ctx, isBDB); err != nil {\n\t\treturn fmt.Errorf(\"rpm: failed to parse header: %w\", err)\n\t}\n\treturn nil\n}", "func Parse(lexer *Lexer) int {\n\treturn yyParse(lexer)\n}", "func Parse(src string) (*Query, error) {\n\tvar query Query\n\tif err := parser.ParseString(src, &query); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &query, nil\n}", "func (rm RowMapInterface) Parse(field string, val any) any {\n\tif err := json.Unmarshal(rm.Bytes(field), val); err != nil {\n\t\treturn nil\n\t}\n\treturn reflect.ValueOf(val).Elem().Interface()\n}" ]
[ "0.65030944", "0.640545", "0.6295678", "0.58526266", "0.5814548", "0.5708484", "0.5522844", "0.54461235", "0.52731514", "0.5248069", "0.52401406", "0.522363", "0.519883", "0.51841474", "0.5167972", "0.5140995", "0.51321316", "0.5074069", "0.50669277", "0.5043279", "0.50262934", "0.50150234", "0.49937123", "0.4992432", "0.49835226", "0.49578834", "0.49507457", "0.4941648", "0.4909966", "0.49087992", "0.4897667", "0.4892911", "0.48922634", "0.48723304", "0.48687795", "0.4864553", "0.4851694", "0.48498356", "0.48478952", "0.48393822", "0.48150122", "0.4792823", "0.47877973", "0.47599754", "0.47539338", "0.47483966", "0.47446722", "0.4741681", "0.47373933", "0.47243264", "0.47195554", "0.47145414", "0.47143492", "0.47077888", "0.47050515", "0.47043183", "0.4696809", "0.4676531", "0.46710652", "0.4668404", "0.4664024", "0.46578866", "0.4657642", "0.4656801", "0.46563286", "0.46509415", "0.46415505", "0.46412623", "0.4638492", "0.4625162", "0.46132657", "0.46132585", "0.4611246", "0.4604814", "0.45968106", "0.45960602", "0.4591601", "0.45814887", "0.4572404", "0.4568896", "0.4564012", "0.45594427", "0.45555022", "0.45517707", "0.45395425", "0.45265657", "0.4521395", "0.45185176", "0.45075408", "0.45054373", "0.4505174", "0.44985852", "0.44974947", "0.44972244", "0.44927332", "0.4484313", "0.44795075", "0.44780204", "0.4471309", "0.44713047", "0.44684157" ]
0.0
-1
SingleNumber returns an element from an array that appears one time
func SingleNumber(nums []int) int {
	if len(nums) == 1 {
		return nums[0]
	}
	result := 0
	for _, n := range nums {
		result ^= n
	}
	return result
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func singleNumber(nums []int) int {\n\tt := make(map[int]int)\n\n\tfor _, v := range nums {\n\t\tt[v]++\n\t}\n\n\tfor v, c := range t {\n\t\tif c == 1 {\n\t\t\treturn v\n\t\t}\n\t}\n\n\treturn 0\n}", "func singleNumber(nums []int) int {\n\tln := len(nums)\n\tif ln == 1 {\n\t\treturn nums[0]\n\t}\n\tsingle := make(map[int]bool)\n\tfor _, n := range nums {\n\t\t_, ok := single[n]\n\t\tif ok {\n\t\t\tdelete(single, n)\n\t\t} else {\n\t\t\tsingle[n] = true\n\t\t}\n\t}\n\tfor n, _ := range single {\n\t\treturn n\n\t}\n\treturn 0 // error condition\n}", "func singleNumber(nums []int) int {\n\tres := nums[0]\n\tfor i:=1; i<len(nums); i++{\n\t\tres ^= nums[i]\n\t}\n\n\treturn res\n}", "func singleNumber(nums []int) int {\n\tones, twos := 0, 0\n\tfor i := range nums {\n\t\tones = (ones ^ nums[i]) & (^twos)\n\t\ttwos = (twos ^ nums[i]) & (^ones)\n\t}\n\treturn ones\n}", "func singleNumber(nums []int) int {\r\n\tbit := 0\r\n\tfor i := 0; i < len(nums); i++ {bit ^= nums[i]}\r\n\treturn bit\r\n}", "func singleNumber(nums []int) int {\n\tones, twos := 0, 0\n\tfor i := 0; i < len(nums); i++ {\n\t\tones = (ones ^ nums[i]) & ^twos\n\t\ttwos = (twos ^ nums[i]) & ^ones\n\t}\n\treturn ones\n}", "func singleNumberA(nums []int) int {\n result:=0\n for _,data := range nums{\n result^=data\n }\n return result\n}", "func singleNumber(nums []int) []int {\n\tbitmap := 0\n\tfor _, num := range nums {\n\t\tbitmap = bitmap ^ num\n\t}\n\tdiff, x := bitmap&(-bitmap), 0\n\tfor _, num := range nums {\n\t\tif num&diff != 0 {\n\t\t\tx = x ^ num\n\t\t}\n\t}\n\treturn []int{x, bitmap ^ x}\n}", "func singleNumber(nums []int) int {\n res := 0\n for _, v := range nums {\n res = res ^ v\n }\n\n return res\n}", "func FirstDuplicateValue(array []int) int {\n\tm := make(map[int]int)\n\n\tfor i := 0; i < len(array); i++ {\n\t\tm[array[i]]++\n\n\t\tif m[array[i]] == 2 {\n\t\t\treturn array[i]\n\t\t}\n\t}\n\treturn -1\n}", "func uniqueNumber(slice []int) int {\n\tunique := slice[0]\n\tfor i := 1; i < len(slice); i++ {\n\t\tunique ^= slice[i]\n\t}\n\treturn unique\n}", "func singleNumberB(nums []int) int {\n \n mp := make(map[int]int)\n result:= 0\n for _,data := range nums{\n if _,exist := mp[data]; exist{ //if element exist we remove the elemtn\n delete(mp, data)\n \n }else{\n mp[data] = data\n }\n\t}\n\t//really O(1) since there's always one element left\n for _, data := range mp{ \n result = data\n }\n return result \n}", "func TestFirstduplicate_SmallArray(t *testing.T) {\n\texpectedResult := 3\n\tassert.Equal(t, expectedResult, firstDuplicate(smallArray))\n}", "func singleNumberBitwise(nums []int) int {\n\tsingles := 0\n\tdoubles := 0\n\n\tfor _, v := range nums {\n\t\t// add current value into singles if it is not in doubles\n\t\t// exclude from singles if it's there already\n\t\tsingles = (^doubles) & (singles ^ v)\n\t\t// add current value into doubles if it is not in singles\n\t\t// exclude from double if it's there already\n\t\tdoubles = (^singles) & (doubles ^ v)\n\t}\n\n\treturn singles\n}", "func GetRandomOne(arr interface{}) interface{} {\n\trv := reflect.ValueOf(arr)\n\tif rv.Kind() != reflect.Slice && rv.Kind() != reflect.Array {\n\t\treturn arr\n\t}\n\n\ti := mathutil.RandomInt(0, rv.Len())\n\tr := rv.Index(i).Interface()\n\n\treturn r\n}", "func getDuplicate(numbers []int) int {\n\tfor _, num1 := range numbers {\n\t\tcheck := false\n\t\tfor _, num2 := range numbers {\n\t\t\tflag := num1 == num2\n\t\t\tif flag && check {\n\t\t\t\treturn num1\n\t\t\t} else if flag {\n\t\t\t\tcheck = true\n\t\t\t}\n\t\t}\n\t}\n\treturn 
0\n}", "func FindFirstRepeated(in []int) (int, error) {\n\n\thmap := make(map[int]bool)\n\n\tfor _, val := range in {\n\t\tif hmap[val] == true {\n\t\t\treturn val, nil\n\t\t} else {\n\t\t\thmap[val] = true\n\t\t}\n\t}\n\n\treturn 0, errFindFirstRepeated\n}", "func minIncrementForUnique(A []int) int {\n \n}", "func repeatedNTimes(A []int) int {\n cache := map[int]bool {}\n for _,a := range A {\n if cache[a] {\n return a\n }\n cache[a] = true\n }\n return -1\n}", "func (b Bits) Single() bool {\n\t// like OnesCount, but stop as soon as two are found\n\tif b.Num == 0 {\n\t\treturn false\n\t}\n\tc := 0\n\tlast := len(b.Bits) - 1\n\tfor _, w := range b.Bits[:last] {\n\t\tc += mb.OnesCount64(w)\n\t\tif c > 1 {\n\t\t\treturn false\n\t\t}\n\t}\n\tc += mb.OnesCount64(b.Bits[last] << uint(len(b.Bits)*64-b.Num))\n\treturn c == 1\n}", "func (p IntArray) First() int {\n\treturn p[0]\n}", "func repeatedNTimes(A []int) int {\n m := make(map[int]int)\n for _, v := range A {\n m[v]++\n if m[v] > 1 {\n return v\n }\n }\n return 0\n}", "func SimpleRemoveDuplicates(arr []int) []int {\n\t//..\n\tpocket := make([]int, 0)\n\tpocket = append(pocket, arr...)\n\n\tfor index, num := range pocket {\n\t\tif contains(pocket, num) == true {\n\t\t\tfmt.Println(\"pocket: \", pocket)\n\t\t\tfmt.Println(\"pocket[index]: \", pocket[index:])\n\t\t\tpocket = pocket[index:]\n\t\t}\n\t}\n\n\t// for _, item := range arr {\n\t// \tif contains(pocket, item) == true {\n\t// \t\tcontinue\n\t// \t} else {\n\t// \t\tpocket = append(pocket, item)\n\t// \t}\n\t// }\n\treturn pocket\n}", "func (s *mustRunAs) GenerateSingle(_ *api.Pod) (*int64, error) {\n\tsingle := new(int64)\n\t*single = s.ranges[0].Min\n\treturn single, nil\n}", "func main() {\r\n\tOddNum := [5]int{1, 3, 5, 7, 9}\r\n\tfmt.Println(OddNum[3])\r\n}", "func (sudoku *Sudoku) deduce_nakedSingle() bool {\n\tnakedSingleCouldProgress := false\n\n\tfor i := 1; i<=9; i++ {\n\t\tfor j := 1; j<=9; j++ {\n\t\t\tif sudoku.grid[rowCol(i,j)] == 0 {\n\t\t\t\tp := sudoku.returnOnlyPossibility(i,j)\n\t\t\t\tif p != 0 {\n\t\t\t\t\tnakedSingleCouldProgress = true\n\t\t\t\t\tsudoku.setNumber(i, j, p)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nakedSingleCouldProgress\n}", "func FindDuplicateNumber(nums []int) int {\n\ti := 0\n\tfor i < len(nums) {\n\t\tvalue := nums[i]\n\t\tif nums[value-1] != value {\n\t\t\tnums[value-1], nums[i] = value, nums[value-1]\n\t\t} else if value-1 != i {\n\t\t\treturn value\n\t\t} else {\n\t\t\ti++\n\t\t}\n\t}\n\n\treturn -1\n}", "func main() {\n\tnums := []int{1,3,5,2,6,4,5}\n\tfmt.Println(findDuplicate(nums))\n\tnums2 := []int{3,1,3,4,2}\n\tfmt.Println(findDuplicate(nums2))\n}", "func singleNumber137(nums []int) int {\n\t// 注意将所有中间变量声明成 int32\n\tvar ans int32\n\tfor i := 0; i < 32; i++ {\n\t\tvar total int32\n\t\tfor _, num := range nums {\n\t\t\ttotal += int32(num) >> i & 1\n\t\t}\n\t\tif total%3 > 0 {\n\t\t\tans |= 1 << i\n\t\t}\n\t}\n\t// 最后返回的时候再转换成 int\n\treturn int(ans)\n}", "func nthUglyNumber(n int) int {\n\treturn foundNumbers[n-1]\n}", "func (m multiSigner) Single(n int) SingleSigner {\n\tif len(m.accounts) <= n {\n\t\tpanic(\"invalid index\")\n\t}\n\treturn NewSingleSigner(wallet.NewAccountFromPrivateKey(m.accounts[n].PrivateKey()))\n}", "func (s *singleton) GetNumber() int {\n\treturn s.number\n}", "func Single(t *testing.T, d []byte) [Size]byte {\n\t// Place the same data in every lane.\n\tvar data [Lanes][]byte\n\tfor l := range data {\n\t\tdata[l] = d\n\t}\n\n\tif err := Validate(data); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Hash and check 
the lanes are the same.\n\tdigest := Sum(data)\n\tfor l := range data {\n\t\tif digest[0] != digest[l] {\n\t\t\tt.Logf(\"lane %02d: %x\", 0, digest[0])\n\t\t\tt.Logf(\"lane %02d: %x\", l, digest[l])\n\t\t\tt.Fatal(\"lane mismatch\")\n\t\t}\n\t}\n\n\treturn digest[0]\n}", "func Get1(bm []uint64, i int32) uint64 {\n\treturn (bm[i>>6] >> uint(i&63)) & 1\n}", "func Gimme(array [3]int) int {\n\ts := make([]int, len(array))\n\tcopy(s, array[:])\n\n\tsort.Ints(s)\n\n\tvar middle int\n\tfor i, value := range array {\n\t\tif value == s[1] {\n\t\t\tmiddle = i\n\t\t}\n\t}\n\treturn middle\n}", "func GetSinglePokemon(number int) pokemon.Pkmn {\n\tallPokemon := GetPokemonFromFile()\n\tvar targetPokemon pokemon.Pkmn\n\tfor _, singlePokemon := range allPokemon {\n\t\tif singlePokemon.Number == number {\n\t\t\ttargetPokemon = singlePokemon\n\t\t\tbreak\n\t\t}\n\t}\n\treturn targetPokemon\n}", "func TestNumbersFirstGivenCase(t *testing.T) {\n\n\tinput := []int32{4, 6, 5, 3, 3, 1}\n\texpected := int32(3)\n\n\tresult := pickingNumbers(input)\n\n\tif result != expected {\n\t\tt.Errorf(\"Picking numbers first case was incorrect, got: %d, want: %d.\", result, expected)\n\t}\n}", "func FindRepeatNumber(nums []int) int {\n\texist := make(map[int]bool)\n\tfor i := range nums {\n\t\tif _, ok := exist[nums[i]]; ok {\n\t\t\treturn nums[i]\n\t\t}\n\t\texist[nums[i]] = true\n\t}\n\treturn 0\n}", "func main() {\n\tnums := []int{1, 2, 3, 1}\n\tre := containsDuplicate(nums)\n\tfmt.Print(re)\n}", "func (b *BoolMatrixLinear)GetNumber()int64{\n\tvar result float64 = 0\n\tfor i,v := range b.matrix{\n\t\tif v {\n\t\t\tresult += math.Exp2(float64((b.width*b.heigh - 1) - i))\n\t\t}\n\t}\n\n\treturn int64(result)\n}", "func missingNumber(nums []int) int {\n\ttarget := len(nums)\n\tfor i, n := range nums {\n\t\ttarget ^= i ^ n\n\n\t}\n\treturn target\n}", "func duplicateElement(value, n int) []int {\n\tarr := make([]int, n)\n\tfor i := 0; i < n; i++ {\n\t\tarr[i] = value\n\t}\n\n\treturn arr\n}", "func ToIndexOne(numString string) (string, error) {\n\tnum, err := strconv.Atoi(numString)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// add one to make it match pulse eight.\n\t// we are going to use 0 based indexing on video matrixing,\n\t// and the kramer uses 1-based indexing.\n\tnum++\n\n\treturn strconv.Itoa(num), nil\n}", "func NextOne(bm []uint64, i, end int32) int32 {\n\n\twordIdx := i >> 6\n\tbitIdx := i & 63\n\tvar nxt int32 = -1\n\n\tword := bm[wordIdx] & RMask[bitIdx]\n\tif word != 0 {\n\t\tnxt = wordIdx<<6 + int32(bits.TrailingZeros64(word))\n\t} else {\n\n\t\ti = (i + 63) & ^63\n\n\t\tfor ; i < end; i += 64 {\n\n\t\t\tword := bm[i>>6]\n\t\t\tif word != 0 {\n\t\t\t\tnxt = i + int32(bits.TrailingZeros64(word))\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif nxt >= end {\n\t\treturn -1\n\t}\n\treturn nxt\n}", "func One(out1 *TightFieldElement) {\n\tout1[0] = 0x1\n\tout1[1] = 0x0\n\tout1[2] = 0x0\n}", "func (s *singleton) AddOne() int {\n\ts.count++\n\treturn s.count\n}", "func TestLargestElementArrayOne(t *testing.T) {\n\tgot := LargestElement([]int{66}, 1)\n\twant := 66\n\n\tif got != want {\n\t\tt.Errorf(\"got %q, wanted %q\", got, want)\n\t}\n}", "func get1N(indices *[]uint16, start, end int) []uint16 {\n\tif end > cap(*indices) {\n\t\t*indices = make([]uint16, end)\n\t\tfor i := range *indices {\n\t\t\t(*indices)[i] = uint16(i)\n\t\t}\n\t}\n\treturn (*indices)[start:end]\n}", "func (c *compiler) numIndex(n float64) int {\n\tif index, ok := c.indexes.nums[n]; ok {\n\t\treturn index // reuse existing 
constant\n\t}\n\tindex := len(c.program.Nums)\n\tc.program.Nums = append(c.program.Nums, n)\n\tc.indexes.nums[n] = index\n\treturn index\n}", "func (O ObjectCollection) First() (int, error) {\n\tvar exists C.int\n\tvar idx C.int32_t\n\tif C.dpiObject_getFirstIndex(O.dpiObject, &idx, &exists) == C.DPI_FAILURE {\n\t\treturn 0, errors.Errorf(\"first: %w\", O.getError())\n\t}\n\tif exists == 1 {\n\t\treturn int(idx), nil\n\t}\n\treturn 0, ErrNotExist\n}", "func nth_element(buildData []bvhPrimitiveInfo, first, nth, last int, comp func(info0, info1 *bvhPrimitiveInfo) bool) {\n\tBy(comp).Sort(buildData[first:last])\n}", "func minIncrementForUnique(A []int) int {\n\tarr := [80000]int{}\n\tfor _, v := range A {\n\t\tarr[v]++\n\t}\n\n\tvar (\n\t\tans int\n\t\ttoken int\n\t)\n\tfor i := 0; i < 80000; i++ {\n\t\tif arr[i] > 1 {\n\t\t\ttoken += arr[i] - 1\n\t\t\tans -= i * (arr[i] - 1)\n\t\t} else if token > 0 && arr[i] == 0 {\n\t\t\ttoken--\n\t\t\tans += i\n\t\t}\n\t}\n\n\treturn ans\n}", "func (s SecurePlainPhoneArray) First() (v SecurePlainPhone, ok bool) {\n\tif len(s) < 1 {\n\t\treturn\n\t}\n\treturn s[0], true\n}", "func someOtherNumber(num int, s []int) (int, bool) {\n\tfor _, other := range s {\n\t\tif num != other {\n\t\t\treturn other, true\n\t\t}\n\t}\n\treturn num, false\n}", "func majorityElement(nums []int) []int {\n \n}", "func (s *Singleton) AddOne() int {\n\ts.count++\n\treturn s.count\n}", "func (pList *LinkedListNumber) GetNumber() (uint64, error) {\n\tvar n uint64\n\tvar x uint64 = 1\n\tp := pList\n\tfor p != nil {\n\t\tu := uint(p.Data)\n\t\tm, err := mathex.MultiplyUint64(x, uint64(u))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tn, err = mathex.SumUint64(n, m)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tp = p.Next\n\t\tx *= 10\n\t}\n\treturn n, nil\n}", "func part1(arr []int) int {\n\tvar i, j int\n\tn := len(arr)\n\n\ti = 0\n\tj = n - 1\n\n\tfor i < n && j >= 0 {\n\t\tif arr[i]+arr[j] == target {\n\t\t\tbreak\n\t\t}\n\t\tif arr[i]+arr[j] < target {\n\t\t\ti++\n\t\t} else {\n\t\t\tj--\n\t\t}\n\t}\n\treturn arr[i] * arr[j]\n}", "func flexibleGetSmallest(r *ring.Ring, index int) (float64, error) {\n var arr []float64\n\n // Put each ring value into an array\n for i := 0; i < r.Len(); i++ {\n switch r.Value.(type) {\n case int:\n arr = append(arr, float64(r.Value.(int)))\n r = r.Next()\n case float32:\n arr = append(arr, float64(r.Value.(float32)))\n r = r.Next()\n case float64:\n arr = append(arr, r.Value.(float64))\n r = r.Next()\n case string:\n if val, err := strconv.ParseFloat(r.Value.(string), 64); err == nil {\n arr = append(arr, val)\n r = r.Next()\n } else {\n return -1, errors.New(err.Error())\n }\n default:\n return -1, errors.New(\"Data type not yet supported\")\n } \n }\n\n // Sort the array (smallest to largest)\n sort.Float64s(arr)\n\n return arr[index], nil\n}", "func TestNakedSingles(t *testing.T) {\n\tinputBoard := []byte(nakedSinglesParam)\n\tb := NewBoard(inputBoard)\n\n\tassert.NotEqual(t, '8', b.finalValue[5])\n\tassert.NotEqual(t, '8', b.finalValue[11])\n\n\tb.NakedSingles()\n\n\tassert.Equal(t, '8', b.finalValue[5])\n\tassert.Equal(t, '8', b.finalValue[11])\n}", "func XorSearchMissingNumber(a []int) int {\n\tn := len(a)\n\tresult := len(a)\n\tfor i := 0; i < n; i++ {\n\t\tresult ^= i ^ a[i]\n\t}\n\treturn result\n}", "func (this *ExDomain) GetAnyElement() int {\n\tif this.Min > this.Max {\n\t\tlogger.If(\"Domain %s\", *this)\n\t\tdebug.PrintStack()\n\t\tpanic(\"GetAnyElement on empty domain\")\n\t}\n\treturn this.Min // whatever\n}", "func 
perfGetSmallest(r *ring.Ring) int {\n // Set smallest to first value\n var smallest int = r.Value.(int)\n\n // Do a single loop through ring to determine smallest number\n r.Do(func(p interface{}) {\n\t\tif p.(int) < smallest {\n smallest = p.(int)\n }\n })\n \n return smallest\n}", "func main() {\n\tfmt.Println(missingNumber([]int{0}))\n\tfmt.Println(missingNumber([]int{0, 1}))\n}", "func main() {\n\tnums := []int{1, 2, 3, 1}\n\tprintln(containsDuplicate(nums))\n}", "func (m *BoolMatrix)GetNumber()int64{\n\tvar result float64 = 0\n\tfor i := 0; i < m.heigh; i++{\n\t\tfor j := 0; j < m.width ; j++{\n\t\t\tif m.GetBool(i,j) {\n\t\t\t\tresult += math.Exp2(float64((m.width*m.heigh - 1) - (i*m.width+j)))\n\t\t\t}\n\t\t}\n\t} \n\n\treturn int64(result)\n}", "func (i *ArrayConcreteIterator) First() {\n i.current_index_ = 0\n}", "func (m *TestAllTypes) GetSingleFixed32() (x uint32) {\n\tif m == nil {\n\t\treturn x\n\t}\n\treturn m.SingleFixed32\n}", "func (a *_Atom) firstMultiplyBondedNeighbourId() (uint16, *_Bond) {\n\tif a.doubleBondCount == 0 && a.tripleBondCount == 0 {\n\t\treturn 0, nil\n\t}\n\n\tmol := a.mol\n\tfor bid, ok := a.bonds.NextSet(0); ok; bid, ok = a.bonds.NextSet(bid + 1) {\n\t\tb := mol.bondWithId(uint16(bid))\n\t\tif b.bType >= cmn.BondTypeDouble {\n\t\t\treturn b.otherAtomIid(a.iId), b\n\t\t}\n\t}\n\n\tpanic(\"Should never be here!\")\n}", "func main() {\n\tnums := []int{0, 0, 1, 1, 1, 2, 2, 3, 3, 4}\n\tlength := removeDuplicates(nums)\n\tfor i := 0; i < length; i++ {\n\t\tfmt.Printf(\"%d \", nums[i])\n\t}\n\n\tnums = []int{1, 1, 1, 1, 1, 1}\n\tlength = removeDuplicates(nums)\n\tfor i := 0; i < length; i++ {\n\t\tfmt.Printf(\"%d \", nums[i])\n\t}\n\n\tnums = []int{1}\n\tlength = removeDuplicates(nums)\n\tfor i := 0; i < length; i++ {\n\t\tfmt.Printf(\"%d \", nums[i])\n\t}\n}", "func (m MultiMetaGetter) GetSingle(idx int) MetaGetter {\n\tif idx >= len(m) || m[idx] == nil {\n\t\treturn nullMetaGetter\n\t}\n\treturn m[idx]\n}", "func MajoirtyElement(arr []int) (majority int) {\n\tmajority = -1\n\tcount := 1\n\tfor i := 1; i < len(arr); i++ {\n\t\tif arr[i] == majority {\n\t\t\tcount++\n\t\t} else {\n\t\t\tcount--\n\t\t}\n\n\t\tif count == 0 {\n\t\t\tmajority = arr[i]\n\t\t\tcount = 1\n\t\t}\n\t}\n\n\tnewCount := 0\n\tfor i := 0; i < len(arr); i++ {\n\t\tif arr[i] == majority {\n\t\t\tnewCount++\n\t\t}\n\t}\n\n\tif newCount > len(arr)/2 {\n\t\treturn majority\n\t}\n\treturn -1\n\n}", "func (ss *Rate) GetSingle(key string, nowTs time.Time) (float64, float64) {\n\tnow := nowTs.UnixNano()\n\tvar bucket *bucket\n\tif bucketno, found := ss.keytobucketno[key]; found {\n\t\tbucket = &ss.buckets[bucketno]\n\t\trate := ss.recount(bucket.rate, bucket.lastTs, now)\n\t\terrRate := ss.recount(bucket.errRate, bucket.errLastTs, now)\n\t\treturn rate - errRate, rate\n\t} else {\n\t\tbucketno = uint32(ss.sh.h[0])\n\t\tbucket = &ss.buckets[bucketno]\n\t\terrRate := ss.recount(bucket.rate, bucket.lastTs, now)\n\t\treturn 0, errRate\n\t}\n\n}", "func NoRepeated(numbersInput []int) []int {\n\n\tnumbersInput = algoritms.Quick_sort(numbersInput)\n\n\tr := make([]int, 0, len(numbersInput))\n\tr = append(r, numbersInput[0])\n\n\tfor _, number := range numbersInput {\n\t\trTemp := r\n\t\tsearchResult := algoritms.BinarySearch(rTemp, number)\n\n\t\t//Add number if not present\n\t\tif searchResult == -1 {\n\t\t\tr = append(r, number)\n\t\t}\n\t}\n\treturn r\n}", "func findDuplicate(nums []int) int {\n\tif nums == nil || len(nums) == 0 {\n\t\treturn -1\n\t}\n\tslow, fast := nums[0], nums[nums[0]]\n\tfor slow 
!= fast {\n\t\tslow, fast = nums[slow], nums[nums[fast]]\n\t}\n\n\tslow = 0\n\tfor slow != fast {\n\t\tslow, fast = nums[slow], nums[fast]\n\t}\n\treturn slow\n}", "func Find_Smallest(arr []int ) {\n\tfmt.Println(\"FIND SMALLEST VALUE\")\n\tvar smallest = 99999999\n\tfor i:=1 ; i < len(arr) ; i++ {\n\t\t\t//fmt.Println(arr[i])\n\t\t\tif arr[i] <= smallest {\n\t\t\t\tsmallest = arr[i]\n\t\t\t}\n\t\t}\n\t\tfmt.Println(\"Smallest: \" , smallest )\n}", "func (fr *FakeResult) One(ptr interface{}) error {\n\tvar count = make(map[string]int64)\n\tif fr.Force == \"true\" {\n\t\treturn errors.New(\"Function One forced error\")\n\t}\n\tcount[\"count\"] = int64(1234)\n\t*ptr.(*map[string]int64) = count\n\treturn nil\n}", "func getNumber(api *StackAPI, index int, numType interface{}) bool {\n\tif !api.IsNumber(index) {\n\t\tapi.ArgTypeError(index, ValueTNumber)\n\t\treturn false\n\t}\n\n\tif num, ok := numType.(*int); ok {\n\t\t*num = int(api.GetNumber(index))\n\t\treturn true\n\t}\n\tpanic(\"assert\")\n}", "func duplicateZeros1(arr []int) {\n\tvar count int\n\tn := len(arr)\n\tfor _, v := range arr {\n\t\tif v == 0 {\n\t\t\tcount++\n\t\t}\n\t}\n\n\tfor i := n - 1; i >= 0; i-- {\n\t\tif i+count >= n {\n\t\t\tif arr[i] == 0 {\n\t\t\t\tif i+count-1 < n {\n\t\t\t\t\tarr[i+count-1] = 0\n\t\t\t\t}\n\t\t\t\tcount--\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif count != 0 {\n\t\t\tarr[i+count] = arr[i]\n\t\t\tif arr[i] == 0 {\n\t\t\t\tarr[i+count-1] = 0\n\t\t\t\tcount--\n\t\t\t}\n\t\t}\n\t}\n}", "func (arr *FloatArray) Uniq() {\n\tif len(*arr) == 0 {\n\t\treturn\n\t}\n\n\tstrMap := make(map[float64]int)\n\n\tfor _, el := range *arr {\n\t\t// value doesn't matter here cause we collect just keys\n\t\tstrMap[el] = 1\n\t}\n\n\tresArr := make([]float64, 0, 0)\n\tfor k := range strMap {\n\t\tresArr = append(resArr, k)\n\t}\n\n\t*arr = resArr\n}", "func naive(a []int) []int {\n\n\t/* if the array has 0 or zero elements, return an empty array */ \n\tif (len(a) < 2) {\n\t\treturn make([]int, 0)\n\t}\n\n\tp := make([]int, len(a))\n\tfor i := 0; i < len(a); i++ {\n\t\n\t\tproduct := 1; \n\t\tfor j := 0; j < len(a); j++ {\n\t\t\tif i != j {\n\t\t\t\tproduct = product * a[j]\n\t\t\t}\n \t\t}\n\t\tp[i] = product;\n\t}\n\n\treturn p\n}", "func Solution(X int, A []int) int {\n // write your code in Go 1.4\n bucket := make([]bool, X + 1)\n count := 0\n \n for i, n := range A {\n if (bucket[n] == false) {\n bucket[n] = true\n count++\n }\n \n if (count == X) {\n return i\n }\n\t}\n\t\n\treturn -1\n}", "func TestLonelyIntegerFirstGivenCase(t *testing.T) {\n\n\tarr := []int32{1, 2, 3, 4, 3, 2, 1}\n\texpected := int32(4)\n\n\tresult := lonelyInteger(arr)\n\n\tif result != expected {\n\t\tt.Errorf(\"Lonely integer first case was incorrect, got: %d, want: %d.\", result, expected)\n\t}\n}", "func lonelyinteger(a []int32) int32 {\n\n\tvar value int32\n\tfor _, element := range a {\n\t\tvalue ^= element\n\t}\n\treturn value\n\n}", "func (m *TestAllTypes) GetSingleSfixed32() (x int32) {\n\tif m == nil {\n\t\treturn x\n\t}\n\treturn m.SingleSfixed32\n}", "func removeDuplicates(nums []int) int {\n size := len(nums)\n if size == 0 {return 0}\n fixedNum := 1\n for i:=1; i<size; i++ {\n if nums[i] != nums[i-1] {\n nums[fixedNum] = nums[i]\n fixedNum++\n }\n }\n return fixedNum\n}", "func (z *Float64) One() *Float64 {\n\tz.l = 1\n\tz.r = 0\n\treturn z\n}", "func missingNumber(nums []int) int {\n\tvar n = len(nums)\n\tvar res = n\n\tfor i := 0; i < n; i++ {\n\t\tres = res ^ i ^ nums[i]\n\t}\n\treturn res\n}", "func (p *PostingsIterator) 
DocNum1Hit() (uint64, bool) {\n\tif p.normBits1Hit != 0 && p.docNum1Hit != DocNum1HitFinished {\n\t\treturn p.docNum1Hit, true\n\t}\n\treturn 0, false\n}", "func minSliceElement(a []float64) float64", "func (m *RepairinvoiceMutation) Num() (r int, exists bool) {\n\tv := m.num\n\tif v == nil {\n\t\treturn\n\t}\n\treturn *v, true\n}", "func ArrayElement(i int32) {\n C.glowArrayElement(gpArrayElement, (C.GLint)(i))\n}", "func RandomNumber (num int) int{\n return rand.Intn(num) + 1\n}", "func (m *TestAllTypes) GetRepeatedDouble() (x []float64) {\n\tif m == nil {\n\t\treturn x\n\t}\n\treturn m.RepeatedDouble\n}", "func (i *in) Number() int {\n\treturn i.number\n}", "func (i *in) Number() int {\n\treturn i.number\n}", "func (m NoStipulationsRepeatingGroup) Get(i int) NoStipulations {\n\treturn NoStipulations{m.RepeatingGroup.Get(i)}\n}", "func (g Euclidean) Num(num int) model.Collection {\n\n\tl := model.NewSeries(label.Num(\"x\"), label.Num(\"f(x)\"))\n\n\tfor i := 0; i < num; i++ {\n\n\t\tif x, ok, n := g.Next(); ok {\n\t\t\tp := g.Call(x.Coords[0], 0)\n\t\t\tl.Add(model.NewVector([]string{\"f\", fmt.Sprintf(\"%d\", i)}, p...))\n\t\t\tif !n {\n\t\t\t\tlog.Printf(\"could not generate more elements, source iterator ended at %d of %d\", i, num)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t}\n\treturn l\n}", "func detectRepeats(numbers []int) error {\n searched_nums := map[int]int{}\n\n for i, num := range numbers {\n _, searched := searched_nums[num]\n\n num_count := 0\n if !searched {\n for j := i; j < len(numbers); j++ {\n if numbers[j] == num {\n num_count++\n }\n\n if num_count > 3 {\n return fmt.Errorf(\"%d found >3 times.\", numbers[j])\n }\n }\n searched_nums[num] = num\n }\n }\n return nil\n}", "func exampleNum(n []int) string {\n\tif len(n) == 1 {\n\t\treturn fmt.Sprintf(\"%d.\", n[0])\n\t}\n\treturn \"\"\n}" ]
[ "0.7355575", "0.7091691", "0.6871725", "0.6758138", "0.67200637", "0.6717797", "0.64064044", "0.61524177", "0.6090381", "0.58458525", "0.5845142", "0.58402747", "0.5831533", "0.5796477", "0.57190686", "0.57127076", "0.55824995", "0.55505824", "0.5509434", "0.5496342", "0.5486918", "0.54114604", "0.5313964", "0.5222407", "0.5211962", "0.51972574", "0.51455396", "0.5130353", "0.5120034", "0.51156217", "0.5099193", "0.50852025", "0.50799096", "0.50704086", "0.5045565", "0.50452507", "0.49881178", "0.4980906", "0.4941575", "0.4931915", "0.49108747", "0.49059707", "0.4898976", "0.48877504", "0.48859298", "0.48674798", "0.48471943", "0.4836356", "0.4828955", "0.48204806", "0.481721", "0.48139703", "0.4811216", "0.48046893", "0.48024598", "0.47908813", "0.4788402", "0.4785328", "0.47802696", "0.4769013", "0.47609663", "0.47462863", "0.47399625", "0.47384703", "0.47378448", "0.4735303", "0.47318873", "0.47315136", "0.4727424", "0.47144008", "0.47106066", "0.47074068", "0.46903816", "0.46862847", "0.46758115", "0.4671787", "0.46670255", "0.46606028", "0.46579847", "0.46554962", "0.4652343", "0.46498364", "0.46353492", "0.46315634", "0.4627939", "0.4626338", "0.46237704", "0.46234676", "0.4623377", "0.4617923", "0.46159166", "0.46142244", "0.4606644", "0.45974207", "0.45944154", "0.45944154", "0.4592966", "0.4592869", "0.4591369", "0.45745713" ]
0.68762535
2
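Note on the positive document above: it relies on the XOR identities x ^ x == 0 and x ^ 0 == x, so every value that appears twice cancels out and only the singleton survives; the len(nums) == 1 early return is a harmless shortcut, not a correctness requirement. The following minimal sketch is not part of the dataset (package layout and test values are illustrative assumptions) and only demonstrates the technique end to end:

```go
package main

import "fmt"

// SingleNumber XORs every value together. Pairs cancel
// (x ^ x == 0, x ^ 0 == x), leaving the value that appears once.
func SingleNumber(nums []int) int {
	result := 0
	for _, n := range nums {
		result ^= n
	}
	return result
}

func main() {
	fmt.Println(SingleNumber([]int{4, 1, 2, 1, 2})) // 4
	fmt.Println(SingleNumber([]int{7}))             // 7
}
```

This runs in O(n) time with O(1) extra space, unlike the map-based variants among the negatives above, which need O(n) extra space to track counts.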
WithClient returns a context with an OAPI client (accessible using GetClient) which can conduct operations against the provided target.
func WithClient(ctx context.Context, target url.URL) context.Context {
	cfg := eth2spec.NewConfiguration()
	cfg.BasePath = "http://" + target.Host
	client := eth2spec.NewAPIClient(cfg)
	ctx = context.WithValue(ctx, clientKey, client)
	return ctx
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewClientWithTarget(ctx context.Context, target string) (*Client, error) {\n\treturn NewClient(ctx, option.WithEndpoint(target))\n}", "func WithClient(ctx context.Context, clt client.Client) context.Context {\n\treturn context.WithValue(ctx, clientCtxKey{}, clt)\n}", "func withHTTPClient(target *http.Client) ClientOption {\n\treturn func(subject *client) {\n\t\tsubject.client = target\n\t}\n}", "func WithClient(ctx context.Context, c *client.Client) context.Context {\n\treturn context.WithValue(ctx, clientKey{}, c)\n}", "func WithClient(client *dgo.Dgraph) OperationExecutorOptionFn {\n\treturn func(executor *OperationExecutor) {\n\t\texecutor.client = client\n\t}\n}", "func NewClientWithTarget(target string) *Client {\n\tcli, _ := NewClient(\"\", target, \"\", \"\")\n\treturn cli\n}", "func WithClient(c *http.Client) TransportOption {\n\treturn func(tr *Transport) {\n\t\ttr.client = c\n\t}\n}", "func (ctx Context) WithClient(client CometRPC) Context {\n\tctx.Client = client\n\treturn ctx\n}", "func (ix *Reindexer) TargetClient(c *Client) *Reindexer {\n\tix.targetClient = c\n\treturn ix\n}", "func ClientContext(ctx context.Context) ClientOpt {\n\treturn func(client *Client) {\n\t\tclient.ctx = ctx\n\t}\n}", "func (t *Tapa) WithClient(client *http.Client) {\n\tt.client = client\n}", "func (b *ReflectClientBuilder) WithContext(ctx context.Context) {\n\tb.ctx = ctx\n}", "func WithClient(ctx context.Context, client Client) context.Context {\n\tif client == nil {\n\t\treturn ctx\n\t}\n\treturn context.WithValue(ctx, rfcontext.MetricsClientKey, client)\n}", "func WithClientContext(ctx context.Context) Option {\n\treturn func(opts *Options) {\n\t\topts.clientContext = ctx\n\t}\n}", "func WithContext(ctx context.Context) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.ctx = ctx\n\t\treturn nil\n\t}\n}", "func WithClient(client *github.Client) OptFn {\n\treturn func(o *Opt) {\n\t\to.client = client\n\t}\n}", "func WithTestHTTPClient(ctx context.Context, client *http.Client) context.Context {\n\treturn context.WithValue(ctx, &httpClientCtxKey, client)\n}", "func WithClient(cl client.Client) Option {\n\treturn func(featureGateClient *FeatureGateClient) *FeatureGateClient {\n\t\tfeatureGateClient.crClient = cl\n\t\treturn featureGateClient\n\t}\n}", "func WithClient(x *xray.XRay) Option {\n\treturn func(o *Options) {\n\t\to.Client = x\n\t}\n}", "func WithClientTracing(client *http.Client) error {\n\tprev := client.Transport\n\tclient.Transport = &ochttp.Transport{\n\t\tBase: prev,\n\t\tPropagation: tracecontextb3.TraceContextEgress,\n\t}\n\treturn nil\n}", "func WithClient(cl client.Client) Option {\n\treturn func(r *Reconciler) error {\n\t\tr.client = cl\n\t\treturn nil\n\t}\n}", "func WithClient(cl client.Client) Option {\n\treturn func(r *Reconciler) error {\n\t\tr.client = cl\n\t\treturn nil\n\t}\n}", "func withClient(c *http.Client) option {\n\treturn func(m *matcher) error {\n\t\tm.client = c\n\t\treturn nil\n\t}\n}", "func Use(c context.Context, s Client) context.Context {\n\treturn context.WithValue(c, &contextKey, s)\n}", "func WithHTTPClient(client *http.Client) OptionFunc {\n\treturn func(c *Client) {\n\t\tc.client = client\n\t}\n}", "func WithClient(hc *http.Client) Options {\n\treturn func(s *SPDX) { s.hc = hc }\n}", "func NewWithClientCredentials(target string, zoneID string, clientID string, clientSecret string, tokenFormat TokenFormat) (*API, error) {\n\tu, err := BuildTargetURL(target)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttokenURL := urlWithPath(*u, 
\"/oauth/token\")\n\tv := url.Values{}\n\tv.Add(\"token_format\", tokenFormat.String())\n\tc := &clientcredentials.Config{\n\t\tClientID: clientID,\n\t\tClientSecret: clientSecret,\n\t\tTokenURL: tokenURL.String(),\n\t\tEndpointParams: v,\n\t}\n\tclient := &http.Client{Transport: http.DefaultTransport}\n\treturn &API{\n\t\tUnauthenticatedClient: client,\n\t\tAuthenticatedClient: c.Client(context.WithValue(context.Background(), oauth2.HTTPClient, client)),\n\t\tTargetURL: u,\n\t\tZoneID: zoneID,\n\t}, nil\n}", "func WithRESTClient(ctx context.Context, clt *resty.Client) context.Context {\n\treturn context.WithValue(ctx, clientCtxKey{}, clt)\n}", "func (o *Request) WithClient(client *http.Client) *Request {\n\to.Client = client\n\treturn o\n}", "func WithHTTPClient(c *http.Client) Option {\n\treturn func(args *Client) {\n\t\targs.httpClient = c\n\t}\n}", "func WithClient(client *http.Client) Option {\n\treturn func(o *Options) {\n\t\to.HTTPClient = client\n\t}\n}", "func WithOpsClient(opsClient ops.ClientIface) ClientOpt {\n\treturn func(c *Client) {\n\t\tc.client.Operations = opsClient\n\t}\n}", "func WithClientTrace(ctx context.Context, trace *httptrace.ClientTrace,) context.Context", "func WithHTTPClient(h HTTPClient) ClientOption {\n\treturn clientOptionFunc(func(c interface{}) {\n\t\tswitch c := c.(type) {\n\t\tcase *Client:\n\t\t\tc.httpClient = h\n\t\tdefault:\n\t\t\tpanic(\"unknown type\")\n\t\t}\n\t})\n}", "func NewWithClientCredentials(target string, zoneID string, clientID string, clientSecret string, tokenFormat TokenFormat, skipSSLValidation bool) (*API, error) {\n\ta := New(target, zoneID).WithClientCredentials(clientID, clientSecret, tokenFormat).WithSkipSSLValidation(skipSSLValidation)\n\terr := a.Validate()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn a, err\n}", "func withClient(cassetteName string, f func(*Client)) {\n\twithCassette(cassetteName, func(r *recorder.Recorder) {\n\t\tc := NewClient(DynCustomerName)\n\t\tc.SetTransport(r)\n\t\tc.Verbose(true)\n\n\t\tf(c)\n\t})\n}", "func WrapClient(ctx context.Context, origClient *redis.Client) *redis.Client {\n\tclient := origClient.WithContext(ctx)\n\tclient.WrapProcess(PerCommandTracer(ctx))\n\treturn client\n}", "func WithHTTPClient(hClient *http.Client) clientOption {\n\treturn func(c *client) {\n\t\tc.httpClient = hClient\n\t}\n}", "func WithClient(c client.Client) Option {\n\treturn func(o *Options) {\n\t\to.Client = c\n\t}\n}", "func WithHTTPClient(client *http.Client) ClientOption {\n\treturn func(c *Client) {\n\t\tc.httpClient = client\n\t}\n}", "func WithHTTPClient(client *http.Client) Opt {\n\treturn func(c *Client) error {\n\t\tif client != nil {\n\t\t\tc.client = client\n\t\t}\n\t\treturn nil\n\t}\n}", "func WithHTTPClient(client *http.Client) Opt {\n\treturn func(c *Client) {\n\t\tc.httpClient = client\n\t}\n}", "func Client(c *api.Client) Option {\n\treturn func(s *Source) { s.client = c }\n}", "func Wrap(client redis.UniversalClient, tracer opentracing.Tracer, config Config) Client {\n\tif tracer == nil {\n\t\ttracer = opentracing.GlobalTracer()\n\t}\n\n\tclient.AddHook(&opentracingHook{tracer: tracer, config: config})\n\n\tswitch client.(type) {\n\tcase *redis.Client:\n\t\treturn contextClient{Client: client.(*redis.Client), tracer: tracer}\n\tcase *redis.ClusterClient:\n\t\treturn contextClusterClient{ClusterClient: client.(*redis.ClusterClient), tracer: tracer}\n\tcase *redis.Ring:\n\t\treturn contextRingClient{Ring: client.(*redis.Ring), tracer: tracer}\n\t}\n\n\treturn client.(Client)\n}", "func 
WithHTTPClient(httpClient ops.HTTPClient) Option {\n\treturn func(o *clientOptions) {\n\t\to.httpClient = httpClient\n\t}\n}", "func WithHTTPClient(httpClient *http.Client) ClientOption {\n\treturn func(client *Client) {\n\t\tclient.httpClient = httpClient\n\t}\n}", "func WithHTTPClient(client *http.Client) DialOption {\n\treturn optionFn(func(opts *dialOpts) {\n\t\topts.client = client\n\t})\n}", "func WithClient(config Config, fn func(client Client) error) (err error) {\n\tclient := NewClient(config)\n\n\tif err = client.Open(); err != nil {\n\t\terr = fmt.Errorf(\"opening DB client %T: %s\", client, err)\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif closeErr := client.Close(); closeErr != nil {\n\t\t\tif err == nil {\n\t\t\t\terr = fmt.Errorf(\"closing DB client %T: %s\", client, closeErr)\n\t\t\t} else {\n\t\t\t\tlog.Errorf(\"Existing error before attempt to close DB client %T: %s\", client, err)\n\t\t\t\tlog.Errorf(\"Also encountered problem closing DB client %T: %s\", client, closeErr)\n\t\t\t}\n\t\t}\n\t}()\n\n\tif err = fn(client); err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func NewWithClient(chainID string, client SignStatusClient) provider.Provider {\n\treturn &http{\n\t\tchainID: chainID,\n\t\tclient: client,\n\t}\n}", "func Client() func(http.Handler) http.Handler {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tclient := r.Header.Get(\"client\")\n\n\t\t\t// adding client into context\n\t\t\tctx := context.WithValue(r.Context(), \"client\", client)\n\n\t\t\t// and call the next with our new context\n\t\t\tr = r.WithContext(ctx)\n\t\t\tnext.ServeHTTP(w, r)\n\t\t})\n\t}\n}", "func ContextClientTrace(ctx context.Context) *httptrace.ClientTrace", "func WithHTTPClient(doer HttpRequestDoer) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.Client = doer\n\t\treturn nil\n\t}\n}", "func WithHTTPClient(doer HttpRequestDoer) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.Client = doer\n\t\treturn nil\n\t}\n}", "func WithHTTPClient(doer HttpRequestDoer) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.Client = doer\n\t\treturn nil\n\t}\n}", "func WithHTTPClient(doer HttpRequestDoer) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.Client = doer\n\t\treturn nil\n\t}\n}", "func WithHTTPClient(doer HttpRequestDoer) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.Client = doer\n\t\treturn nil\n\t}\n}", "func WithHTTPClient(doer HttpRequestDoer) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.Client = doer\n\t\treturn nil\n\t}\n}", "func WithHTTPClient(doer HttpRequestDoer) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.Client = doer\n\t\treturn nil\n\t}\n}", "func WithHTTPClient(doer HttpRequestDoer) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.Client = doer\n\t\treturn nil\n\t}\n}", "func WithHTTPClient(doer HttpRequestDoer) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.Client = doer\n\t\treturn nil\n\t}\n}", "func WithHTTPClient(doer HttpRequestDoer) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.Client = doer\n\t\treturn nil\n\t}\n}", "func NewClientContext(ctx context.Context, tr Transporter) context.Context {\n\treturn context.WithValue(ctx, clientTransportKey{}, tr)\n}", "func (c *Client) WithClient(hc *http.Client) *Client {\n\tc.c = hc\n\treturn c\n}", "func NewClient(app App, opts ...Option) *Client {\n\tbaseURL, err := url.Parse(app.APIURL)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tc := &Client{\n\t\tClient: 
&http.Client{},\n\t\tlog: &LeveledLogger{},\n\t\tapp: app,\n\t\tbaseURL: baseURL,\n\t}\n\n\tc.Util = &UtilServiceOp{client: c}\n\tc.Auth = &AuthServiceOp{client: c}\n\tc.Media=&MediaSpaceServiceOp{client: c}\n\tc.Product=&ProductServiceOp{client: c}\n\tc.Logistics=&LogisticsServiceOp{client: c}\n\tc.Shop=&ShopServiceOp{client: c}\n\tc.Discount=&DiscountServiceOp{client: c}\n\tc.Order=&OrderServiceOp{client: c}\n\t\n\t// apply any options\n\tfor _, opt := range opts {\n\t\topt(c)\n\t}\n\n\treturn c\n}", "func WithClient(c client.APIClient) AgentCliOption {\n\treturn func(cli *AgentCli) error {\n\t\tcli.client = c\n\t\treturn nil\n\t}\n}", "func Client(ctx context.Context) *client.Client {\n\tc, _ := ctx.Value(clientKey{}).(*client.Client)\n\treturn c\n}", "func WithHTTPClient(httpClient *http.Client) ClientOption {\n\treturn func(c *client) error {\n\t\tif httpClient == nil {\n\t\t\treturn errors.InvalidParameterError{Parameter: \"httpClient\", Reason: \"cannot be empty\"}\n\t\t}\n\n\t\tc.requester.Client = httpClient\n\t\treturn nil\n\t}\n}", "func WithClient(c *http.Client) Option {\n\treturn func(u *Updater) error {\n\t\tu.Fetcher.Client = c\n\t\treturn nil\n\t}\n}", "func RequestWithClient(client *http.Client, method, uri string, header http.Header, body io.Reader) (Response, error) {\n\treq, err := http.NewRequest(method, uri, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor name, field := range header {\n\t\tfor _, v := range field {\n\t\t\treq.Header.Set(name, v)\n\t\t}\n\t}\n\n\tvar out response\n\treq = req.WithContext(WithTraces(req.Context(), &out.traces))\n\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, normalizeError(err)\n\t}\n\tdefer res.Body.Close()\n\n\tout.status = res.StatusCode\n\n\tif _, err := io.Copy(&out.bodySize, res.Body); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar resHeader bytes.Buffer\n\tres.Header.Write(&resHeader)\n\tout.header = res.Header\n\tout.headerSize = resHeader.Len()\n\n\treturn &out, nil\n}", "func WithClient(cl m3admin.Client) Option {\n\treturn optionFn(func(n *namespaceClient) error {\n\t\tn.client = cl\n\t\treturn nil\n\t})\n}", "func WithHTTPClient(client *http.Client) Option {\n\treturn func(c *Client) error {\n\t\tif client == nil {\n\t\t\treturn errors.New(\"client cannot be nil\")\n\t\t}\n\n\t\tc.client = client\n\t\treturn nil\n\t}\n}", "func WithHTTPClient(httpclient *http.Client) ClientOption {\n\treturn func(client *Client) {\n\t\tclient.httpClient = httpclient\n\t}\n}", "func WithContext(parent context.Context, projID string, c *http.Client) context.Context {\n\tif _, ok := c.Transport.(*internal.Transport); !ok {\n\t\tc.Transport = &internal.Transport{Base: c.Transport}\n\t}\n\tvals := make(map[string]interface{})\n\tvals[\"project_id\"] = projID\n\tvals[\"http_client\"] = c\n\t// TODO(jbd): Lazily initiate the service objects.\n\t// There is no datastore service as we use the proto directly\n\t// without passing through google-api-go-client.\n\tvals[\"pubsub_service\"], _ = pubsub.New(c)\n\tvals[\"storage_service\"], _ = storage.New(c)\n\tvals[\"container_service\"], _ = container.New(c)\n\treturn context.WithValue(parent, internal.ContextKey(\"base\"), vals)\n}", "func WithHTTPClient(client HTTPClient) Option {\n\treturn func(opts *Client) {\n\t\topts.httpClient = client\n\t}\n}", "func WithHTTPClient(client *http.Client) func(c *Client) error {\n\treturn func(c *Client) error {\n\t\tif client == nil {\n\t\t\treturn errors.New(\"HTTP client is nil\")\n\t\t}\n\t\tc.client = client\n\t\treturn 
nil\n\t}\n}", "func WithHTTPClient(httpClient *http.Client) ClientOption {\n\treturn func(c *Client) {\n\t\tc.httpClient = httpClient\n\t}\n}", "func WithHTTPClient(h *http.Client) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.httpClient = h\n\t\treturn nil\n\t}\n}", "func contextClient(ctx context.Context) *http.Client {\n\tif hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {\n\t\treturn hc\n\t}\n\tfor _, fn := range contextClientFuncs {\n\t\tc, err := fn(ctx)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif c != nil {\n\t\t\treturn c\n\t\t}\n\t}\n\treturn http.DefaultClient\n}", "func WithClient(client nethttp.Client) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"client option can not set nil protocol\")\n\t\t}\n\t\tp.Client = &client\n\t\treturn nil\n\t}\n}", "func DoRequestWithClient(\n\tctx context.Context,\n\tclient *http.Client, req *http.Request) (*http.Response, error) {\n\treq = req.WithContext(ctx)\n\treturn client.Do(req)\n}", "func DoRequestWithClient(\n\tctx context.Context,\n\tclient *http.Client,\n\treq *http.Request) (*http.Response, error) {\n\treq = req.WithContext(ctx)\n\treturn client.Do(req)\n}", "func (c *Client) WithHTTPClient(cl *http.Client) *Client {\n\tc.Client = cl\n\treturn c\n}", "func WithHTTPClient(c *http.Client) func(*Client) {\n\treturn func(mr *Client) {\n\t\tmr.client = c\n\t}\n}", "func WithHTTPClient(client *http.Client) Option {\n\treturn func(o *Options) {\n\t\to.Client = client\n\t}\n}", "func SetClient(ctx context.Context, client statsd.ClientInterface) context.Context {\n\treturn context.WithValue(ctx, statsdClient, client)\n}", "func NewClientWith(cfg *as.ClientConfig, token string) *Client {\n\tcl := as.NewClient(cfg)\n\tcl.Config.Client = &http.Client{Timeout: timeoutSec * time.Second}\n\tcl.Token = token\n\treturn &Client{*cl}\n}", "func AuthorizedAPIClientWith(jwt string) *baloo.Client {\n\treturn baloo.New(constants.API_URL).\n\t\tUseRequest(AuthMiddleware(jwt))\n}", "func NewClient(shopwareAPIBaseURL string, privateToken string) *Client {\n\n\tc := &Client{\n\t\tshopwareAPIBaseURL: shopwareAPIBaseURL,\n\t\tprivateToken: privateToken,\n\t}\n\tfuture := &models.Context{}\n\tc.do(\"GET\", \"/store-api/v3/context\", nil, future)\n\tc.contextToken = future.Token\n\tc.Context = *future\n\n\treturn c\n\n}", "func Connect (ctx context.Context, controller string) (ClientContext) {\n var client ClientContext\n\n client.Ctx = ctx\n client.Controller = controller\n\n new_client, err := otgclient.NewClientWithResponses(\n controller,\n otgclient.WithHTTPClient(&http.Client{\n Transport: &http.Transport{\n TLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n },\n }))\n\n if (err != nil) {\n\tclient.Error = err\n return client\n }\n\n client.Client = new_client\n response, err := new_client.SetConfigWithResponse(ctx, otgclient.SetConfigJSONRequestBody {})\n client.Response = response\n client.Error = nil\n\n return client\n}", "func Client(c *http.Client) func(*Attacker) {\n\treturn func(a *Attacker) { a.client = *c }\n}", "func (c *Config) Client(ctx context.Context) *http.Client {\n\treturn oauth2.NewClient(ctx, c.TokenSource())\n}", "func WithHTTPClient(httpClient *http.Client) ClientOption {\n\treturn func(c *Client) {\n\t\tc.sling.Client(httpClient)\n\t}\n}", "func WithClient() Option {\n\treturn optionFunc(func(c *config) error {\n\t\tc.subsystem = \"client\"\n\t\tc.usages = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}\n\t\treturn nil\n\t})\n}", "func WithDialContext(dialContext 
func(ctx context.Context, network, addr string) (net.Conn, error)) Opt {\n\treturn func(c *Client) error {\n\t\tif transport, ok := c.client.Transport.(*http.Transport); ok {\n\t\t\ttransport.DialContext = dialContext\n\t\t\treturn nil\n\t\t}\n\t\treturn errors.Errorf(\"cannot apply dialer to transport: %T\", c.client.Transport)\n\t}\n}", "func NewRequestWithClient(base *url.URL, versionedAPIPath string,\n\tcontent ClientContentConfig, client *gorequest.SuperAgent) *Request {\n\treturn NewRequest(&RESTClient{\n\t\tbase: base,\n\t\tversionedAPIPath: versionedAPIPath,\n\t\tcontent: content,\n\t\tClient: client,\n\t})\n}", "func WithClient(c cclient.Client) Option {\n\treturn func(opts *serverOptions) {\n\t\topts.cl = c\n\t}\n}", "func withContext(key, value string) Adapter {\n\treturn func(client gokismet.Client) gokismet.Client {\n\t\treturn gokismet.ClientFunc(func(req *http.Request) (*http.Response, error) {\n\t\t\tctx := context.WithValue(req.Context(), contextKey(key), value)\n\t\t\treturn client.Do(req.WithContext(ctx))\n\t\t})\n\t}\n}", "func wrappedClient(t *testing.T, testID string) (*Client, error) {\n\tctx := context.Background()\n\tbase := http.DefaultTransport\n\n\ttrans, err := htransport.NewTransport(ctx, base, option.WithoutAuthentication(), option.WithUserAgent(\"custom-user-agent\"))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create http client: %v\", err)\n\t}\n\n\tc := http.Client{Transport: trans}\n\n\t// Add RoundTripper to the created HTTP client\n\twrappedTrans := &retryTestRoundTripper{rt: c.Transport, testID: testID, T: t}\n\tc.Transport = wrappedTrans\n\n\t// Supply this client to storage.NewClient\n\t// STORAGE_EMULATOR_HOST takes care of setting the correct endpoint\n\tclient, err := NewClient(ctx, option.WithHTTPClient(&c))\n\treturn client, err\n}", "func NewTargetDBClient() (TargetDBClient, error) {\n\tvar result TargetDBClient\n\n\t// Load ENVs\n\terr := godotenv.Load(\"../local.env\")\n\tif err != nil {\n\t\treturn result, err\n\t}\n\n\tmysqlDB := os.Getenv(\"MYSQL_DATABASE\")\n\tmysqlUser := os.Getenv(\"MYSQL_USER\")\n\tmysqlPassword := os.Getenv(\"MYSQL_PASSWORD\")\n\tmysqlHost := os.Getenv(\"MYSQL_HOST\")\n\tmysqlPort := os.Getenv(\"MYSQL_PORT\")\n\n\t// Create connection string\n\tconnectionString := mysqlUser + \":\" + mysqlPassword + \"@tcp(\" + mysqlHost + \":\" + mysqlPort + \")/\" + mysqlDB + \"?charset=utf8&parseTime=True&loc=Local\"\n\n\t// Open connection\n\tresult.db, err = gorm.Open(\"mysql\", connectionString)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\n\t// Migrate the schema\n\tresult.db.AutoMigrate(&Product{})\n\n\t// Enable Logger, show detailed log\n\tresult.db.LogMode(true)\n\n\tresult.db.DB().SetMaxIdleConns(0)\n\tresult.db.DB().SetConnMaxLifetime(1 * time.Second)\n\n\treturn result, err\n}", "func WithHTTPClient(h *http.Client) Opts {\n\treturn func(r *retryable) {\n\t\tr.httpClient = h\n\t}\n}" ]
[ "0.6210201", "0.6146586", "0.61264163", "0.60994583", "0.6070385", "0.6007224", "0.60030824", "0.59520465", "0.57900596", "0.5726346", "0.57151294", "0.5657475", "0.565246", "0.560597", "0.5580666", "0.55180055", "0.5488585", "0.54805547", "0.5442334", "0.5439255", "0.54297936", "0.54297936", "0.5409881", "0.54044044", "0.54042095", "0.5393104", "0.53861266", "0.5358606", "0.5351478", "0.5310625", "0.53070396", "0.53056496", "0.5304078", "0.52960795", "0.52906924", "0.5289414", "0.52867496", "0.5275192", "0.52619475", "0.5255479", "0.52460194", "0.52442265", "0.52098256", "0.51989114", "0.5197714", "0.51923984", "0.5189679", "0.51788235", "0.51737565", "0.5170548", "0.5168672", "0.5167135", "0.5167135", "0.5167135", "0.5167135", "0.5167135", "0.5167135", "0.5167135", "0.5167135", "0.5167135", "0.5167135", "0.51546633", "0.5153965", "0.514933", "0.51486796", "0.51424545", "0.51368034", "0.5135968", "0.5130938", "0.5128597", "0.51237744", "0.51088893", "0.51019794", "0.50982237", "0.5094033", "0.508631", "0.50811243", "0.50794166", "0.50746036", "0.50662196", "0.5060852", "0.5058182", "0.50530046", "0.50244665", "0.5024198", "0.5002836", "0.4987355", "0.49850768", "0.49845687", "0.49773794", "0.49730834", "0.49694446", "0.49656817", "0.49648544", "0.49481192", "0.49324253", "0.49255028", "0.4915047", "0.49019706", "0.48976737" ]
0.73107415
0
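Note on the positive document above (and its companion GetClient document in the next record): both implement the standard context-value stash pattern — store a configured client under a private key, retrieve it later with a checked type assertion that yields nil on a miss. A minimal runnable sketch follows; since the eth2spec package is not available here, the apiClient type and ctxKey are stand-in assumptions that only mirror the shape of the real code:

```go
package main

import (
	"context"
	"fmt"
	"net/url"
)

// ctxKey is an unexported key type, so values stored by this package
// cannot collide with context values stored by other packages.
type ctxKey struct{}

// apiClient stands in for eth2spec.APIClient; it only records the base path.
type apiClient struct{ BasePath string }

// WithClient stashes a client configured for target in the context.
func WithClient(ctx context.Context, target url.URL) context.Context {
	client := &apiClient{BasePath: "http://" + target.Host}
	return context.WithValue(ctx, ctxKey{}, client)
}

// GetClient retrieves the stashed client, or nil if none was stored.
func GetClient(ctx context.Context) *apiClient {
	client, ok := ctx.Value(ctxKey{}).(*apiClient)
	if !ok {
		return nil
	}
	return client
}

func main() {
	ctx := WithClient(context.Background(), url.URL{Host: "localhost:5052"})
	fmt.Println(GetClient(ctx).BasePath) // prints http://localhost:5052
}
```

Using an unexported key type (rather than a string) is the conventional way to keep context values package-private; the dataset's clientKey presumably plays the same role.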
GetClient returns an eth2spec.APIClient from the provided context, if one exists in the context.
func GetClient(ctx context.Context) *eth2spec.APIClient {
	if _, ok := ctx.Value(clientKey).(*eth2spec.APIClient); !ok {
		return nil
	}
	return ctx.Value(clientKey).(*eth2spec.APIClient)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Get(c context.Context) Client {\n\ts, ok := c.Value(&contextKey).(Client)\n\tif !ok {\n\t\tpanic(errors.New(\"git.Client not installed in context\"))\n\t}\n\treturn s\n}", "func (a *API) getClient(ctx context.Context) *http.Client {\n\tif a.Client != nil {\n\t\treturn a.Client\n\t}\n\tif httpClient != nil {\n\t\treturn httpClient\n\t}\n\treturn &http.Client{Timeout: a.getTimeout()}\n}", "func Get(ctx context.Context) *fake.FakeDynamicClient {\n\tuntyped := ctx.Value(dynamicclient.Key{})\n\tif untyped == nil {\n\t\tlogging.FromContext(ctx).Panicf(\n\t\t\t\"Unable to fetch %T from context.\", (*fake.FakeDynamicClient)(nil))\n\t}\n\treturn untyped.(*fake.FakeDynamicClient)\n}", "func (client *ClientImpl) GetClient(ctx context.Context, args GetClientArgs) (interface{}, error) {\n\trouteValues := make(map[string]string)\n\tif args.ClientType == nil || *args.ClientType == \"\" {\n\t\treturn nil, &azuredevops.ArgumentNilOrEmptyError{ArgumentName: \"args.ClientType\"}\n\t}\n\trouteValues[\"clientType\"] = *args.ClientType\n\n\tlocationId, _ := uuid.Parse(\"79c83865-4de3-460c-8a16-01be238e0818\")\n\tresp, err := client.Client.Send(ctx, http.MethodGet, locationId, \"6.0-preview.1\", routeValues, nil, nil, \"\", \"application/json\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar responseValue interface{}\n\terr = client.Client.UnmarshalBody(resp, responseValue)\n\treturn responseValue, err\n}", "func (conf *Configuration) GetClient(ctx *context.Context) (*Client, error) {\n\tvar u *url.URL\n\tvar sURL string\n\tvar err error\n\tvar c *govmomi.Client\n\n\tif sURL, err = conf.getURL(); err == nil {\n\t\tif u, err = soap.ParseURL(sURL); err == nil {\n\t\t\t// Connect and log in to ESX or vCenter\n\t\t\tif c, err = govmomi.NewClient(ctx, u, conf.Insecure); err == nil {\n\t\t\t\treturn &Client{\n\t\t\t\t\tClient: c,\n\t\t\t\t\tConfiguration: conf,\n\t\t\t\t}, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, err\n}", "func Get(ctx context.Context) CEClient {\n\tuntyped := ctx.Value(CECKey{})\n\tif untyped == nil {\n\t\tlogging.FromContext(ctx).Errorf(\n\t\t\t\"Unable to fetch client from context.\")\n\t\treturn nil\n\t}\n\treturn untyped.(CEClient)\n}", "func GetClient(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *ClientState, opts ...pulumi.ResourceOption) (*Client, error) {\n\tvar resource Client\n\terr := ctx.ReadResource(\"gcp:iap/client:Client\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func GetClient(c context.Context, instance string) (*client.Client, error) {\n\tcc, err := clientCache(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cc.Get(instance)\n}", "func (s serviceimpl) GetClient(ctx context.Context) *http.Client {\n\n\tdata, err := ioutil.ReadFile(\"cred.json\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tconf, err := google.JWTConfigFromJSON(data, calendar.CalendarScope)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tclient := conf.Client(ctx)\n\n\treturn client\n}", "func getClient(ctx context.Context, config *oauth2.Config) *http.Client {\n\tcacheFile, err := tokenCacheFile()\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to get path to cached credential file. 
%v\", err)\n\t}\n\ttok, err := tokenFromFile(cacheFile)\n\tif err != nil {\n\t\ttok = getTokenFromWeb(config)\n\t\tsaveToken(cacheFile, tok)\n\t}\n\treturn config.Client(ctx, tok)\n}", "func getClient(ctx context.Context, config *oauth2.Config) *http.Client {\n\tcacheFile, err := tokenCacheFile()\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to get path to cached credential file. %v\", err)\n\t}\n\ttok, err := tokenFromFile(cacheFile)\n\tif err != nil {\n\t\ttok = getTokenFromWeb(config)\n\t\tsaveToken(cacheFile, tok)\n\t}\n\treturn config.Client(ctx, tok)\n}", "func (a *OAuth2ApiService) GetOAuth2Client(ctx context.Context, id string) ApiGetOAuth2ClientRequest {\n\treturn ApiGetOAuth2ClientRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tid: id,\n\t}\n}", "func getClient(ctx context.Context, config *oauth2.Config) *http.Client {\n\tvar err error\n\tvar cacheFile string\n\tif cacheFile, err = tokenCacheFile(); err != nil {\n\t\tlog.Fatalf(\"Unable to get path to cached credential file. %v\", err)\n\t}\n\tvar tok *oauth2.Token\n\tif tok, err = tokenFromFile(cacheFile); err != nil {\n\t\ttok = getTokenFromWeb(config)\n\t\tsaveToken(cacheFile, tok)\n\t}\n\treturn config.Client(ctx, tok)\n}", "func GetClient(willTimedout bool)(*etcdClient.Client, err){\n\tvar(\n\t\terr error\n\t\tclient *etcdClient.Client\n\t)\n\tetcdClient.New(etcdClient.Config{\n\n\t})\n\treturn err, client\n}", "func (c *Context) GetClient() *http.Client {\n\treturn c.Client\n}", "func GetClient(pluginInfo string) (client ClientIntf, err error) {\n\t// To support testability requirement client protocol is not taken from config currently.\n\tswitch clientProtocol {\n\tcase \"grpc\":\n\t\tclientConfig := ClientGRPCConfig{Address: pluginInfo, ChunkSize: chunkSize,\n\t\t\tRootCertificate: \"HTTPSClientCA\"}\n\t\tvar client, err = NewClientGRPC(clientConfig)\n\t\tif err != nil {\n\t\t\tlog.Errorf(util.FailedToCreateClient, err)\n\t\t\treturn nil, err\n\t\t}\n\t\treturn client, nil\n\tdefault:\n\t\treturn nil, errors.New(\"no client is found\")\n\t}\n}", "func (e *ApiClientService) Get(name string) (client ApiClient, err error) {\n\turl := fmt.Sprintf(\"clients/%s\", name)\n\terr = e.client.magicRequestDecoder(\"GET\", url, nil, &client)\n\treturn\n}", "func GetClientV2() (client *horizonclient.Client, err error) {\n\tenv, err := agoraenv.FromEnvVariable()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch env {\n\tcase agoraenv.AgoraEnvironmentProd:\n\t\treturn kinProdHorizonClientV2, nil\n\tdefault:\n\t\treturn kinTestHorizonClientV2, nil\n\t}\n}", "func getClient(config *oauth2.Config) (*http.Client, error) {\n\ttokFile := \"configs/token.json\"\n\ttok, err := tokenFromFile(tokFile)\n\tif err != nil {\n\t\treturn &http.Client{}, err\n\t}\n\treturn config.Client(context.Background(), tok), nil\n}", "func getKubeClient(context string) (*rest.Config, kubernetes.Interface, error) {\n\tconfig, err := configForContext(context)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tclient, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"could not get Kubernetes client: %s\", err)\n\t}\n\treturn config, client, nil\n}", "func GetClient(config *oauth2.Config) *http.Client {\n\t// The file token.json stores the user's access and refresh tokens, and is\n\t// created automatically when the authorization flow completes for the first\n\t// time.\n\ttokFile := \"token.json\"\n\ttok, err := TokenFromFile(tokFile)\n\tif err != nil {\n\t\ttok = GetTokenFromWeb(config)\n\t\tSaveToken(tokFile, 
tok)\n\t}\n\treturn config.Client(context.Background(), tok)\n}", "func (f ClientFactoryFunc) GetClient(kubeconfig []byte) (client.Client, string, error) {\n\treturn f(kubeconfig)\n}", "func getClient(r *http.Request) (*models.OauthClient, error) {\n\tval, ok := context.GetOk(r, clientKey)\n\tif !ok {\n\t\treturn nil, ErrClientNotPresent\n\t}\n\n\tclient, ok := val.(*models.OauthClient)\n\tif !ok {\n\t\treturn nil, ErrClientNotPresent\n\t}\n\n\treturn client, nil\n}", "func getClient(ctx context.Context, config *oauth2.Config) *http.Client {\n cacheFile, err := tokenCacheFile()\n if err != nil {\n log.Fatalf(\"Unable to get path to cached credential file. %v\", err)\n }\n tok, err := tokenFromFile(cacheFile)\n if err != nil {\n tok = getTokenFromWeb(config)\n saveToken(cacheFile, tok)\n }\n return config.Client(ctx, tok)\n}", "func getClient(ctx context.Context, config *oauth2.Config) *http.Client {\n cacheFile, err := tokenCacheFile()\n if err != nil {\n log.Fatalf(\"Unable to get path to cached credential file. %v\", err)\n }\n tok, err := tokenFromFile(cacheFile)\n if err != nil {\n tok = getTokenFromWeb(config)\n saveToken(cacheFile, tok)\n }\n return config.Client(ctx, tok)\n}", "func getClient(ctx context.Context, config *oauth2.Config) *http.Client {\n cacheFile, err := tokenCacheFile()\n if err != nil {\n log.Fatalf(\"Unable to get path to cached credential file. %v\", err)\n }\n tok, err := tokenFromFile(cacheFile)\n if err != nil {\n tok = getTokenFromWeb(config)\n saveToken(cacheFile, tok)\n }\n return config.Client(ctx, tok)\n}", "func GetClient(logger *zap.SugaredLogger, cfg *config.Config) (Interface, error) {\n\tcp, err := config.NewConfigurationProvider(cfg)\n\tif err != nil {\n\t\tlogger.With(zap.Error(err)).Fatal(\"Unable to create client.\")\n\t\treturn nil, err\n\t}\n\n\trateLimiter := NewRateLimiter(logger, cfg.RateLimiter)\n\n\tc, err := New(logger, cp, &rateLimiter)\n\treturn c, err\n}", "func getKubeClient(context string) (*rest.Config, kubernetes.Interface, error) {\n\tconfig, err := getK8sConfig(context).ClientConfig()\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"could not get Kubernetes config for context %q: %s\", context, err)\n\t}\n\tclient, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"could not get Kubernetes client: %s\", err)\n\t}\n\treturn config, client, nil\n}", "func (cf *ClientFactory) GetClient(ctx context.Context) (*Client, error) {\n\tcf.mux.Lock()\n\tdefer cf.mux.Unlock()\n\tretrying := false\n\tfor {\n\t\tif cf.client == nil {\n\t\t\tvar err error\n\t\t\tif cf.client, err = NewClient(ctx, cf.vSphereURL, cf.parent); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\terr := cf.testClient(ctx)\n\t\tif err != nil {\n\t\t\tif !retrying {\n\t\t\t\t// The client went stale. Probably because someone rebooted vCenter. Clear it to\n\t\t\t\t// force us to create a fresh one. We only get one chance at this. 
If we fail a second time\n\t\t\t\t// we will simply skip this collection round and hope things have stabilized for the next one.\n\t\t\t\tretrying = true\n\t\t\t\tcf.client = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn cf.client, nil\n\t}\n}", "func (c *Connection) GetClient() GenericClient {\n\treturn connectionClient{c}\n}", "func (c *Client) Get(ctx context.Context) (*WrapperClient, error) {\n\tstart := time.Now()\n\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\t// Scheduler returns Factory interface\n\tfactory, err := c.scheduler.Schedule(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\twrapperCli, ok := factory.(*WrapperClient)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"failed to convert Factory interface to *WrapperClient\")\n\t}\n\n\tend := time.Now()\n\telapsed := end.Sub(start)\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"elapsed\": elapsed,\n\t}).Debug(\"Get a grpc client\")\n\n\treturn wrapperCli, nil\n}", "func getKubeClient(context string, kubeconfig string) (*rest.Config, kubernetes.Interface, error) {\n\tconfig, err := configForContext(context, kubeconfig)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tclient, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"could not get Kubernetes client: %s\", err)\n\t}\n\treturn config, client, nil\n}", "func (server *server) getClient(ctx context.Context) *Client {\n\n\tuser := getUUID(ctx)\n\tif user == uuid.Nil {\n\t\treturn nil\n\t}\n\n\tif client, ok := server.clients[user]; ok {\n\t\treturn client\n\t}\n\n\treturn nil\n}", "func (c *Context) GetAPIClient() (*APIClient) {\n\tmutableMutex.Lock()\n mutableMutex.Unlock()\n\treturn c.APIClient\n}", "func getClient(ctx context.Context, config *oauth2.Config, tokenFile string) (*http.Client, error) {\n\tloadedTok, err := tokenFromFile(tokenFile)\n\tif err != nil {\n\t\tlog.Printf(\"Could not load token from '%s'\\n\", tokenFile)\n\t\treturn nil, err\n\t}\n\t// force refresh\n\ttokenSource := config.TokenSource(ctx, loadedTok)\n\ttok, err := tokenSource.Token()\n\tif err != nil {\n\t\tlog.Printf(\"Could not refresh token from '%s'\\n\", tokenFile)\n\t\treturn nil, err\n\t}\n\treturn config.Client(ctx, tok), nil\n}", "func GetClient() kubernetes.Interface {\n\tvar kubeClient kubernetes.Interface\n\t_, err := rest.InClusterConfig()\n\tif err != nil {\n\t\tkubeClient = getClientOutOfCluster()\n\t} else {\n\t\tkubeClient = getClientInCluster()\n\t}\n\n\treturn kubeClient\n}", "func GetClient() *ethclient.Client {\n\tnodeURL := utils.EthNodeURI\n\tclient, _ := ethclient.Dial(nodeURL)\n\treturn client\n}", "func getClient(config *oauth2.Config) *http.Client {\n\ttokFile := \"token.json\"\n\ttok, err := tokenFromFile(tokFile)\n\tif err != nil {\n\t\ttok = getTokenFromWeb(config)\n\t\tsaveToken(tokFile, tok)\n\t}\n\treturn config.Client(context.Background(), tok)\n}", "func getClient(config *oauth2.Config) *http.Client {\n\ttokFile := \"token.json\"\n\ttok, err := tokenFromFile(tokFile)\n\tif err != nil {\n\t\ttok = getTokenFromWeb(config)\n\t\tsaveToken(tokFile, tok)\n\t}\n\treturn config.Client(context.Background(), tok)\n}", "func getClient(config *oauth2.Config) *http.Client {\n\ttokFile := \"token.json\"\n\ttok, err := tokenFromFile(tokFile)\n\tif err != nil {\n\t\ttok = getTokenFromWeb(config)\n\t\tsaveToken(tokFile, tok)\n\t}\n\treturn config.Client(context.Background(), tok)\n}", "func (p *TestProvider) GetClient(creds *common.Credentials) (*http.Client, error) {\n\targs := p.Called(creds)\n\treturn 
args.Get(0).(*http.Client), args.Error(1)\n}", "func getClient(config *oauth2.Config) *http.Client {\n\tconf, _ := DefaultConfig()\n\ttok, err := tokenFromFile(conf.Path.TokenFile)\n\tif err != nil {\n\t\ttok = getTokenFromWeb(config)\n\t\tsaveToken(conf.Path.TokenFile, tok)\n\t}\n\treturn config.Client(context.Background(), tok)\n}", "func (s *APIClientService) Get(ctx context.Context, id string) (APIClient, *http.Response, error) {\n\tresource := new(APIClient)\n\n\treq, err := s.client.NewRequest(ctx, http.MethodGet, apiClientBasePath+\"/\"+id, nil)\n\tif err != nil {\n\t\treturn *resource, nil, err\n\t}\n\n\tresp, _, err := s.client.Do(ctx, req, resource, false)\n\tif err != nil {\n\t\treturn *resource, nil, err\n\t}\n\n\treturn *resource, resp, nil\n}", "func GetClient() (*client.Client, error) {\n\tvar kubeConfig restclient.Config\n\n\t// Set the Kubernetes configuration based on the environment\n\tif _, err := os.Stat(\"/var/run/secrets/kubernetes.io/serviceaccount/token\"); err == nil {\n\t\tconfig, err := restclient.InClusterConfig()\n\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Failed to create in-cluster config: %v.\", err)\n\t\t}\n\n\t\tkubeConfig = *config\n\t} else {\n\t\tkubeConfig = restclient.Config{\n\t\t\tHost: os.Getenv(\"KUBE_HOST\"),\n\t\t}\n\n\t\tif kubeConfig.Host == \"\" {\n\t\t\treturn nil, fmt.Errorf(ErrNeedsKubeHostSet)\n\t\t}\n\t}\n\n\t// Create the Kubernetes client based on the configuration\n\treturn client.New(&kubeConfig)\n}", "func GetClient(config Config) (*client.Client, error) {\n\topts := []client.Option{\n\t\tclient.WithNamespace(config.GetNamespace()),\n\t\tclient.WithScope(config.GetScope()),\n\t}\n\tmember := config.GetMember()\n\thost := config.GetHost()\n\tif host != \"\" {\n\t\topts = append(opts, client.WithPeerHost(config.GetHost()))\n\t\topts = append(opts, client.WithPeerPort(config.GetPort()))\n\t\tfor _, s := range serviceRegistry.services {\n\t\t\tservice := func(service cluster.Service) func(peer.ID, *grpc.Server) {\n\t\t\t\treturn func(id peer.ID, server *grpc.Server) {\n\t\t\t\t\tservice(cluster.NodeID(id), server)\n\t\t\t\t}\n\t\t\t}(s)\n\t\t\topts = append(opts, client.WithPeerService(service))\n\t\t}\n\t}\n\tif member != \"\" {\n\t\topts = append(opts, client.WithMemberID(config.GetMember()))\n\t} else if host != \"\" {\n\t\topts = append(opts, client.WithMemberID(config.GetHost()))\n\t}\n\n\treturn client.New(config.GetController(), opts...)\n}", "func (m ClientManager) Get(id string) (*Client, error) {\n\turl := joinURL(m.Endpoint, id).String()\n\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, errors.Annotatef(err, \"new request for %s\", url)\n\t}\n\n\tvar client *Client\n\n\terr = bind(m.Client, req, client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn client, nil\n}", "func (a *AuthClient) GetClient() (*http.Client, error) {\n\ttok, err := tokenFromFile(a.tokenPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn a.config.Client(context.Background(), tok), nil\n}", "func GetClient(id int) components.Client {\n\treturn getClient(id)\n}", "func ClientFromContext(ctx context.Context) statsd.ClientInterface {\n\tvalue := ctx.Value(statsdClient)\n\tif value == nil {\n\t\tpanic(\"No statsd client found in context\")\n\t}\n\n\treturn value.(statsd.ClientInterface)\n}", "func (a APIKeyAuthentication) GetClient() *http.Client {\n\treturn &a.Client\n}", "func GetClient() *goname.GoName {\n\tif apiClient != nil {\n\t\treturn apiClient\n\t}\n\n\tconfigErr := 
ValidateGlobalConfig()\n\tif configErr != nil {\n\t\tlog.WithError(configErr).Fatal(\"configuration error\")\n\t}\n\n\tclient := goname.New(viper.GetString(\"username\"), viper.GetString(\"api-key\"))\n\tclient.BaseURL = viper.GetString(\"api-url\")\n\n\tloginErr := client.Login()\n\tif loginErr != nil {\n\t\tlog.WithError(loginErr).Fatal(\"could not authenticate\")\n\t}\n\n\tapiClient = client\n\treturn client\n}", "func Client(ctx context.Context) client.Client {\n\tval := ctx.Value(clientCtxKey{})\n\tif val == nil {\n\t\treturn nil\n\t}\n\treturn val.(client.Client)\n}", "func getClient(ctx context.Context, config *oauth2.Config) (*http.Client, error) {\n\ttokFile, err := xdg.CacheFile(\"gphotos-fb/token.json\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"xdg.CacheFile: %w\", err)\n\t}\n\n\ttok, err := tokenFromFile(tokFile)\n\tif err != nil {\n\t\ttok, err := getTokenFromWeb(ctx, config)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"getTokenFromWeb: %w\", err)\n\t\t}\n\n\t\tif err := saveToken(tokFile, tok); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"saveToken: %w\", err)\n\t\t}\n\t}\n\treturn config.Client(ctx, tok), nil\n}", "func (ctl *taskController) GetClient(timeout time.Duration) (*http.Client, error) {\n\t// TODO(vadimsh): Use per-project service accounts, not a global cron service\n\t// account.\n\tctx, _ := clock.WithTimeout(ctl.ctx, timeout)\n\ttransport, err := client.Transport(ctx, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &http.Client{Transport: transport}, nil\n}", "func contextClient(ctx context.Context) *http.Client {\n\tif hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {\n\t\treturn hc\n\t}\n\tfor _, fn := range contextClientFuncs {\n\t\tc, err := fn(ctx)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif c != nil {\n\t\t\treturn c\n\t\t}\n\t}\n\treturn http.DefaultClient\n}", "func getEventClient(spec *Spec) (*event.Client, error) {\n\tcid := clientID(spec)\n\tclient, ok := clientMap[cid]\n\tif ok && client != nil {\n\t\treturn client, nil\n\t}\n\n\t// create new event client\n\tsdk, err := fabsdk.New(networkConfigProvider(spec.NetworkConfig, spec.EntityMatchers))\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"Failed to create new SDK\")\n\t}\n\n\topts := []fabsdk.ContextOption{fabsdk.WithUser(spec.UserName)}\n\tif spec.OrgName != \"\" {\n\t\topts = append(opts, fabsdk.WithOrg(spec.OrgName))\n\t}\n\n\tif spec.EventType == EventFiltered {\n\t\tclient, err = event.New(sdk.ChannelContext(spec.ChannelID, opts...))\n\t} else {\n\t\tclient, err = event.New(sdk.ChannelContext(spec.ChannelID, opts...), event.WithBlockEvents())\n\t}\n\tif err != nil {\n\t\tclientMap[cid] = client\n\t}\n\treturn client, err\n}", "func (c *API) GetClient(out interface{}, clientName string) (*http.Response, error) {\n\ts := []string{clientsURI, clientName}\n\turi := strings.Join(s, \"/\")\n\tresp, err := c.get(uri, out)\n\treturn resp, err\n}", "func getClient(instanceAddr string) *api.Client {\n\tif vaultClients[instanceAddr] == nil {\n\t\tlog.Fatalf(\"[Vault Client] client does not exist for address: %s\", instanceAddr)\n\t}\n\treturn vaultClients[instanceAddr]\n}", "func getClient(config *oauth2.Config) *http.Client {\n\t// The file token.json stores the user's access and refresh tokens, and is\n\t// created automatically when the authorization flow completes for the first\n\t// time.\n\ttokFile := \"token.json\"\n\ttok, err := tokenFromFile(tokFile)\n\tif err != nil {\n\t\ttok = getTokenFromWeb(config)\n\t\tsaveToken(tokFile, 
tok)\n\t}\n\treturn config.Client(context.Background(), tok)\n}", "func getClient(config *oauth2.Config) *http.Client {\n\t// The file token.json stores the user's access and refresh tokens, and is\n\t// created automatically when the authorization flow completes for the first\n\t// time.\n\ttokFile := \"token.json\"\n\ttok, err := tokenFromFile(tokFile)\n\tif err != nil {\n\t\ttok = getTokenFromWeb(config)\n\t\tsaveToken(tokFile, tok)\n\t}\n\treturn config.Client(context.Background(), tok)\n}", "func getClient(config *oauth2.Config) *http.Client {\n\t// The file token.json stores the user's access and refresh tokens, and is\n\t// created automatically when the authorization flow completes for the first\n\t// time.\n\ttokFile := \"token.json\"\n\ttok, err := tokenFromFile(tokFile)\n\tif err != nil {\n\t\ttok = getTokenFromWeb(config)\n\t\tsaveToken(tokFile, tok)\n\t}\n\treturn config.Client(context.Background(), tok)\n}", "func getClient(config *oauth2.Config) *http.Client {\n\t// The file token.json stores the user's access and refresh tokens, and is\n\t// created automatically when the authorization flow completes for the first\n\t// time.\n\ttokFile := \"token.json\"\n\ttok, err := tokenFromFile(tokFile)\n\tif err != nil {\n\t\ttok = getTokenFromWeb(config)\n\t\tsaveToken(tokFile, tok)\n\t}\n\treturn config.Client(context.Background(), tok)\n}", "func getClient(config *oauth2.Config) *http.Client {\n\t// The file token.json stores the user's access and refresh tokens, and is\n\t// created automatically when the authorization flow completes for the first\n\t// time.\n\ttokFile := \"token.json\"\n\ttok, err := tokenFromFile(tokFile)\n\tif err != nil {\n\t\ttok = getTokenFromWeb(config)\n\t\tsaveToken(tokFile, tok)\n\t}\n\treturn config.Client(context.Background(), tok)\n}", "func getClient(config *oauth2.Config) *http.Client {\n\t// The file token.json stores the user's access and refresh tokens, and is\n\t// created automatically when the authorization flow completes for the first\n\t// time.\n\ttokFile := \"token.json\"\n\ttok, err := tokenFromFile(tokFile)\n\tif err != nil {\n\t\ttok = getTokenFromWeb(config)\n\t\tsaveToken(tokFile, tok)\n\t}\n\treturn config.Client(context.Background(), tok)\n}", "func GetClient() *http.Client {\n\tb, err := ioutil.ReadFile(credentials)\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to read client secret file: %v\", err)\n\t}\n\n\tcred, err := google.CredentialsFromJSON(context.Background(), b, scopes...)\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to parse credentials file: %v\", err)\n\t}\n\n\ttok, err := cred.TokenSource.Token()\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to get token: %v\", err)\n\t}\n\n\treturn config.Client(context.Background(), tok)\n}", "func GetClient(apiKey string) *Client {\n\t//default to prod API\n\tapi := &Client{url: \"https://api.smartsheet.com/2.0\", apiKey: apiKey}\n\tapi.client = &http.Client{} //per docs clients should be made once, https://golang.org/pkg/net/http/\n\n\treturn api\n}", "func GetClient(username, password string) *Client {\n\treturn &Client{\n\t\tUsername: username,\n\t\tPassword: password,\n\t\tbaseURL: base_url,\n\t}\n}", "func (m MongoManager) GetClient(ctx context.Context, id string) (fosite.Client, error) {\n\treturn m.GetConcreteClient(id)\n}", "func getClient(config *oauth2.Config) *http.Client {\n\t// The file token.json stores the user's access and refresh tokens, and is\n\t// created automatically when the authorization flow completes for the first\n\t// time.\n\ttokFile := 
\"data/token.json\"\n\ttok, err := tokenFromFile(tokFile)\n\tif err != nil {\n\t\ttok = getTokenFromWeb(config)\n\t\tsaveToken(tokFile, tok)\n\t}\n\treturn config.Client(context.Background(), tok)\n}", "func GetEthClient() (*ethclient.Client, error) {\n\n\tclient, err := ethclient.Dial(\"https://mainnet.infura.io/v3/af5e28806ac04f57b1e10567f7a2946b\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn nil, err\n\t}\n\n\treturn client, nil\n}", "func GetAPIClient(serviceURL string) *Client {\n\treturn &Client{serviceURL: serviceURL}\n}", "func getClient(config vaultAuthOptions) (*api.Client, string, error) {\n\t// create config for connection\n\tvaultCFG := api.DefaultConfig()\n\tvaultCFG.Address = config.URL\n\n\tlog.Debugf(\"Attempting to authenticate user %s to vault %s\", config.Username, config.URL)\n\t// create client\n\tvClient, err := api.NewClient(vaultCFG)\n\tif err != nil {\n\t\tlog.WithError(err).Errorf(color.New(color.Bold).Sprintf(\"Could not connect to Vault at %s.\", config.URL))\n\t\tterminate(1)\n\t\treturn nil, \"\", err\n\t}\n\n\tsys := vClient.Sys()\n\tsealStatus, _ := sys.SealStatus()\n\tif sealStatus.Sealed == true {\n\t\tlog.Fatal(\"Error, Vault is sealed. We cannot authenticate at this time.\")\n\t\tterminate(1)\n\t\treturn nil, \"\", nil\n\t}\n\n\tlog.Infof(\"Requesting Vault authentication for user %s\", config.Username)\n\tauthOptions := map[string]interface{}{\n\t\t\"password\": config.Password,\n\t}\n\n\tloginPath := fmt.Sprintf(\"auth/userpass/login/%s\", config.Username)\n\tsecret, err := vClient.Logical().Write(loginPath, authOptions)\n\tif err != nil {\n\t\tlog.WithError(err).Errorf(color.New(color.Bold).Sprintf(\"Could not authenticate user %s\", config.Username))\n\t\tterminate(1)\n\t\treturn nil, \"\", nil\n\t}\n\n\tvClient.SetToken(secret.Auth.ClientToken)\n\n\tlog.Info(\"Authentication Success\")\n\n\treturn vClient, secret.Auth.ClientToken, nil\n}", "func GetClient() *blizzard.Client {\n\tconfig := Config.LoadConfiguration(\"config.json\")\n\tclientID := config.ClientID\n\tkey := config.Key\n\n\tctx := context.Background()\n\n\tblizz := blizzard.NewClient(clientID, key, blizzard.US, blizzard.EnUS)\n\n\terr := blizz.AccessTokenRequest(ctx)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\treturn blizz\n}", "func (kv *Keyvaults) GetClient(kvname string) (*Client, error) {\n\tif client, ok := kv.clients[kvname]; ok {\n\t\treturn client, nil\n\t}\n\n\tif kvname == \"\" {\n\t\treturn nil, errors.New(\"Unspecified keyvault\")\n\t}\n\n\tclient := keyvault.New()\n\tclient.Authorizer = kv.authorizer\n\n\tkvc := &Client{\n\t\tname: kvname,\n\t\tbaseUrl: fmt.Sprintf(\"https://%s.%s\", strings.ToLower(kvname), kv.env.KeyVaultDNSSuffix),\n\t\tclient: client,\n\t\tsecretCache: make(map[string]*secretCacheItem),\n\t\tcertCache: make(map[string]*certCacheItem),\n\t\tkeyCache: make(map[string]*keyCacheItem),\n\t\tcertListCache: nil,\n\t\tsecretListCache: nil,\n\t}\n\n\tkv.clients[kvname] = kvc\n\treturn kvc, nil\n}", "func (m *MockSession) GetVaultClient() *api.Client {\n\targs := m.Mock.Called()\n\treturn args.Get(0).(*api.Client)\n}", "func getClient(id string, config *oauth2.Config) (*http.Client, error) {\n\t// The file token.json stores the user's access and refresh tokens, and is\n\t// created automatically when the authorization flow completes for the first\n\t// time.\n\n\ttokFile := \"./secrets/token_\" + id + \".json\"\n\ttok, err := tokenFromFile(tokFile)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\ttok, err = getTokenFromWeb(config)\n\t\t\tif 
err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\terr = saveToken(tokFile, tok)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn config.Client(context.Background(), tok), nil\n}", "func (c *Clients) GetInspectorClient() (*gophercloud.ServiceClient, error) {\n\t// Terraform concurrently creates some resources which means multiple callers can request an Inspector client. We\n\t// only need to check if the API is available once, so we use a mux to restrict one caller to polling the API.\n\t// When the mux is released, the other callers will fall through to the check for inspectorUp.\n\tc.inspectorMux.Lock()\n\tdefer c.inspectorMux.Unlock()\n\n\tif c.inspector == nil {\n\t\treturn nil, fmt.Errorf(\"no inspector endpoint was specified\")\n\t} else if c.inspectorUp || c.timeout == 0 {\n\t\treturn c.inspector, nil\n\t} else if c.inspectorFailed {\n\t\treturn nil, fmt.Errorf(\"could not contact API: timeout reached\")\n\t}\n\n\t// Let's poll the API until it's up, or times out.\n\tduration := time.Duration(c.timeout) * time.Second\n\tctx, cancel := context.WithTimeout(context.Background(), duration)\n\tdefer cancel()\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tlog.Printf(\"[INFO] Waiting for Inspector API...\")\n\t\twaitForAPI(ctx, c.inspector)\n\t\tclose(done)\n\t}()\n\n\t// Wait for done or time out\n\tselect {\n\tcase <-ctx.Done():\n\t\tif err := ctx.Err(); err != nil {\n\t\t\tc.ironicFailed = true\n\t\t\treturn nil, err\n\t\t}\n\tcase <-done:\n\t}\n\n\tif err := ctx.Err(); err != nil {\n\t\tc.inspectorFailed = true\n\t\treturn nil, err\n\t}\n\n\tc.inspectorUp = true\n\treturn c.inspector, ctx.Err()\n}", "func getClient(url string, groupID uint) (*client.Client) {\n\t// RPC API\n\tc, err := client.Dial(url, groupID) // change to your RPC and groupID\n\tif err != nil {\n fmt.Println(\"can not dial to the RPC API, please check the config file gobcos_config.yaml: \", err)\n os.Exit(1)\n\t}\n\treturn c\n}", "func (a *Auth) GetClient() (*http.Client, error) {\n\tif a.client == nil {\n\t\tclient, err := getClient(a.credentialsPath, a.tokenPath, a.scopes)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error: Could not get client: %v\", err)\n\t\t}\n\t\ta.client = client\n\t}\n\treturn a.client, nil\n}", "func (b *OGame) GetClient() *httpclient.Client {\n\treturn b.client\n}", "func GetClient(token string) *godo.Client {\n\ttokenSource := &TokenSource{AccessToken: token}\n\toauthClient := oauth2.NewClient(oauth2.NoContext, tokenSource)\n\tclient := godo.NewClient(oauthClient)\n\tclient.BaseURL = GodoBase\n\treturn client\n}", "func (a *API) GetClient() *http.Client {\n\treturn &http.Client{}\n}", "func GetPreparedApiClient(sdkConfig *SdkConfig) (*apiclient.PydioCellsRest, context.Context, error) {\n\n\ttransport := httptransport.New(sdkConfig.Url, apiResourcePath, []string{sdkConfig.Protocol})\n\tjwt, err := retrieveToken(sdkConfig)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\n\t\t\t\"cannot retrieve token with config:\\n%s - %s - %s - %s - %s - %s - %v\\nerror cause: %s\",\n\t\t\tsdkConfig.Protocol, sdkConfig.Url, sdkConfig.ClientKey, sdkConfig.ClientSecret,\n\t\t\tsdkConfig.User, sdkConfig.Password, sdkConfig.SkipVerify, err.Error())\n\t}\n\tbearerTokenAuth := httptransport.BearerToken(jwt)\n\ttransport.DefaultAuthentication = bearerTokenAuth\n\n\tclient := apiclient.New(transport, strfmt.Default)\n\n\treturn client, context.Background(), nil\n}", "func getClient(cCmd *cobra.Command) (client.API, error) 
{\n\tclientDriver, _ := cmdFleet.PersistentFlags().GetString(\"driver\")\n\n\tswitch clientDriver {\n\tcase clientDriverAPI:\n\t\treturn getHTTPClient(cCmd)\n\tcase clientDriverEtcd:\n\t\treturn getRegistryClient(cCmd)\n\t}\n\n\treturn nil, fmt.Errorf(\"unrecognized driver %q\", clientDriver)\n}", "func (m *Manager) GetClient(clientID string) (cli oauth2.ClientInfo, err error) {\n\t_, ierr := m.injector.Invoke(func(stor oauth2.ClientStore) {\n\t\tcli, err = stor.GetByID(clientID)\n\t\tif err != nil {\n\t\t\treturn\n\t\t} else if cli == nil {\n\t\t\terr = errors.ErrInvalidClient\n\t\t}\n\t})\n\tif err == nil && ierr != nil {\n\t\terr = ierr\n\t}\n\treturn\n}", "func getClient(config *oauth2.Config, tokFile string) *http.Client {\n\ttok, err := tokenFromFile(tokFile)\n\tif err != nil {\n\t\ttok = getTokenFromWeb(config)\n\t\tsaveToken(tokFile, tok)\n\t}\n\treturn config.Client(context.Background(), tok)\n}", "func GetClient() *rest.RESTClient {\n\tconfig, err := buildOutOfClusterConfig()\n\tif err != nil {\n\t\tlog.Warnf(\"Can not get kubernetes config: %v\", err)\n\n\t\tconfig, err = rest.InClusterConfig()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Can not get kubernetes config: %v\", err)\n\t\t}\n\t}\n\tconfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}\n\tc, err := rest.UnversionedRESTClientFor(config)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Cannot create REST client, try setting KUBECONFIG environment variable: %v\", err)\n\t}\n\treturn c\n}", "func FromContext(ctx context.Context) (*Client, bool) {\n\tc, ok := ctx.Value(ctxKey{}).(*Client)\n\treturn c, ok\n}", "func GetClient(ctx *fiber.Ctx) *Client {\n\treturn ctx.Locals(\"nats_client\").(*Client)\n}", "func (m ClientMocker) Get(id string) (*Client, error) {\n\tclient := new(Client)\n\n\treturn client, nil\n}", "func getKubeClient(kubeConfig, kubeContext string) (*rest.Config, *clientset.Clientset, error) {\n\tconfig, err := configForContext(kubeConfig, kubeContext)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"could not load Kubernetes configuration (%s)\", err)\n\t}\n\n\tclient, err := clientset.NewForConfig(config)\n\treturn nil, client, err\n}", "func FromCtx(ctx context.Context) (*Client, error) {\n\tconst op errors.Op = \"apiclient/FromCtx\"\n\n\tclient, ok := ctx.Value(ctxKey).(*Client)\n\tif ok {\n\t\treturn client, nil\n\t}\n\treturn client, errors.E(op, \"Client is not set properly to context\")\n}", "func GetClient() (client *horizon.Client, err error) {\n\tenv, err := agoraenv.FromEnvVariable()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch env {\n\tcase agoraenv.AgoraEnvironmentProd:\n\t\treturn kinProdHorizonClient, nil\n\tdefault:\n\t\treturn kinTestHorizonClient, nil\n\t}\n}", "func (f ClientGetterFunc) GetClient() (Client, error) {\n\treturn f()\n}", "func GetClient() *Client {\n\treturn organizationServiceClient\n}", "func GetClient() *client.Client {\n\treturn cli\n}", "func GetClient(timeout int) *http.Client {\n\tclient := http.Client{\n\t\tTimeout: time.Duration(timeout) * time.Second,\n\t}\n\treturn &client\n}", "func RESTClient(ctx context.Context) *resty.Client {\n\tval := ctx.Value(clientCtxKey{})\n\tif val == nil {\n\t\treturn nil\n\t}\n\treturn val.(*resty.Client)\n}", "func (cf *clientFactory) GetClient(authInfo *api.AuthInfo) (ClientInterface, error) {\n\tclientEntry, err := cf.getClientEntry(authInfo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn clientEntry.client, nil\n}", "func (a *API) GetClient(clientID string) (*Client, error) {\n\tu := 
urlWithPath(*a.TargetURL, fmt.Sprintf(\"%s/%s\", ClientsEndpoint, clientID))\n\tclient := &Client{}\n\terr := a.doJSON(http.MethodGet, &u, nil, client, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn client, nil\n}" ]
[ "0.72948647", "0.68114364", "0.6591933", "0.65879595", "0.65823406", "0.65391773", "0.6518996", "0.65158796", "0.64835215", "0.64815354", "0.64815354", "0.64766634", "0.6468629", "0.6466018", "0.6444596", "0.64060843", "0.63972735", "0.638421", "0.63750327", "0.6355736", "0.63529915", "0.6347422", "0.63218933", "0.6309158", "0.6309158", "0.6309158", "0.6306552", "0.6294159", "0.6265345", "0.6252255", "0.62506926", "0.6237063", "0.62270945", "0.6225124", "0.62091285", "0.6206219", "0.6202112", "0.61896175", "0.61896175", "0.61896175", "0.6172543", "0.6161653", "0.6161377", "0.616022", "0.61570626", "0.6156381", "0.61314607", "0.61261755", "0.6121091", "0.61042404", "0.6100746", "0.6097114", "0.6093744", "0.60891926", "0.60704875", "0.60691303", "0.60640186", "0.6039764", "0.60307086", "0.60307086", "0.60307086", "0.60307086", "0.60307086", "0.60307086", "0.60280836", "0.6019111", "0.6017268", "0.6009659", "0.6009344", "0.6006774", "0.6004684", "0.60040677", "0.59900826", "0.5980366", "0.59668314", "0.59635717", "0.59601647", "0.59565485", "0.5955888", "0.59452456", "0.5942901", "0.5939071", "0.59369475", "0.5934185", "0.59159774", "0.5912894", "0.5885177", "0.5873762", "0.5868704", "0.5864713", "0.5829244", "0.581827", "0.58111674", "0.5808027", "0.5802825", "0.57898897", "0.57868075", "0.5775889", "0.5759879", "0.5753584" ]
0.80457664
0
NewGoCollector is the obsolete version of collectors.NewGoCollector. See there for documentation. Deprecated: Use collectors.NewGoCollector instead.
func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
	opt := defaultGoCollectorOptions()
	for _, o := range opts {
		o(&opt)
	}

	exposedDescriptions := matchRuntimeMetricsRules(opt.RuntimeMetricRules)

	// Collect all histogram samples so that we can get their buckets.
	// The API guarantees that the buckets are always fixed for the lifetime
	// of the process.
	var histograms []metrics.Sample
	for _, d := range exposedDescriptions {
		if d.Kind == metrics.KindFloat64Histogram {
			histograms = append(histograms, metrics.Sample{Name: d.Name})
		}
	}

	if len(histograms) > 0 {
		metrics.Read(histograms)
	}

	bucketsMap := make(map[string][]float64)
	for i := range histograms {
		bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
	}

	// Generate a collector for each exposed runtime/metrics metric.
	metricSet := make([]collectorMetric, 0, len(exposedDescriptions))
	// SampleBuf is used for reading from runtime/metrics.
	// We are assuming the largest case to have stable pointers for sampleMap purposes.
	sampleBuf := make([]metrics.Sample, 0, len(exposedDescriptions)+len(opt.RuntimeMetricSumForHist)+len(rmNamesForMemStatsMetrics))
	sampleMap := make(map[string]*metrics.Sample, len(exposedDescriptions))
	for _, d := range exposedDescriptions {
		namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(&d.Description)
		if !ok {
			// Just ignore this metric; we can't do anything with it here.
			// If a user decides to use the latest version of Go, we don't want
			// to fail here. This condition is tested in TestExpectedRuntimeMetrics.
			continue
		}

		sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
		sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]

		var m collectorMetric
		if d.Kind == metrics.KindFloat64Histogram {
			_, hasSum := opt.RuntimeMetricSumForHist[d.Name]
			unit := d.Name[strings.IndexRune(d.Name, ':')+1:]
			m = newBatchHistogram(
				NewDesc(
					BuildFQName(namespace, subsystem, name),
					d.Description.Description,
					nil,
					nil,
				),
				internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit),
				hasSum,
			)
		} else if d.Cumulative {
			m = NewCounter(CounterOpts{
				Namespace: namespace,
				Subsystem: subsystem,
				Name:      name,
				Help:      d.Description.Description,
			},
			)
		} else {
			m = NewGauge(GaugeOpts{
				Namespace: namespace,
				Subsystem: subsystem,
				Name:      name,
				Help:      d.Description.Description,
			})
		}
		metricSet = append(metricSet, m)
	}

	// Add exact sum metrics to sampleBuf if not added before.
	for _, h := range histograms {
		sumMetric, ok := opt.RuntimeMetricSumForHist[h.Name]
		if !ok {
			continue
		}
		if _, ok := sampleMap[sumMetric]; ok {
			continue
		}
		sampleBuf = append(sampleBuf, metrics.Sample{Name: sumMetric})
		sampleMap[sumMetric] = &sampleBuf[len(sampleBuf)-1]
	}

	var (
		msMetrics      memStatsMetrics
		msDescriptions []metrics.Description
	)

	if !opt.DisableMemStatsLikeMetrics {
		msMetrics = goRuntimeMemStats()
		msDescriptions = bestEffortLookupRM(rmNamesForMemStatsMetrics)

		// Check if metric was not exposed before and if not, add to sampleBuf.
		for _, mdDesc := range msDescriptions {
			if _, ok := sampleMap[mdDesc.Name]; ok {
				continue
			}
			sampleBuf = append(sampleBuf, metrics.Sample{Name: mdDesc.Name})
			sampleMap[mdDesc.Name] = &sampleBuf[len(sampleBuf)-1]
		}
	}

	return &goCollector{
		base:                 newBaseGoCollector(),
		sampleBuf:            sampleBuf,
		sampleMap:            sampleMap,
		rmExposedMetrics:     metricSet,
		rmExactSumMapForHist: opt.RuntimeMetricSumForHist,
		msMetrics:            msMetrics,
		msMetricsEnabled:     !opt.DisableMemStatsLikeMetrics,
	}
}
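A hedged usage sketch: registering the Go runtime collector with a dedicated registry and serving it over HTTP. It assumes a recent prometheus/client_golang release where the collectors subpackage (the recommended replacement named in the doc comment above) is available; the listen address is arbitrary.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A dedicated registry so only the Go runtime metrics are exposed,
	// without the default process and build-info collectors.
	reg := prometheus.NewRegistry()

	// collectors.NewGoCollector is the non-deprecated entry point that the
	// doc comment above points to.
	reg.MustRegister(collectors.NewGoCollector())

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}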
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewCollector() Collector {\n\treturn make(Collector)\n}", "func NewCollector(store *store.MemoryStore) *Collector {\n\treturn &Collector{\n\t\tstore: store,\n\t\tstopChan: make(chan struct{}),\n\t\tdoneChan: make(chan struct{}),\n\t}\n}", "func NewCollector(client *api.Client, collectSnaphots, collectNetwork bool) prometheus.Collector {\n\treturn &VMCollector{client: client, collectSnapshots: collectSnaphots, collectNetwork: collectNetwork}\n}", "func NewCollector(cl client.Client) prometheus.Collector {\n\treturn &collector{\n\t\tcl: cl,\n\t}\n}", "func New() *Collector { return &Collector{} }", "func NewCollector() Collector {\n\treturn Collector{client: NewClient(time.Second * 5)}\n}", "func NewCollector() (prometheus.Collector, error) {\n\treturn &collector{}, nil\n}", "func NewCollector(config *CollectorConfig) (Collector, error) {\n\tc := &standardCollector{\n\t\trunning: true,\n\t\tevents: make(chan Event, config.EventBufferSize),\n\t\tconfig: config,\n\t\tneighbors: make(map[string]neighbor),\n\t\tRWMutex: &sync.RWMutex{},\n\t}\n\n\treturn c, nil\n}", "func NewCollector(config *Config) (coll *Collector, err error) {\n\tvar gelfWriter *gelf.Writer\n\n\tif gelfWriter, err = gelf.NewWriter(config.Graylog.Address); err != nil {\n\t\treturn nil, err\n\t}\n\n\tcoll = new(Collector)\n\tcoll.writer = gelfWriter\n\tcoll.host = config.Collector.Hostname\n\n\treturn coll, nil\n}", "func NewCollector(l *logrus.Entry, updateInterval time.Duration) *Collector {\n\tcol := &Collector{\n\t\tMsgEvtChan: make(chan *discordgo.Message, 1000),\n\t\tinterval: updateInterval,\n\t\tl: l,\n\t\tchannels: make(map[int64]*entry),\n\t}\n\n\tgo col.run()\n\n\treturn col\n}", "func NewCollector() collector.RPCCollector {\n\treturn &interfaceCollector{}\n}", "func NewCollector(cfg *config.AgentConfig, ctx context.Context) (Collector, error) {\n\tsysInfo, err := checks.CollectSystemInfo(cfg)\n\tif err != nil {\n\t\treturn Collector{}, err\n\t}\n\n\tenabledChecks := make([]checks.Check, 0)\n\tfor _, c := range checks.All {\n\t\tif cfg.CheckIsEnabled(c.Name()) {\n\t\t\tc.Init(cfg, sysInfo)\n\t\t\tenabledChecks = append(enabledChecks, c)\n\t\t}\n\t}\n\n\treturn NewCollectorWithChecks(cfg, enabledChecks, ctx), nil\n}", "func NewCollector(m Metric) (prometheus.Collector, error) {\n\tif len(m.Name) == 0 {\n\t\treturn nil, errors.New(\"A name is required for a metric\")\n\t}\n\n\tvar (\n\t\tnamespace = m.Namespace\n\t\tsubsystem = m.Subsystem\n\t\thelp = m.Help\n\t)\n\n\tif len(namespace) == 0 {\n\t\tnamespace = DefaultNamespace\n\t}\n\n\tif len(subsystem) == 0 {\n\t\tsubsystem = DefaultSubsystem\n\t}\n\n\tif len(help) == 0 {\n\t\thelp = m.Name\n\t}\n\n\tswitch m.Type {\n\tcase CounterType:\n\t\treturn prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\t\tNamespace: namespace,\n\t\t\tSubsystem: subsystem,\n\t\t\tName: m.Name,\n\t\t\tHelp: help,\n\t\t\tConstLabels: prometheus.Labels(m.ConstLabels),\n\t\t}, m.LabelNames), nil\n\n\tcase GaugeType:\n\t\treturn prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tSubsystem: subsystem,\n\t\t\tName: m.Name,\n\t\t\tHelp: help,\n\t\t\tConstLabels: prometheus.Labels(m.ConstLabels),\n\t\t}, m.LabelNames), nil\n\n\tcase HistogramType:\n\t\treturn prometheus.NewHistogramVec(prometheus.HistogramOpts{\n\t\t\tNamespace: namespace,\n\t\t\tSubsystem: subsystem,\n\t\t\tName: m.Name,\n\t\t\tHelp: help,\n\t\t\tBuckets: m.Buckets,\n\t\t\tConstLabels: prometheus.Labels(m.ConstLabels),\n\t\t}, m.LabelNames), nil\n\n\tcase SummaryType:\n\t\treturn 
prometheus.NewSummaryVec(prometheus.SummaryOpts{\n\t\t\tNamespace: namespace,\n\t\t\tSubsystem: subsystem,\n\t\t\tName: m.Name,\n\t\t\tHelp: help,\n\t\t\tObjectives: m.Objectives,\n\t\t\tMaxAge: m.MaxAge,\n\t\t\tAgeBuckets: m.AgeBuckets,\n\t\t\tBufCap: m.BufCap,\n\t\t\tConstLabels: prometheus.Labels(m.ConstLabels),\n\t\t}, m.LabelNames), nil\n\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"Unsupported metric type: %s\", m.Type)\n\t}\n}", "func NewUpgradeCollector(c client.Client) (prometheus.Collector, error) {\n\tupgradeConfigManager, err := upgradeconfigmanager.NewBuilder().NewManager(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmanagedMetrics := bootstrapMetrics()\n\n\treturn &UpgradeCollector{\n\t\tupgradeConfigManager,\n\t\tcv.NewCVClient(c),\n\t\tmanagedMetrics,\n\t}, nil\n}", "func NewCollector(storageLocation v1.StorageLocation, gitter gits.Gitter, gitKind string) (Collector, error) {\n\tclassifier := storageLocation.Classifier\n\tif classifier == \"\" {\n\t\tclassifier = \"default\"\n\t}\n\tgitURL := storageLocation.GitURL\n\tif gitURL != \"\" {\n\t\treturn NewGitCollector(gitter, gitURL, storageLocation.GetGitBranch(), gitKind)\n\t}\n\tbucketProvider, err := factory.NewBucketProviderFromTeamSettingsConfigurationOrDefault(clients.NewFactory(), storageLocation)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"there was a problem obtaining the bucket provider from cluster configuratio\")\n\t}\n\treturn NewBucketCollector(storageLocation.BucketURL, classifier, bucketProvider)\n}", "func NewCollector(rcClientId string, kubernetesClusterId string) TelemetryCollector {\n\treturn &telemetryCollector{\n\t\tclient: httputils.NewResetClient(httpClientResetInterval, httpClientFactory(httpClientTimeout)),\n\t\thost: utils.GetMainEndpoint(config.Datadog, mainEndpointPrefix, mainEndpointUrlKey),\n\t\tuserAgent: \"Datadog Cluster Agent\",\n\t\trcClientId: rcClientId,\n\t\tkubernetesClusterId: kubernetesClusterId,\n\t}\n}", "func NewCollector() collector.RPCCollector {\n\treturn &vpwsCollector{}\n}", "func NewCollector(api API) *Collector {\n\treturn &Collector{api: api}\n}", "func NewCollector(logicalSystem string) collector.RPCCollector {\n\treturn &bgpCollector{LogicalSystem: logicalSystem}\n}", "func newPrometheusCollector(name string, db *sql.DB) prometheus.Collector {\n\tvar labels = prometheus.Labels{\n\t\t\"name\": name,\n\t}\n\n\treturn &prometheusCollector{\n\t\tdb: db,\n\n\t\tmaxOpenConnections: prometheus.NewDesc(\n\t\t\t\"max_open_connections\",\n\t\t\t\"Maximum number of open connections to the database\",\n\t\t\tnil, labels,\n\t\t),\n\t\topenConnections: prometheus.NewDesc(\n\t\t\t\"open_connections\",\n\t\t\t\"The number of established connections both in use and idle\",\n\t\t\tnil, labels,\n\t\t),\n\t\tinUse: prometheus.NewDesc(\n\t\t\t\"in_use_connections\",\n\t\t\t\"The number of connections currently in use\",\n\t\t\tnil, labels,\n\t\t),\n\t\tidle: prometheus.NewDesc(\n\t\t\t\"idle_connections\",\n\t\t\t\"The number of idle connections\",\n\t\t\tnil, labels,\n\t\t),\n\t\twaitCount: prometheus.NewDesc(\n\t\t\t\"wait_connections\",\n\t\t\t\"The total number of connections waited for\",\n\t\t\tnil, labels,\n\t\t),\n\t\twaitDuration: prometheus.NewDesc(\n\t\t\t\"wait_duration_connections\",\n\t\t\t\"The total time blocked waiting for a new connection\",\n\t\t\tnil, labels,\n\t\t),\n\t\tmaxIdleClosed: prometheus.NewDesc(\n\t\t\t\"max_idle_closed_connections\",\n\t\t\t\"The total number of connections closed due to SetMaxIdleConns\",\n\t\t\tnil, 
labels,\n\t\t),\n\t\tmaxIdleTimeClosed: prometheus.NewDesc(\n\t\t\t\"max_idle_time_closed_connections\",\n\t\t\t\"The total number of connections closed due to SetConnMaxIdleTime\",\n\t\t\tnil, labels,\n\t\t),\n\t\tmaxLifetimeClosed: prometheus.NewDesc(\n\t\t\t\"max_lifetime_closed_connections\",\n\t\t\t\"The total number of connections closed due to SetConnMaxLifetime\",\n\t\t\tnil, labels,\n\t\t),\n\t}\n}", "func NewCollect() *cobra.Command {\n\tcollectOptions := newCollectOptions()\n\n\tcmd := &cobra.Command{\n\t\tUse: \"collect\",\n\t\tShort: \"Obtain all the data of the current node\",\n\t\tLong: edgecollectLongDescription,\n\t\tExample: edgecollectExample,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\terr := ExecuteCollect(collectOptions)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t},\n\t}\n\tcmd.AddCommand()\n\taddCollectOtherFlags(cmd, collectOptions)\n\treturn cmd\n}", "func NewCollector(defaultGroup string) *MemoryMetricsCollector {\n\treturn &MemoryMetricsCollector{defaultGroup: defaultGroup, metrics: make([]operation.MetricOperation, 0)}\n}", "func NewCollector() *Collector {\n\tcollector := &Collector{\n\t\tresults: make(chan interface{}, 100),\n\t\tdone: make(chan interface{}),\n\t}\n\tgo collector.process()\n\treturn collector\n}", "func NewCollector(\n\tlogger *log.Logger, server lxd.InstanceServer) prometheus.Collector {\n\treturn &collector{logger: logger, server: server}\n}", "func NewCollector(cfg *config.AgentConfig) TelemetryCollector {\n\tif !cfg.TelemetryConfig.Enabled {\n\t\treturn &noopTelemetryCollector{}\n\t}\n\n\tvar endpoints []config.Endpoint\n\tfor _, endpoint := range cfg.TelemetryConfig.Endpoints {\n\t\tu, err := url.Parse(endpoint.Host)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tu.Path = \"/api/v2/apmtelemetry\"\n\t\tendpointWithPath := *endpoint\n\t\tendpointWithPath.Host = u.String()\n\n\t\tendpoints = append(endpoints, endpointWithPath)\n\t}\n\n\treturn &telemetryCollector{\n\t\tclient: cfg.NewHTTPClient(),\n\t\tendpoints: endpoints,\n\t\tuserAgent: fmt.Sprintf(\"Datadog Trace Agent/%s/%s\", cfg.AgentVersion, cfg.GitCommit),\n\n\t\tcfg: cfg,\n\t\tcollectedStartupError: &atomic.Bool{},\n\t}\n}", "func NewCollector(username string, token string, source string, timeout time.Duration, waitGroup *sync.WaitGroup) Collector {\n\treturn &collector{\n\t\turl: metricsEndpont,\n\t\tusername: username,\n\t\ttoken: token,\n\t\tsource: source,\n\t\ttimeout: timeout,\n\n\t\thttpClient: &http.Client{\n\t\t\tTimeout: time.Second * 30,\n\t\t},\n\t\twaitGroup: waitGroup,\n\t\tstop: make(chan bool),\n\t\tbuffer: make(chan gauge, 10000),\n\t}\n}", "func NewMetricCollector(logger *zap.SugaredLogger) *MetricCollector {\n\tcollector := &MetricCollector{\n\t\tlogger: logger,\n\t\tcollections: make(map[string]*collection),\n\t}\n\n\treturn collector\n}", "func NewCollector() collector.RPCCollector {\n\treturn &storageCollector{}\n}", "func NewVMwareCollector(ctx *pulumi.Context,\n\tname string, args *VMwareCollectorArgs, opts ...pulumi.ResourceOption) (*VMwareCollector, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.ProjectName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ProjectName'\")\n\t}\n\tif args.ResourceGroupName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ResourceGroupName'\")\n\t}\n\taliases := pulumi.Aliases([]pulumi.Alias{\n\t\t{\n\t\t\tType: 
pulumi.String(\"azure-nextgen:migrate:VMwareCollector\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:migrate/v20191001:VMwareCollector\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:migrate/v20191001:VMwareCollector\"),\n\t\t},\n\t})\n\topts = append(opts, aliases)\n\tvar resource VMwareCollector\n\terr := ctx.RegisterResource(\"azure-native:migrate:VMwareCollector\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewCollector(cm *clientmanager.ClientManager) prometheus.Collector {\n\treturn &grpcClientManagerCollector{\n\t\tcm: cm,\n\t}\n}", "func NewDirCollector() DirCollector {\n\treturn make(DirCollector)\n}", "func New() ([]collector.Collector, error) {\n\tnone := []collector.Collector{}\n\n\tl := log.With().Str(\"pkg\", PackageName).Logger()\n\n\tenbledCollectors := viper.GetStringSlice(config.KeyCollectors)\n\tif len(enbledCollectors) == 0 {\n\t\tl.Info().Msg(\"no builtin collectors enabled\")\n\t\treturn none, nil\n\t}\n\n\tcollectors := make([]collector.Collector, 0, len(enbledCollectors))\n\tinitErrMsg := \"initializing builtin collector\"\n\tfor _, name := range enbledCollectors {\n\t\tif !strings.HasPrefix(name, NamePrefix) {\n\t\t\tcontinue\n\t\t}\n\t\tname = strings.ReplaceAll(name, NamePrefix, \"\")\n\t\tcfgBase := \"generic_\" + name + \"_collector\"\n\t\tswitch name {\n\t\tcase NameCPU:\n\t\t\tc, err := NewCPUCollector(path.Join(defaults.EtcPath, cfgBase), l)\n\t\t\tif err != nil {\n\t\t\t\tl.Error().Str(\"name\", name).Err(err).Msg(initErrMsg)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcollectors = append(collectors, c)\n\n\t\tcase NameDisk:\n\t\t\tc, err := NewDiskCollector(path.Join(defaults.EtcPath, cfgBase), l)\n\t\t\tif err != nil {\n\t\t\t\tl.Error().Str(\"name\", name).Err(err).Msg(initErrMsg)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcollectors = append(collectors, c)\n\n\t\tcase NameFS:\n\t\t\tc, err := NewFSCollector(path.Join(defaults.EtcPath, cfgBase), l)\n\t\t\tif err != nil {\n\t\t\t\tl.Error().Str(\"name\", name).Err(err).Msg(initErrMsg)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcollectors = append(collectors, c)\n\n\t\tcase NameLoad:\n\t\t\tc, err := NewLoadCollector(path.Join(defaults.EtcPath, cfgBase), l)\n\t\t\tif err != nil {\n\t\t\t\tl.Error().Str(\"name\", name).Err(err).Msg(initErrMsg)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcollectors = append(collectors, c)\n\n\t\tcase NameIF:\n\t\t\tc, err := NewNetIFCollector(path.Join(defaults.EtcPath, cfgBase), l)\n\t\t\tif err != nil {\n\t\t\t\tl.Error().Str(\"name\", name).Err(err).Msg(initErrMsg)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcollectors = append(collectors, c)\n\n\t\tcase NameProto:\n\t\t\tc, err := NewNetProtoCollector(path.Join(defaults.EtcPath, cfgBase), l)\n\t\t\tif err != nil {\n\t\t\t\tl.Error().Str(\"name\", name).Err(err).Msg(initErrMsg)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcollectors = append(collectors, c)\n\n\t\tcase NameVM:\n\t\t\tc, err := NewVMCollector(path.Join(defaults.EtcPath, cfgBase), l)\n\t\t\tif err != nil {\n\t\t\t\tl.Error().Str(\"name\", name).Err(err).Msg(initErrMsg)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcollectors = append(collectors, c)\n\n\t\tdefault:\n\t\t\tl.Warn().Str(\"name\", name).Msg(\"unknown builtin collector, ignoring\")\n\t\t}\n\t}\n\n\treturn collectors, nil\n}", "func NewCollector(period time.Duration, collectFunc func() []Measurement) *Collector {\n\tcollector := &Collector{\n\t\tperiod: period,\n\t\tcollectFunc: collectFunc,\n\t\tlastSendingDate: -1,\n\t}\n\n\tif sources == nil {\n\t\tsources = 
make([]DataSource, 0)\n\t\tgo sendingLoop()\n\t}\n\n\tif UseGlobalEngine {\n\t\tcollector.Engine = Engine\n\t} else {\n\t\tcollector.Engine = &req.Engine{}\n\t}\n\n\tsources = append(sources, collector)\n\n\treturn collector\n}", "func New() Collector {\n\treturn &collector{\n\t\tinner: sigar.ConcreteSigar{},\n\t}\n}", "func NewFileCollector() FileCollector {\n\treturn make(FileCollector)\n}", "func NewCollector() *Collector {\n\twg := &sync.WaitGroup{}\n\tevtCh := make(chan *eventsapi.ClientEvent, collChanBufferSize)\n\n\tc := &Collector{&atomic.Value{}, wg, evtCh}\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tvar events []*eventsapi.ClientEvent\n\t\tfor evt := range evtCh {\n\t\t\tevents = append(events, evt)\n\t\t}\n\n\t\tc.val.Store(events)\n\t}()\n\n\treturn c\n}", "func NewKubernetesCollector(logger log.Logger, errors *prometheus.CounterVec, client *godo.Client, timeout time.Duration) *KubernetesCollector {\n\terrors.WithLabelValues(\"kubernetes\").Add(0)\n\n\t// Version refers to the upstream Kubernetes version as well as the DigitalOcean revision\n\tclusterLabels := []string{\"id\", \"name\", \"region\", \"version\"}\n\tnodeLabels := []string{\"id\", \"name\", \"region\"}\n\treturn &KubernetesCollector{\n\t\tlogger: logger,\n\t\terrors: errors,\n\t\tclient: client,\n\t\ttimeout: timeout,\n\n\t\tUp: prometheus.NewDesc(\n\t\t\t\"digitalocean_kubernetes_cluster_up\",\n\t\t\t\"If 1 the kubernetes cluster is up and running, 0 otherwise\",\n\t\t\tclusterLabels, nil,\n\t\t),\n\t\tNodePools: prometheus.NewDesc(\n\t\t\t\"digitalocean_kubernetes_nodepools_count\",\n\t\t\t\"Number of Kubernetes nodepools\",\n\t\t\tclusterLabels, nil,\n\t\t),\n\t\tNodes: prometheus.NewDesc(\n\t\t\t\"digitalocean_kubernetes_nodes_count\",\n\t\t\t\"Number of Kubernetes nodes\",\n\t\t\tnodeLabels, nil,\n\t\t),\n\t}\n}", "func NewLibvirtCollector() *Libvirt {\n\treturn &Libvirt{}\n\n}", "func NewRGWCollector(exporter *Exporter, background bool) *RGWCollector {\n\tlabels := make(prometheus.Labels)\n\tlabels[\"cluster\"] = exporter.Cluster\n\n\trgw := &RGWCollector{\n\t\tconfig: exporter.Config,\n\t\tbackground: background,\n\t\tlogger: exporter.Logger,\n\t\tgetRGWGCTaskList: rgwGetGCTaskList,\n\n\t\tActiveTasks: prometheus.NewGaugeVec(\n\t\t\tprometheus.GaugeOpts{\n\t\t\t\tNamespace: cephNamespace,\n\t\t\t\tName: \"rgw_gc_active_tasks\",\n\t\t\t\tHelp: \"RGW GC active task count\",\n\t\t\t\tConstLabels: labels,\n\t\t\t},\n\t\t\t[]string{},\n\t\t),\n\t\tActiveObjects: prometheus.NewGaugeVec(\n\t\t\tprometheus.GaugeOpts{\n\t\t\t\tNamespace: cephNamespace,\n\t\t\t\tName: \"rgw_gc_active_objects\",\n\t\t\t\tHelp: \"RGW GC active object count\",\n\t\t\t\tConstLabels: labels,\n\t\t\t},\n\t\t\t[]string{},\n\t\t),\n\t\tPendingTasks: prometheus.NewGaugeVec(\n\t\t\tprometheus.GaugeOpts{\n\t\t\t\tNamespace: cephNamespace,\n\t\t\t\tName: \"rgw_gc_pending_tasks\",\n\t\t\t\tHelp: \"RGW GC pending task count\",\n\t\t\t\tConstLabels: labels,\n\t\t\t},\n\t\t\t[]string{},\n\t\t),\n\t\tPendingObjects: prometheus.NewGaugeVec(\n\t\t\tprometheus.GaugeOpts{\n\t\t\t\tNamespace: cephNamespace,\n\t\t\t\tName: \"rgw_gc_pending_objects\",\n\t\t\t\tHelp: \"RGW GC pending object count\",\n\t\t\t\tConstLabels: labels,\n\t\t\t},\n\t\t\t[]string{},\n\t\t),\n\t}\n\n\tif rgw.background {\n\t\t// rgw stats need to be collected in the background as this can take a while\n\t\t// if we have a large backlog\n\t\tgo rgw.backgroundCollect()\n\t}\n\n\treturn rgw\n}", "func NewCollector() collector.RPCCollector {\n\treturn 
&accountingCollector{}\n}", "func NewGoLimiter() *GoLimiter {\n\tl := &GoLimiter{visitors: make(map[string]*visitor)}\n\t// Run a background goroutine to remove old entries from the visitors map.\n\tgo l.cleanupVisitors()\n\treturn l\n}", "func New(computeAPI ComputeAPI, dnsAPI DNSAPI, removalPredicate IPAddressRemovalPredicate) *Collector {\n\treturn &Collector{computeAPI, dnsAPI, removalPredicate}\n}", "func NewCollector() collector.RPCCollector {\n\treturn &environmentCollector{}\n}", "func NewCollector() collector.RPCCollector {\n\treturn &environmentCollector{}\n}", "func NewGC(s *Storage, d time.Duration) *GarbageCollector {\n\treturn &GarbageCollector{s: s, period: d}\n}", "func New(client *statsd.Client, interval time.Duration) *Collector {\n\treturn &Collector{\n\t\tinterval: interval,\n\t\tclient: client,\n\t\tdone: make(chan struct{}),\n\t}\n}", "func (c *SkipperCollectorPlugin) NewCollector(hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {\n\tif strings.HasPrefix(config.Metric.Name, rpsMetricName) {\n\t\tbackend, ok := config.Config[\"backend\"]\n\t\tif !ok {\n\t\t\t// TODO: remove the deprecated way of specifying\n\t\t\t// optional backend at a later point in time.\n\t\t\tif len(config.Metric.Name) > len(rpsMetricName) {\n\t\t\t\tmetricNameParts := strings.Split(config.Metric.Name, rpsMetricBackendSeparator)\n\t\t\t\tif len(metricNameParts) == 2 {\n\t\t\t\t\tbackend = metricNameParts[1]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn NewSkipperCollector(c.client, c.rgClient, c.plugin, hpa, config, interval, c.backendAnnotations, backend)\n\t}\n\treturn nil, fmt.Errorf(\"metric '%s' not supported\", config.Metric.Name)\n}", "func NewStringCollector(name, help string) *StringCollector {\n\treturn &StringCollector{\n\t\tname: name,\n\t\thelp: help,\n\t\tvalue: \"\",\n\t}\n}", "func New() *CPUCollector {\n\tcpuMetrics := newCPUMetrics()\n\tcpuStats := cpuclient.New()\n\n\treturn &CPUCollector{\n\t\tcpuMetrics: cpuMetrics,\n\t\tcpuStats: cpuStats,\n\t}\n}", "func newCollectionStatsCollector(ctx context.Context, client *mongo.Client, logger *logrus.Logger, compatible, discovery bool, topology labelsGetter, collections []string) *collstatsCollector {\n\treturn &collstatsCollector{\n\t\tctx: ctx,\n\t\tbase: newBaseCollector(client, logger),\n\n\t\tcompatibleMode: compatible,\n\t\tdiscoveringMode: discovery,\n\t\ttopologyInfo: topology,\n\n\t\tcollections: collections,\n\t}\n}", "func NewUseCollector() *Use {\n\treturn &Use{}\n}", "func NewCollector(ctx context.Context, cc *collector.CollectorContext, collectDuration prometheus.Observer) prometheus.Collector {\n\treturn &StorageDomainCollector{\n\t\trootCtx: ctx,\n\t\tcc: cc,\n\t\tcollectDuration: collectDuration,\n\t}\n}", "func newCollectOptions() *common.CollectOptions {\n\topts := &common.CollectOptions{}\n\n\topts.Config = common.EdgecoreConfigPath\n\topts.OutputPath = \".\"\n\topts.Detail = false\n\treturn opts\n}", "func (c *ClusterScalingScheduleCollectorPlugin) NewCollector(hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {\n\treturn NewClusterScalingScheduleCollector(c.store, c.defaultScalingWindow, c.defaultTimeZone, c.rampSteps, c.now, hpa, config, interval)\n}", "func newPoolCollector(config monitoring.MetricsConfig, logger *zap.Logger,\n\tspectrumClient spectrumservice.Client) (Collector, error) {\n\n\tlabelPool := []string{\"pool_name\", \"storage_system\"}\n\n\tproperties := 
make(map[string]*prometheus.Desc)\n\n\tfor _, p := range config.Metrics.Pools.Properties {\n\t\tproperties[p.PropertyName] = prometheus.NewDesc(p.PrometheusName, p.PrometheusHelp, labelPool, nil)\n\t}\n\n\treturn &poolCollector{\n\t\tibmSpectrumClient: spectrumClient,\n\t\tlogger: logger.Sugar(),\n\t\tproperties: properties,\n\t}, nil\n}", "func New(cfg Collector, nodeInfo collectors.NodeInfo, rels *ContainerTaskRels) (Collector, chan producers.MetricsMessage) {\n\tc := cfg\n\tc.log = logrus.WithFields(logrus.Fields{\"collector\": \"mesos-agent\"})\n\tc.nodeInfo = nodeInfo\n\tc.metricsChan = make(chan producers.MetricsMessage)\n\tc.ContainerTaskRels = rels\n\treturn c, c.metricsChan\n}", "func NewLocalCollector(s Store) Collector {\n\treturn s\n}", "func NewKubeletCmdCollector(osIdentifier utils.OSIdentifier, runtimeInfo *utils.RuntimeInfo) *KubeletCmdCollector {\n\treturn &KubeletCmdCollector{\n\t\tKubeletCommand: \"\",\n\t\tosIdentifier: osIdentifier,\n\t\truntimeInfo: runtimeInfo,\n\t}\n}", "func NewDefaultCollector(args map[string]interface{}) (DataCollector, error) {\n\t// Path (required)\n\tpathOrUrl := \"\"\n\tif v, ok := args[\"path\"]; ok {\n\t\tif pathOrUrl, ok = v.(string); !ok {\n\t\t\treturn nil, fmt.Errorf(\"Unexpected type for 'compressed'. Expected bool, got %t\", v)\n\t\t}\n\t} else {\n\t\treturn nil, errors.New(\"Missing 'path' argument. A path is required for the default collector\")\n\t}\n\n\tcompressed := false\n\n\tfor _, s := range []string{\"compress\", \"compressed\"} {\n\t\tif v, ok := args[s]; ok {\n\t\t\tif compressed, ok = v.(bool); !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"Unexpected type for 'compressed'. Expected bool, got %t\", v)\n\t\t\t}\n\t\t}\n\t}\n\n\taggregate := false\n\tif v, ok := args[\"aggregate\"]; ok {\n\t\tif aggregate, ok = v.(bool); !ok {\n\t\t\treturn nil, fmt.Errorf(\"Unexpected type for 'aggregate'. 
Expected bool, got %t\", v)\n\t\t}\n\t}\n\n\tserializer, err := serialize.DetectSerializer(pathOrUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &DefaultCollector{\n\t\tPath: pathOrUrl,\n\t\tCompressed: compressed,\n\t\tSerializer: serializer,\n\t\tAggregateData: aggregate,\n\t\tallData: make([]interface{}, 0),\n\t}, nil\n}", "func New(dir string) *GC {\n\treturn &GC{\n\t\tdir: dir,\n\t\tmaxAge: DefaultMaxAge,\n\t\tinterval: DefaultInterval,\n\t}\n}", "func NewCollector(dyno *Dynomite) *Collector {\n\treturn &Collector{\n\t\tdyno: dyno,\n\t\tstate: typedDesc{\n\t\t\tdesc: prometheus.NewDesc(\n\t\t\t\t\"dynomite_state\",\n\t\t\t\t\"State as reported by Dynomite.\",\n\t\t\t\t[]string{\"state\", \"rack\", \"dc\", \"token\", \"ip_address\"}, nil),\n\t\t\tvalueType: prometheus.GaugeValue,\n\t\t},\n\t\tdbSize: typedDesc{\n\t\t\tdesc: prometheus.NewDesc(\n\t\t\t\t\"dynomite_db_size\",\n\t\t\t\t\"Key database size as reported by the Redis backend.\",\n\t\t\t\t[]string{\"rack\", \"dc\", \"token\", \"ip_address\"}, nil),\n\t\t\tvalueType: prometheus.GaugeValue,\n\t\t},\n\t\tuptime: typedDesc{\n\t\t\tdesc: prometheus.NewDesc(\n\t\t\t\t\"dynomite_uptime\",\n\t\t\t\t\"Uptime as reported by Dynomite info.\",\n\t\t\t\t[]string{\"rack\", \"dc\", \"token\", \"ip_address\"}, nil),\n\t\t\tvalueType: prometheus.GaugeValue,\n\t\t},\n\t\tclientConnections: typedDesc{\n\t\t\tdesc: prometheus.NewDesc(\n\t\t\t\t\"dynomite_client_connections\",\n\t\t\t\t\"Client connections as reported by Dynomite info.\",\n\t\t\t\t[]string{\"rack\", \"dc\", \"token\", \"ip_address\"}, nil),\n\t\t\tvalueType: prometheus.GaugeValue,\n\t\t},\n\t\tclientReadRequests: typedDesc{\n\t\t\tdesc: prometheus.NewDesc(\n\t\t\t\t\"dynomite_client_read_requests\",\n\t\t\t\t\"Client read requests as reported by Dynomite info.\",\n\t\t\t\t[]string{\"rack\", \"dc\", \"token\", \"ip_address\"}, nil),\n\t\t\tvalueType: prometheus.GaugeValue,\n\t\t},\n\t\tclientWriteRequests: typedDesc{\n\t\t\tdesc: prometheus.NewDesc(\n\t\t\t\t\"dynomite_client_write_requests\",\n\t\t\t\t\"Client write requests as reported by Dynomite info.\",\n\t\t\t\t[]string{\"rack\", \"dc\", \"token\", \"ip_address\"}, nil),\n\t\t\tvalueType: prometheus.GaugeValue,\n\t\t},\n\t\tclientDroppedRequests: typedDesc{\n\t\t\tdesc: prometheus.NewDesc(\n\t\t\t\t\"dynomite_client_dropped_requests\",\n\t\t\t\t\"Client dropped requests as reported by Dynomite info.\",\n\t\t\t\t[]string{\"rack\", \"dc\", \"token\", \"ip_address\"}, nil),\n\t\t\tvalueType: prometheus.GaugeValue,\n\t\t},\n\t}\n}", "func NewCollector(brokerURL string, s storage.Storage) *Collector {\n\tbroker, err := NewBroker(brokerURL)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tch, err := broker.Channel()\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t_, err = ch.QueueDeclare(\n\t\tspecsQueueName, // name\n\t\ttrue, // durable\n\t\tfalse, // delete when usused\n\t\tfalse, // exclusive\n\t\tfalse, // noWait\n\t\tnil, // arguments\n\t)\n\n\tdc, _ := ch.Consume(\n\t\tspecsQueueName, // queue\n\t\t\"\", // consumer\n\t\ttrue, // auto-ack\n\t\tfalse, // exclusive\n\t\tfalse, // no-local\n\t\tfalse, // no-wait\n\t\tnil, // args\n\t)\n\n\treturn &Collector{broker, ch, dc, s}\n}", "func NewGithubCollector() *GithubCollector {\n\treturn &GithubCollector{}\n}", "func NewFileCollector(name string, limit Size, pipe bool) File {\n\treturn &FileCollector{Name: name, Limit: limit, Pipe: pipe}\n}", "func NewStatsCollector(cliContext *cli.Context) (*StatsCollector, error) {\n\n\t// fill the Collector struct\n\tcollector := 
&StatsCollector{\n\t\tcliContext: cliContext,\n\t\tsocketPath: cliContext.String(\"socketPath\"),\n\t\tkamailioHost: cliContext.String(\"host\"),\n\t\tkamailioPort: cliContext.Int(\"port\"),\n\t}\n\n\t// fine, return the created object struct\n\treturn collector, nil\n}", "func NewCollector(url, token, xSecret string) (*Collector, error) {\n\tc := Collector{}\n\n\tif url == \"\" {\n\t\treturn nil, fmt.Errorf(\"URL should not be empty\")\n\t}\n\tc.dadataAPIURL = url\n\tif token == \"\" {\n\t\treturn nil, fmt.Errorf(\"Token should not be empty. Please specify it via DADATA_TOKEN env var\")\n\t}\n\tc.dadataToken = token\n\tif xSecret == \"\" {\n\t\treturn nil, fmt.Errorf(\"X-Secret should not be empty. Please specify it via DADATA_X_SECRET env var\")\n\t}\n\tc.dadataXSecret = xSecret\n\n\terr := c.dadataCheck()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc.totalScrapes = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tNamespace: namespace,\n\t\tName: \"exporter_scrapes_total\",\n\t\tHelp: \"Count of total scrapes\",\n\t})\n\n\tc.failedBalanceScrapes = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tNamespace: namespace,\n\t\tName: \"exporter_failed_balance_scrapes_total\",\n\t\tHelp: \"Count of failed balance scrapes\",\n\t})\n\n\tc.failedStatsScrapes = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tNamespace: namespace,\n\t\tName: \"exporter_failed_stats_scrapes_total\",\n\t\tHelp: \"Count of failed stats scrapes\",\n\t})\n\n\tc.CurrentBalance = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: namespace,\n\t\tName: \"current_balance\",\n\t\tHelp: \"Current balance on Dadata\",\n\t})\n\n\tc.ServicesMerging = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: namespace,\n\t\tSubsystem: \"services\",\n\t\tName: \"merging_total\",\n\t\tHelp: \"Merging count for today\",\n\t})\n\n\tc.ServicesSuggestions = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: namespace,\n\t\tSubsystem: \"services\",\n\t\tName: \"suggestions_total\",\n\t\tHelp: \"Suggestions count for today\",\n\t})\n\n\tc.ServicesClean = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: namespace,\n\t\tSubsystem: \"services\",\n\t\tName: \"clean_total\",\n\t\tHelp: \"Clean count for today\",\n\t})\n\n\treturn &c, nil\n}", "func NewSyncCollector() *SyncCollector {\n\tso := SyncCollector{c: make(Collector)}\n\treturn &so\n}", "func (c *ScalingScheduleCollectorPlugin) NewCollector(hpa *autoscalingv2.HorizontalPodAutoscaler, config *MetricConfig, interval time.Duration) (Collector, error) {\n\treturn NewScalingScheduleCollector(c.store, c.defaultScalingWindow, c.defaultTimeZone, c.rampSteps, c.now, hpa, config, interval)\n}", "func newGoGetter() *goGetter {\n\treturn &goGetter{}\n}", "func NewGauge(gv *prometheus.GaugeVec) metrics.Gauge {\n\treturn &gauge{\n\t\tgv: gv,\n\t}\n}", "func NewPrometheusCollector(namespace string, labels map[string]string) func(string) metricCollector.MetricCollector {\n\treturn func(name string) metricCollector.MetricCollector {\n\t\tname = strings.Replace(name, \"/\", \"_\", -1)\n\t\tname = strings.Replace(name, \":\", \"_\", -1)\n\t\tname = strings.Replace(name, \".\", \"_\", -1)\n\t\tname = strings.Replace(name, \"-\", \"_\", -1)\n\n\t\tcollector := &PrometheusCollector{\n\t\t\tnamespace: namespace,\n\t\t\tsubsystem: name,\n\t\t\tgauges: map[string]prometheus.Gauge{},\n\t\t\tcounters: map[string]prometheus.Counter{},\n\t\t}\n\n\t\t// make gauges\n\t\tfor _, metric := range gauges {\n\t\t\topts := prometheus.GaugeOpts{\n\t\t\t\tNamespace: 
collector.namespace,\n\t\t\t\tSubsystem: collector.subsystem,\n\t\t\t\tName: metric,\n\t\t\t\tHelp: fmt.Sprintf(\"[gauge] namespace : %s, metric : %s\", collector.namespace, metric),\n\t\t\t}\n\t\t\tif labels != nil {\n\t\t\t\topts.ConstLabels = labels\n\t\t\t}\n\t\t\tgauge := prometheus.NewGauge(opts)\n\t\t\tcollector.gauges[metric] = gauge\n\t\t\tprometheus.MustRegister(gauge)\n\t\t}\n\n\t\t// make counters\n\t\tfor _, metric := range counters {\n\t\t\topts := prometheus.CounterOpts{\n\t\t\t\tNamespace: collector.namespace,\n\t\t\t\tSubsystem: collector.subsystem,\n\t\t\t\tName: metric,\n\t\t\t\tHelp: fmt.Sprintf(\"[counter] namespace : %s, metric : %s\", collector.namespace, metric),\n\t\t\t}\n\t\t\tif labels != nil {\n\t\t\t\topts.ConstLabels = labels\n\t\t\t}\n\t\t\tcounter := prometheus.NewCounter(opts)\n\t\t\tcollector.counters[metric] = counter\n\t\t\tprometheus.MustRegister(counter)\n\t\t}\n\n\t\treturn collector\n\t}\n}", "func NewNVMeCollector(logger log.Logger) (Collector, error) {\n\tfs, err := sysfs.NewFS(*sysPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to open sysfs: %w\", err)\n\t}\n\n\treturn &nvmeCollector{\n\t\tfs: fs,\n\t\tlogger: logger,\n\t}, nil\n}", "func NewCollector(config CollectorConfig, rawConfig *common.Config) (*Collector, error) {\n\n\t// Compile the configured pattern\n\tpattern, err := regexp.Compile(config.Pattern)\n\tif err != nil {\n\t\tlogp.Warn(\"Unable to parse regular expression: %s\", err)\n\t\treturn nil, err\n\t}\n\n\t// Create our Collector with its channel signals\n\tcollector := Collector{\n\t\tPattern: pattern,\n\t\tconfig: config,\n\n\t\tprospectorDone: make(chan struct{}),\n\t\tlines: make(chan string),\n\t\tDone: make(chan struct{}),\n\t\tStopped: make(chan struct{}),\n\t}\n\n\t// Initialize our ticker for handling timeouts\n\tif config.Timeout.Interval > 0 {\n\t\t// If a timeout is set then create a new ticker and save wrap its channel with a variable\n\t\tcollector.ticker = time.NewTicker(config.Timeout.Interval)\n\t\tcollector.timeoutChannel = collector.ticker.C\n\t} else {\n\t\t// If a timeout is not set then create just a generic channel that will never return.\n\t\t// It just makes generalizing the code easier.\n\t\tcollector.timeoutChannel = make(chan time.Time)\n\t}\n\n\t// Configure a new FileBeat Prospector with our rawConfig that will send it's data to a\n\t// CollectorOutleter\n\tp, err := prospector.NewProspector(\n\t\trawConfig,\n\t\tcollector.collectorOutleterFactory,\n\t\tcollector.prospectorDone,\n\t\t[]file.State{},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcollector.prospector = p\n\treturn &collector, nil\n}", "func NewCollector(store *forensicstore.ForensicStore, tempDir string, definitions []goartifacts.ArtifactDefinition) (*LiveCollector, error) {\n\tprovidesMap := map[string][]goartifacts.Source{}\n\n\tdefinitions = goartifacts.FilterOS(definitions)\n\n\tfor _, definition := range definitions {\n\t\tfor _, source := range definition.Sources {\n\t\t\tfor _, provide := range source.Provides {\n\t\t\t\tkey := strings.TrimPrefix(provide.Key, \"environ_\")\n\t\t\t\tif providingSources, ok := providesMap[key]; !ok {\n\t\t\t\t\tprovidesMap[key] = []goartifacts.Source{source}\n\t\t\t\t} else {\n\t\t\t\t\tprovidesMap[key] = append(providingSources, source)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tsourceFS, err := systemfs.New()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"system fs creation failed: %w\", err)\n\t}\n\n\treturn &LiveCollector{\n\t\tSourceFS: sourceFS,\n\t\tregistryfs: 
registryfs.New(),\n\t\tStore: store,\n\t\tTempDir: tempDir,\n\t\tprovidesMap: providesMap,\n\t\tknowledgeBase: map[string][]string{},\n\t}, nil\n}", "func newMetricsCollector(devices func() ([]*wgtypes.Device, error)) prometheus.Collector {\n\t// common labels for all metrics\n\tlabels := []string{\"device\", \"public_key\"}\n\n\treturn &collector{\n\t\tDeviceInfo: prometheus.NewDesc(\n\t\t\t\"semaphore_wg_device_info\",\n\t\t\t\"Metadata about a device.\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tPeerInfo: prometheus.NewDesc(\n\t\t\t\"semaphore_wg_peer_info\",\n\t\t\t\"Metadata about a peer. The public_key label on peer metrics refers to the peer's public key; not the device's public key.\",\n\t\t\tappend(labels, []string{\"endpoint\"}...),\n\t\t\tnil,\n\t\t),\n\t\tPeerAllowedIPsInfo: prometheus.NewDesc(\n\t\t\t\"semaphore_wg_peer_allowed_ips_info\",\n\t\t\t\"Metadata about each of a peer's allowed IP subnets for a given device.\",\n\t\t\tappend(labels, []string{\"allowed_ips\"}...),\n\t\t\tnil,\n\t\t),\n\t\tPeerReceiveBytes: prometheus.NewDesc(\n\t\t\t\"semaphore_wg_peer_receive_bytes_total\",\n\t\t\t\"Number of bytes received from a given peer.\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tPeerTransmitBytes: prometheus.NewDesc(\n\t\t\t\"semaphore_wg_peer_transmit_bytes_total\",\n\t\t\t\"Number of bytes transmitted to a given peer.\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tPeerLastHandshake: prometheus.NewDesc(\n\t\t\t\"semaphore_wg_peer_last_handshake_seconds\",\n\t\t\t\"UNIX timestamp for the last handshake with a given peer.\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tdevices: devices,\n\t}\n}", "func New(log log.Logger, c *Config) (integrations.Integration, error) {\n\tconfigMap := exporter.GenerateConfigs()\n\tc.applyConfig(configMap)\n\twc, err := exporter.NewWindowsCollector(c.Name(), c.EnabledCollectors, configMap)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_ = level.Info(log).Log(\"msg\", \"Enabled windows_exporter collectors\")\n\treturn integrations.NewCollectorIntegration(c.Name(), integrations.WithCollectors(wc)), nil\n}", "func NewVpcCollector(logger log.Logger) (Collector, error) {\n\treturn &vpcCollector{\n\t\tdesc: vpcDesc,\n\t\tlogger: logger,\n\t}, nil\n}", "func New(config Config) (*Collector, error) {\n\tif config.Logger == nil {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"%T.Logger must not be empty\", config)\n\t}\n\n\tif config.IFace == \"\" {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"%T.IFace must not be empty\", config)\n\t}\n\n\tcollector := &Collector{\n\t\tiface: config.IFace,\n\t}\n\n\tnicStats, err := ethtool.Stats(collector.iface)\n\tif err != nil {\n\t\treturn nil, microerror.Mask(err)\n\t}\n\n\tcollector.metrics = make(map[string]*prometheus.Desc)\n\tfor label, _ := range nicStats {\n\t\tfqName := prometheus.BuildFQName(nic_metric_namespace, \"\", label)\n\t\tcollector.metrics[label] = prometheus.NewDesc(fqName, fmt.Sprintf(\"Generated description for metric %#q\", label), []string{\"iface\"}, nil)\n\t}\n\n\treturn collector, nil\n}", "func newGauge(namespace, subsystem, name string, labelNames []string, client *statsd.Statter, isPrometheusEnabled bool) *Gauge {\n\topts := prometheus.GaugeOpts{\n\t\tNamespace: namespace,\n\t\tSubsystem: subsystem,\n\t\tName: name,\n\t}\n\tvec := prometheus.NewGaugeVec(opts, labelNames)\n\tif isPrometheusEnabled {\n\t\tprometheus.MustRegister(vec)\n\t}\n\n\treturn &Gauge{\n\t\twatcher: vec,\n\t\tlabels: labelNames,\n\t\tclient: client,\n\t\tprefix: strings.Join([]string{namespace, subsystem, 
name}, \".\"),\n\t}\n}", "func NewDfCollector() *DfCollector {\n\treturn &DfCollector{}\n}", "func New() Go { return Go{} }", "func NewGo(re string) (Matcher, Disposer, error) {\n\tt := time.Now()\n\treg, err := regexp.Compile(re)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tCompileHistogram.With(\"regex\", re, \"duration\", \"seconds\").Observe(time.Since(t).Seconds())\n\n\tr := Go{\n\t\treg: reg,\n\t}\n\treturn &r, &r, nil\n}", "func NewRemoteCollector(addr string) *RemoteCollector {\n\treturn &RemoteCollector{\n\t\taddr: addr,\n\t\tdial: func() (net.Conn, error) {\n\t\t\treturn net.Dial(\"tcp\", addr)\n\t\t},\n\t}\n}", "func NewCollector() collector.RPCCollector {\n\treturn &isisCollector{}\n}", "func NewGoMetrics(parent *monitoring.Registry, name string, filters ...MetricFilter) *GoMetricsRegistry {\n\treturn &GoMetricsRegistry{\n\t\treg: parent.NewRegistry(name, monitoring.IgnorePublishExpvar),\n\t\tshadow: metrics.NewRegistry(),\n\t\tfilters: makeFilters(filters...),\n\t}\n}", "func GetCollector() *Collector {\n\tif collector == nil {\n\t\tlogger.Errorf(\"Collector need to be init correctly\")\n\t\treturn collector\n\t}\n\n\treturn collector\n}", "func NewCollector(apiKey, hmacKey string) (*Collector, error) {\n\thmacKeyBuf, err := hex.DecodeString(hmacKey)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"parsing hmac key: %v\", err)\n\t}\n\tbaseURL := IngestionBaseURL\n\thost := os.Getenv(\"EI_HOST\")\n\tif host == \"localhost\" {\n\t\tbaseURL = \"http://localhost:4810\"\n\t} else if strings.HasSuffix(host, \"test.edgeimpulse.com\") {\n\t\tbaseURL = \"http://ingestion.\" + host\n\t} else if strings.HasSuffix(host, \"edgeimpulse.com\") {\n\t\tbaseURL = \"https://ingestion.\" + host\n\t}\n\tc := &Collector{http.DefaultClient, baseURL, hmacKeyBuf, apiKey}\n\treturn c, nil\n}", "func NewNFSCollector(g getNFSStats) *nfsCollector {\n\treturn &nfsCollector{\n\t\tg,\n\t}\n}", "func NewLEDCollector() (LEDCollector, error) {\n\tleds, err := leds.LEDs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbrightness := prometheus.NewDesc(\n\t\tprometheus.BuildFQName(\n\t\t\tnamespace,\n\t\t\t\"led\",\n\t\t\t\"brightness\",\n\t\t),\n\t\t\"LED brightness\",\n\t\t[]string{\"led\"},\n\t\tnil,\n\t)\n\n\tmaxBrightness := prometheus.NewDesc(\n\t\tprometheus.BuildFQName(\n\t\t\tnamespace,\n\t\t\t\"led\",\n\t\t\t\"max_brightness\",\n\t\t),\n\t\t\"LED max brightness\",\n\t\t[]string{\"led\"},\n\t\tnil,\n\t)\n\n\treturn &ledCollector{\n\t\tbrightness: brightness,\n\t\tmaxBrightness: maxBrightness,\n\t\tleds: leds,\n\t}, nil\n}", "func NewKubernetesCollector(config monitoring.KubeConfig) (*KubernetesCollector, error) {\n\treturn &KubernetesCollector{\n\t\tclient: config.Client,\n\t\tnodeIsReady: typedDesc{prometheus.NewDesc(\n\t\t\tprometheus.BuildFQName(namespace, \"k8s\", \"node_ready\"),\n\t\t\t\"Status of Kubernetes node\",\n\t\t\t[]string{\"node\"}, nil,\n\t\t), prometheus.GaugeValue},\n\t}, nil\n}", "func NewGauge(name, desc string, labelKeys []string) *Gauge {\n\t// new GaugeVec\n\tgaugeVec := prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\tName: name,\n\t\tHelp: desc,\n\t}, labelKeys)\n\n\t// register\n\tprometheus.MustRegister(gaugeVec)\n\n\treturn &Gauge{\n\t\tgaugeVec: gaugeVec,\n\t}\n}", "func NewServer(l net.Listener, c Collector) *CollectorServer {\n\tcs := &CollectorServer{c: c, l: l}\n\treturn cs\n}", "func NewGo(ctx context.Context, dir string, options ...Option) (Interface, error) {\n\tgbo := &gobuildOpener{\n\t\tctx: ctx,\n\t\tbuild: build,\n\t\tdir: dir,\n\t\tsbom: 
spdx(\"(none)\"),\n\t}\n\n\tfor _, option := range options {\n\t\tif err := option(gbo); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn gbo.Open()\n}", "func (*noOpConntracker) Collect(ch chan<- prometheus.Metric) {}", "func NewKubeletCmdCollector(exporter interfaces.Exporter) *KubeletCmdCollector {\n\treturn &KubeletCmdCollector{\n\t\tBaseCollector: BaseCollector{\n\t\t\tcollectorType: KubeletCmd,\n\t\t\texporter: exporter,\n\t\t},\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileCollectd{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func NewDiskCollector() (Collector, error) {\n\treturn &diskCollector{\n\t\tdiskBusyAll: prometheus.NewDesc(\n\t\t\tprometheus.BuildFQName(namespace, nodeCollectorSubsystem, \"disk_busy_all\"),\n\t\t\t\"Current disk busy percentage represented in 0.0-1.0.\",\n\t\t\t[]string{\"node\", \"disk\"}, ConstLabels,\n\t\t),\n\t\tdiskIoschedQueueAll: prometheus.NewDesc(\n\t\t\tprometheus.BuildFQName(namespace, nodeCollectorSubsystem, \"disk_iosched_queued_all\"),\n\t\t\t\"Current queue depth for IO sceduler.\",\n\t\t\t[]string{\"node\", \"disk\"}, ConstLabels,\n\t\t),\n\t\tdiskXfersInRateAll: prometheus.NewDesc(\n\t\t\tprometheus.BuildFQName(namespace, nodeCollectorSubsystem, \"disk_xfers_in_rate_all\"),\n\t\t\t\"Current disk ingest transfer rate.\",\n\t\t\t[]string{\"node\", \"disk\"}, ConstLabels,\n\t\t),\n\t\tdiskXfersOutRateAll: prometheus.NewDesc(\n\t\t\tprometheus.BuildFQName(namespace, nodeCollectorSubsystem, \"disk_xfers_out_rate_all\"),\n\t\t\t\"Current disk egress transfer rate.\",\n\t\t\t[]string{\"node\", \"disk\"}, ConstLabels,\n\t\t),\n\t\tdiskLatencyAll: prometheus.NewDesc(\n\t\t\tprometheus.BuildFQName(namespace, nodeCollectorSubsystem, \"disk_latency_all\"),\n\t\t\t\"Current disk latency.\",\n\t\t\t[]string{\"node\", \"disk\"}, ConstLabels,\n\t\t),\n\t}, nil\n}", "func NewNatGWCollector(logger log.Logger) (Collector, error) {\n\treturn &natgwCollector{\n\t\tdesc: natgwDesc,\n\t\tlogger: logger,\n\t}, nil\n}", "func New() *IpmiCollector {\n\tcollector := &IpmiCollector{Initialized: false}\n\treturn collector\n}", "func NewXFRMCollector() prometheus.Collector {\n\treturn newXFRMCollector(procfs.NewXfrmStat)\n}" ]
[ "0.66337353", "0.6526897", "0.6425859", "0.63669264", "0.6254919", "0.6202103", "0.61925757", "0.6130463", "0.61028224", "0.6087886", "0.60859865", "0.60501295", "0.6017658", "0.60055315", "0.5995613", "0.59854144", "0.5978756", "0.59634596", "0.5916075", "0.59078556", "0.5884585", "0.5876889", "0.58652514", "0.58611846", "0.5859921", "0.5837396", "0.5730075", "0.5720161", "0.56939006", "0.5685596", "0.5675706", "0.56482416", "0.55779016", "0.5552725", "0.55115855", "0.5498211", "0.54971504", "0.5491605", "0.54620904", "0.5462069", "0.54595935", "0.53988534", "0.536353", "0.536353", "0.53537476", "0.5351772", "0.5342112", "0.5321121", "0.5315615", "0.53057694", "0.52990746", "0.5272021", "0.5263731", "0.52496094", "0.52396584", "0.5218805", "0.52134836", "0.5199195", "0.5185334", "0.5171265", "0.51440364", "0.5138683", "0.511895", "0.5116318", "0.51076144", "0.5106574", "0.50999725", "0.50954586", "0.50777936", "0.50772274", "0.5073231", "0.5071748", "0.5052986", "0.5052639", "0.50483954", "0.50391144", "0.50290745", "0.502813", "0.5020532", "0.5009915", "0.500311", "0.49997726", "0.49905923", "0.4982187", "0.49795213", "0.49647146", "0.49518433", "0.49504507", "0.49428955", "0.4926079", "0.49159762", "0.4907068", "0.49010542", "0.48998597", "0.48869967", "0.48848495", "0.48769006", "0.48753205", "0.48688865", "0.4868369" ]
0.6147009
7
Describe returns all descriptions of the collector.
func (c *goCollector) Describe(ch chan<- *Desc) { c.base.Describe(ch) for _, i := range c.msMetrics { ch <- i.desc } for _, m := range c.rmExposedMetrics { ch <- m.Desc() } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *collector) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, co := range c.collectors {\n\t\tco.describe(ch)\n\t}\n}", "func (c *collector) Describe(ch chan<- *prometheus.Desc) {\n\tc.config.getAllDescs(ch)\n}", "func (c *Collector) Describe(chan<- *prometheus.Desc) {}", "func (r *RGWCollector) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, metric := range r.collectorList() {\n\t\tmetric.Describe(ch)\n\t}\n}", "func (o *OSDCollector) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, metric := range o.collectorList() {\n\t\tmetric.Describe(ch)\n\t}\n\tch <- o.OSDDownDesc\n\tch <- o.ScrubbingStateDesc\n\tch <- o.PGObjectsRecoveredDesc\n}", "func (c *InterfacesCollector) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, m := range c.collectors() {\n\t\tm.Describe(ch)\n\t}\n}", "func (p *pool) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, col := range p.collectors() {\n\t\tcol.Describe(ch)\n\t}\n}", "func (c *Collector) Describe(ch chan<- *prometheus.Desc) {\n\tprometheus.DescribeByCollect(c, ch)\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, cc := range e.collectors {\n\t\tcc.Describe(ch)\n\t}\n}", "func (c *collector) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, d := range descriptors {\n\t\tch <- d\n\t}\n}", "func (o *OSDCollector) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, metric := range o.collectorList() {\n\t\tmetric.Describe(ch)\n\t}\n\tch <- o.ScrubbingStateDesc\n}", "func (b Blackbox) Describe(descs chan<- *prometheus.Desc) {\n\tprometheus.DescribeByCollect(b, descs)\n}", "func (c *solarCollector) Describe(ch chan<- *prometheus.Desc) {\n\n\t// Describe the Collector's member that are of type Desc\n\tds := []*prometheus.Desc{\n\t\tc.panelVoltage,\n\t}\n\n\tfor _, d := range ds {\n\t\tch <- d\n\t}\n\t// Describe the other types\n\tc.scrapeFailures.Describe(ch)\n}", "func (c *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, cc := range c.collectors {\n\t\tcc.Describe(ch)\n\t}\n}", "func (c *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, cc := range c.collectors {\n\t\tcc.Describe(ch)\n\t}\n}", "func (collector *collector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- cpuUsageDesc\n\tch <- memUsageDesc\n\tch <- memUsagePeakDesc\n\tch <- swapUsageDesc\n\tch <- swapUsagePeakDesc\n\tch <- processCountDesc\n\tch <- containerPIDDesc\n\tch <- runningStatusDesc\n\tch <- diskUsageDesc\n\tch <- networkUsageDesc\n}", "func (c *auditdCollector) Describe(ch chan<- *prometheus.Desc) {\n\n\tfor _, metric := range c.metrics {\n\t\tch <- metric.desc\n\t}\n\n}", "func (collector *MetricsCollector) Describe(ch chan<- *prometheus.Desc) {\n\tfor k := range collector.metrics {\n\t\tfor idxMColl := range collector.metrics[k] {\n\t\t\tch <- collector.metrics[k][idxMColl].metricDesc\n\t\t}\n\t}\n\tcollector.defMetrics.describe(ch)\n}", "func (c *CephExporter) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, cc := range c.collectors {\n\t\tcc.Describe(ch)\n\t}\n}", "func (c *SecurityGroupCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.Defined\n\tch <- c.EnableDefault\n\tch <- c.ProjectDefault\n\tch <- c.Stateful\n\tch <- c.InboundDefault\n\tch <- c.OutboundDefault\n\tch <- c.Servers\n\tch <- c.Created\n\tch <- c.Modified\n}", "func (c *VMCollector) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, m := range c.getMetrics() {\n\t\tch <- m.Desc()\n\t}\n}", "func (c *KubernetesCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.Up\n\tch <- c.NodePools\n\tch <- c.Nodes\n}", "func (c *Collector) Describe(ch chan<- 
*prometheus.Desc) {\n\tc.state.describe(ch)\n\tc.dbSize.describe(ch)\n\tc.uptime.describe(ch)\n\tc.clientConnections.describe(ch)\n\tc.clientReadRequests.describe(ch)\n\tc.clientWriteRequests.describe(ch)\n\tc.clientDroppedRequests.describe(ch)\n}", "func (n LXCCollector) Describe(ch chan<- *prometheus.Desc) {\n\tscrapeDurations.Describe(ch)\n}", "func (coll WmiCollector) Describe(ch chan<- *prometheus.Desc) {\n\tdefer trace()()\n\tscrapeDurations.Describe(ch)\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tprometheus.DescribeByCollect(e, ch)\n}", "func (sc *SlurmCollector) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, element := range sc.descPtrMap {\n\t\tch <- element\n\t}\n}", "func (c *StorageDomainCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- upDesc\n\tch <- masterDesc\n\tch <- availableDesc\n\tch <- usedDesc\n\tch <- committedDesc\n}", "func (pc *PrometheusCollector) Describe(ch chan<- *prometheus.Desc) {\n\tprometheus.DescribeByCollect(pc, ch)\n}", "func (c *ComputeCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.Instances\n\tch <- c.ForwardingRules\n}", "func (c *OrchestratorCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.upMetric.Desc()\n\n\tfor _, m := range c.metrics {\n\t\tch <- m\n\t}\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\te.withCollectors(func(cs []prometheus.Collector) {\n\t\tfor _, c := range cs {\n\t\t\tc.Describe(ch)\n\t\t}\n\t})\n}", "func (c *filebeatCollector) Describe(ch chan<- *prometheus.Desc) {\n\n\tfor _, metric := range c.metrics {\n\t\tch <- metric.desc\n\t}\n\n}", "func (c *metricbeatCollector) Describe(ch chan<- *prometheus.Desc) {\n\n\tfor _, metric := range c.metrics {\n\t\tch <- metric.desc\n\t}\n\n}", "func (c *collector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- scrapeDurationDesc\n\tpower.Describe(ch)\n\tthermal.Describe(ch)\n}", "func (pc *PBSCollector) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, element := range pc.descPtrMap {\n\t\tch <- element\n\t}\n}", "func (a *AttunityCollector) Describe(ch chan<- *prometheus.Desc) {\n\t// Hi I do nothing\n}", "func (*storageCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- totalBlocksDesc\n\tch <- usedBlocksDesc\n\tch <- availableBlocksDesc\n\tch <- usedPercentDesc\n}", "func (c *beatCollector) Describe(ch chan<- *prometheus.Desc) {\n\n\tfor _, metric := range c.metrics {\n\t\tch <- metric.desc\n\t}\n\n}", "func (c *unidataCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.licenseLimit\n\tch <- c.licenseUsage\n\tch <- c.up\n}", "func (c *LoadBalancerCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.Created\n\tch <- c.Services\n\tch <- c.MaxServices\n\tch <- c.Targets\n\tch <- c.MaxTargets\n\tch <- c.TargetsHealthy\n\tch <- c.TargetsUnhealthy\n\tch <- c.TargetsUnknown\n\tch <- c.AssignedCertificates\n\tch <- c.MaxAssignedCertificates\n\tch <- c.IngoingTraffic\n\tch <- c.OutgoingTraffic\n\tch <- c.IncludedTraffic\n\tch <- c.Connections\n\tch <- c.MaxConnections\n\tch <- c.ConnectionsPerSecond\n\tch <- c.RequestsPerSecond\n\tch <- c.IncomingBandwidth\n\tch <- c.OutgoingBandwidth\n}", "func (*interfaceCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- receiveBytesDesc\n\tch <- receivePacketsDesc\n\tch <- receiveErrorsDesc\n\tch <- receiveDropsDesc\n\tch <- transmitBytesDesc\n\tch <- transmitPacketsDesc\n\tch <- transmitDropsDesc\n\tch <- transmitErrorsDesc\n\tch <- ipv6receiveBytesDesc\n\tch <- ipv6receivePacketsDesc\n\tch <- ipv6transmitBytesDesc\n\tch <- 
ipv6transmitPacketsDesc\n\tch <- adminStatusDesc\n\tch <- operStatusDesc\n\tch <- errorStatusDesc\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tch <- e.up.Desc()\n\n\tfor _, vec := range e.counters {\n\t\tvec.Describe(ch)\n\t}\n\n\tfor _, vec := range e.gauges {\n\t\tvec.Describe(ch)\n\t}\n}", "func (k *KACollector) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, m := range k.metrics {\n\t\tch <- m\n\t}\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\t// We cannot know in advance what metrics the exporter will generate\n\t// from clickhouse. So we use the poor man's describe method: Run a collect\n\t// and send the descriptors of all the collected metrics.\n\n\tmetricCh := make(chan prometheus.Metric)\n\tdoneCh := make(chan struct{})\n\n\tgo func() {\n\t\tfor m := range metricCh {\n\t\t\tch <- m.Desc()\n\t\t}\n\t\tclose(doneCh)\n\t}()\n\n\te.Collect(metricCh)\n\tclose(metricCh)\n\t<-doneCh\n}", "func (c *ImageCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.Active\n\tch <- c.ImageSize\n\tch <- c.DiskSize\n\tch <- c.Created\n\tch <- c.Deprecated\n}", "func (collector *OpenweatherCollector) Describe(ch chan<- *prometheus.Desc) {\n\n\t//Update this section with the each metric you create for a given collector\n\tch <- collector.temperatureMetric\n\tch <- collector.humidity\n\tch <- collector.feelslike\n\tch <- collector.pressure\n\tch <- collector.windspeed\n\tch <- collector.rain1h\n\tch <- collector.winddegree\n\tch <- collector.cloudiness\n\tch <- collector.sunrise\n\tch <- collector.sunset\n\tch <- collector.currentconditions\n}", "func (n NodeCollector) Describe(ch chan<- *prometheus.Desc) {\n\tscrapeDurations.Describe(ch)\n}", "func (m *metricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) }", "func (c *MetricsCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.verify\n}", "func (c *Collector) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, scraper := range c.Scrapers {\n\t\tfor _, metric := range scraper.Metrics {\n\t\t\tch <- metric.metric\n\t\t}\n\t}\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tch <- e.poolUsage.Desc()\n\tch <- e.providersOnline.Desc()\n\tch <- e.providersFaulted.Desc()\n}", "func (coll WmiCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- scrapeDurationDesc\n\tch <- scrapeSuccessDesc\n}", "func (a collectorAdapter) Describe(ch chan<- *prometheus.Desc) {\n\t// We have to send *some* metric in Describe, but we don't know which ones\n\t// we're going to get, so just send a dummy metric.\n\tch <- prometheus.NewDesc(\"dummy_metric\", \"Dummy metric.\", nil, nil)\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tch <- domainsBeingBlocked\n\tch <- dnsQueries\n\tch <- adsBlocked\n\tch <- adsPercentage\n\tch <- domainsOverTime\n\tch <- adsOverTime\n\tch <- topQueries\n\tch <- topAds\n\tch <- topSources\n\tch <- queryTypes\n}", "func (*accountingCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- inlineActiveFlowsDesc\n\tch <- inlineIpv4ActiveFlowsDesc\n\tch <- inlineIpv6ActiveFlowsDesc\n\n\tch <- inlineFlowsDesc\n\tch <- inlineIpv4TotalFlowsDesc\n\tch <- inlineIpv6TotalFlowsDesc\n\n\tch <- inlineFlowCreationFailuresDesc\n\tch <- inlineIpv4FlowCreationFailuresDesc\n\tch <- inlineIpv6FlowCreationFailuresDesc\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tmetricCh := make(chan prometheus.Metric)\n\tdoneCh := make(chan struct{})\n\n\tgo func() {\n\t\tfor m := range metricCh {\n\t\t\tch <- 
m.Desc()\n\t\t}\n\t\tclose(doneCh)\n\t}()\n\n\te.Collect(metricCh)\n\tclose(metricCh)\n\t<-doneCh\n}", "func (dc *daemonsetCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- descDaemonSetCreated\n\tch <- descDaemonSetCurrentNumberScheduled\n\tch <- descDaemonSetNumberAvailable\n\tch <- descDaemonSetNumberMisscheduled\n\tch <- descDaemonSetNumberUnavailable\n\tch <- descDaemonSetDesiredNumberScheduled\n\tch <- descDaemonSetNumberReady\n\tch <- descDaemonSetUpdatedNumberScheduled\n\tch <- descDaemonSetMetadataGeneration\n\tch <- descDaemonSetLabels\n}", "func (c *PricingCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.Image\n\tch <- c.FloatingIP\n\tch <- c.Traffic\n\tch <- c.ServerBackup\n\tch <- c.Volume\n\tch <- c.Servers\n\tch <- c.LoadBalancers\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, m := range replicationMetrics {\n\t\tch <- m\n\t}\n\tfor _, m := range securityMetrics {\n\t\tch <- m\n\t}\n\tfor _, m := range storageMetrics {\n\t\tch <- m\n\t}\n\tfor _, m := range systemMetrics {\n\t\tch <- m\n\t}\n\tch <- artifactoryUp\n\tch <- e.totalScrapes.Desc()\n\tch <- e.jsonParseFailures.Desc()\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, m := range e.gaugeVecs {\n\t\tm.Describe(ch)\n\t}\n}", "func (*poolCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- totalPoolCapacity\n\tch <- availablePoolCapacity\n\tch <- allocatedPoolCapacity\n\tch <- poolCapacityUsedPercent\n}", "func (*environmentCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- temperaturesDesc\n\tch <- powerSupplyDesc\n}", "func (c *OSCollector) Describe(ch chan<- *prometheus.Desc) {\n\n\tch <- c.PhysicalMemoryFreeBytes\n\tch <- c.PagingFreeBytes\n\tch <- c.VirtualMemoryFreeBytes\n\tch <- c.ProcessesMax\n\tch <- c.ProcessMemoryMaxBytes\n\tch <- c.Processes\n\tch <- c.Users\n\tch <- c.PagingMaxBytes\n\tch <- c.VirtualMemoryBytes\n\tch <- c.VisibleMemoryBytes\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tch <- e.up.Desc()\n\n\te.authCacheHits.Describe(ch)\n\te.authCacheMisses.Describe(ch)\n\te.databaseReads.Describe(ch)\n\te.databaseWrites.Describe(ch)\n\te.openDatabases.Describe(ch)\n\te.openOsFiles.Describe(ch)\n\te.requestTime.Describe(ch)\n\n\te.httpdStatusCodes.Describe(ch)\n\te.httpdRequestMethods.Describe(ch)\n\n\te.bulkRequests.Describe(ch)\n\te.clientsRequestingChanges.Describe(ch)\n\te.requests.Describe(ch)\n\te.temporaryViewReads.Describe(ch)\n\te.viewReads.Describe(ch)\n\n\te.diskSize.Describe(ch)\n\te.dataSize.Describe(ch)\n\te.diskSizeOverhead.Describe(ch)\n\n\te.activeTasks.Describe(ch)\n\te.activeTasksDatabaseCompaction.Describe(ch)\n\te.activeTasksViewCompaction.Describe(ch)\n\te.activeTasksIndexer.Describe(ch)\n\te.activeTasksReplication.Describe(ch)\n}", "func (c *StatsCollector) Describe(descriptionChannel chan<- *prometheus.Desc) {\n\t// DescribeByCollect is a helper to implement the Describe method of a custom\n\t// Collector. 
It collects the metrics from the provided Collector and sends\n\t// their descriptors to the provided channel.\n\tprometheus.DescribeByCollect(c, descriptionChannel)\n}", "func (c *PTVMetricsCollector) Describe(ch chan<- *prometheus.Desc) {\n\tdefer timeTrack(time.Now(), \"PTVMetricsCollector.Describe\")\n\n\tc.metrics.Describe(ch)\n}", "func (p PlanetCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- scrapeDurationDesc\n\tch <- scrapeSuccessDesc\n}", "func (c *DiskCache) Describe(descs chan<- *prometheus.Desc) {\n\tprometheus.DescribeByCollect(c, descs)\n}", "func (uc *UpgradeCollector) Describe(ch chan<- *prometheus.Desc) {\n\t// .spec\n\tch <- uc.managedMetrics.upgradeAt\n\tch <- uc.managedMetrics.pdbTimeout\n\n\t// .status\n\tch <- uc.managedMetrics.startTime\n\tch <- uc.managedMetrics.completeTime\n\n\t// .status.conditions[]\n\tch <- uc.managedMetrics.sendStartedNotification\n\tch <- uc.managedMetrics.preHealthCheck\n\tch <- uc.managedMetrics.extDepAvailCheck\n\tch <- uc.managedMetrics.scaleUpExtraNodes\n\tch <- uc.managedMetrics.controlPlaneMaintWindow\n\tch <- uc.managedMetrics.commenceUpgrade\n\tch <- uc.managedMetrics.controlPlaneUpgraded\n\tch <- uc.managedMetrics.removeControlPlaneMaint\n\tch <- uc.managedMetrics.workersMaintWindow\n\tch <- uc.managedMetrics.allWorkerNodesUpgraded\n\tch <- uc.managedMetrics.removeExtraScaledNodes\n\tch <- uc.managedMetrics.removeMaintWindow\n\tch <- uc.managedMetrics.postClusterHealthCheck\n\tch <- uc.managedMetrics.sendCompletedNotification\n}", "func (c *DebugFsStatCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.hits\n\tch <- c.misses\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tch <- metric_uptime.Desc()\n\tch <- nomad_up.Desc()\n\tch <- metric_request_response_time_total.Desc()\n\tch <- metric_request_response_time_avg.Desc()\n\n\tfor _, metric := range metric_request_status_count_current {\n\t\tch <- metric.Desc()\n\t}\n\tfor _, metric := range metric_request_status_count_total {\n\t\tch <- metric.Desc()\n\t}\n}", "func (collStatList *CollectionStatList) Describe(ch chan<- *prometheus.Desc) {\n\tcollectionSize.Describe(ch)\n\tcollectionObjectCount.Describe(ch)\n\tcollectionAvgObjSize.Describe(ch)\n\tcollectionStorageSize.Describe(ch)\n\tcollectionIndexes.Describe(ch)\n\tcollectionIndexesSize.Describe(ch)\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tch <- systemStatus\n\tch <- systemTemperature\n\tch <- systemPowerStatus\n\tch <- systemFanStatus\n\tch <- systemCPUFanStatus\n\tch <- systemUpgradeAvailable\n\n\tch <- memTotalSwap\n\tch <- memAvailSwap\n\tch <- memTotalReal\n\tch <- memAvailReal\n\tch <- memTotalFree\n\tch <- memShared\n\tch <- memBuffer\n\tch <- memCached\n\n\tch <- loadShort\n\tch <- loadMid\n\tch <- loadLong\n\n\tch <- cpuUser\n\tch <- cpuNice\n\tch <- cpuSystem\n\tch <- cpuIdle\n\tch <- cpuWait\n\tch <- cpuKernel\n\tch <- cpuInterrupt\n\n\tch <- netIn\n\tch <- netOut\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, m := range e.serverMetrics {\n\t\tch <- m\n\t}\n\tch <- solaceUp\n}", "func (t *TimestampCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- t.Description\n}", "func (e *UwsgiExporter) Describe(ch chan<- *prometheus.Desc) {\n\te.uwsgiUp.Describe(ch)\n\te.scrapeDurations.Describe(ch)\n\n\tfor _, descs := range e.descriptorsMap {\n\t\tfor _, desc := range descs {\n\t\t\tch <- desc\n\t\t}\n\t}\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, desc := range junosDesc {\n\t\tch <- desc\n\t}\n}", 
"func (c *UPSCollector) Describe(ch chan<- *prometheus.Desc) {\n\tds := []*prometheus.Desc{\n\t\tc.UPSLoadPercent,\n\t\tc.BatteryChargePercent,\n\t\tc.LineVolts,\n\t\tc.LineNominalVolts,\n\t\tc.BatteryVolts,\n\t\tc.BatteryNominalVolts,\n\t\tc.BatteryNumberTransfersTotal,\n\t\tc.BatteryTimeLeftSeconds,\n\t\tc.BatteryTimeOnSeconds,\n\t\tc.BatteryCumulativeTimeOnSecondsTotal,\n\t\tc.UPSStatus,\n\t\tc.UPSInfo,\n\t}\n\n\tfor _, d := range ds {\n\t\tch <- d\n\t}\n}", "func (c *prometheusCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.maxOpenConnections\n\tch <- c.openConnections\n\tch <- c.inUse\n\tch <- c.idle\n\tch <- c.waitCount\n\tch <- c.waitDuration\n\tch <- c.maxIdleClosed\n\tch <- c.maxIdleTimeClosed\n\tch <- c.maxLifetimeClosed\n}", "func (m *MetricVec) Describe(ch chan<- *Desc) {\n\tch <- m.desc\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tch <- connected\n\tch <- up\n\tch <- distance\n\tch <- latency\n\tch <- users\n\tch <- channels\n\tch <- ison\n}", "func (collector *Metrics) Describe(ch chan<- *prometheus.Desc) {\n\tch <- collector.issue\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tch <- up\n\tch <- clusterServers\n\tch <- clusterLeader\n\tch <- nodeCount\n\tch <- memberStatus\n\tch <- memberWanStatus\n\tch <- serviceCount\n\tch <- serviceNodesHealthy\n\tch <- nodeChecks\n\tch <- serviceChecks\n\tch <- keyValues\n\tch <- serviceTag\n\tch <- serviceCheckNames\n}", "func (collector *atlassianUPMCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- collector.atlassianUPMTimeMetric\n\tch <- collector.atlassianUPMUpMetric\n\tch <- collector.atlassianUPMPlugins\n\tch <- collector.atlassianUPMVersionsMetric\n}", "func (c *NFSCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.Up\n\tch <- c.NFSInfo\n\tch <- c.DiskFree\n\tch <- c.NICInfo\n\tch <- c.NICReceive\n\tch <- c.NICSend\n\n\tch <- c.MaintenanceScheduled\n\tch <- c.MaintenanceInfo\n\tch <- c.MaintenanceStartTime\n\tch <- c.MaintenanceEndTime\n}", "func (c *StorageQuotaCollector) Describe(ch chan<- *prometheus.Desc) {\n\tds := []*prometheus.Desc{\n\t\tc.HardDesc,\n\t\tc.UsedDesc,\n\t}\n\n\tfor _, d := range ds {\n\t\tch <- d\n\t}\n}", "func (p *Collector) Describe(c chan<- *prometheus.Desc) {\n\t// We must emit some description otherwise an error is returned. 
This\n\t// description isn't shown to the user!\n\tprometheus.NewGauge(prometheus.GaugeOpts{Name: \"Dummy\", Help: \"Dummy\"}).Describe(c)\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\te.up.Describe(ch)\n\te.scrapeDuration.Describe(ch)\n\te.failedScrapes.Describe(ch)\n\te.totalScrapes.Describe(ch)\n\tch <- e.priceDesc\n\tch <- e.openDesc\n\tch <- e.detailsDesc\n}", "func (collector *Collector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- collector.incidentsCreatedCount\n}", "func (c *TeamsCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.totalTeamsGaugeDesc\n}", "func (dc *deploymentconfigurationCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- descDeploymentConfigurationCreated\n\tch <- descDeploymentConfigurationStatusReadyReplicas\n\tch <- descDeploymentConfigurationStatusAvailableReplicas\n}", "func (*systemCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- totalSystemCapacity\n\tch <- availableSystemCapacity\n\tch <- allocatedSystemCapacity\n\tch <- systemCapacityUsedPercent\n\tch <- rawSystemCapacity\n}", "func (c *analysisRunCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- descAnalysisRunInfo\n}", "func (*isisCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- upCount\n\tch <- totalCount\n}", "func (m httpReferenceDiscoveryMetrics) Describe(descs chan<- *prometheus.Desc) {\n\tprometheus.DescribeByCollect(m, descs)\n}", "func (n DellHWCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- scrapeDurationDesc\n\tch <- scrapeSuccessDesc\n}", "func (bc *BookCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- bc.BookCount\n\tch <- bc.BookGenreUniqueCount\n\tch <- bc.BookInfo\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tch <- e.up.Desc()\n\n\tfor _, vec := range e.gauges {\n\t\tvec.Describe(ch)\n\t}\n}", "func (c *ledCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.brightness\n\tch <- c.maxBrightness\n}" ]
[ "0.7873847", "0.78222793", "0.7819363", "0.78171426", "0.78148025", "0.78116834", "0.78031814", "0.7784248", "0.7769007", "0.7761224", "0.77533114", "0.7714344", "0.7713539", "0.77107584", "0.77107584", "0.7662911", "0.76532143", "0.7635757", "0.7634063", "0.76263374", "0.7620168", "0.76154304", "0.7613049", "0.7604958", "0.7594655", "0.75918883", "0.75833434", "0.7568194", "0.75487506", "0.7543976", "0.753325", "0.7500003", "0.7489972", "0.7482971", "0.7473419", "0.7471214", "0.7463519", "0.745969", "0.74553216", "0.7439691", "0.74216914", "0.7419373", "0.74186987", "0.74157125", "0.7414268", "0.741155", "0.7399557", "0.73960924", "0.73923874", "0.7389056", "0.7368263", "0.73520184", "0.7350839", "0.7348663", "0.7343851", "0.73415136", "0.73325837", "0.7330572", "0.73238766", "0.73213536", "0.7307921", "0.73036593", "0.72924614", "0.7277126", "0.72632027", "0.72631395", "0.7262827", "0.7261999", "0.7244493", "0.72382504", "0.72337383", "0.72291553", "0.72265756", "0.7213868", "0.7208964", "0.7200569", "0.71813166", "0.71754146", "0.71718574", "0.7167822", "0.7167648", "0.71609735", "0.71567404", "0.7148872", "0.7142324", "0.71391934", "0.71310306", "0.7128607", "0.7119615", "0.7116969", "0.71156305", "0.7113942", "0.7113848", "0.71107954", "0.7093797", "0.70821893", "0.70745057", "0.7048526", "0.70373434", "0.70229286" ]
0.7838444
1
Collect returns the current state of all metrics of the collector.
func (c *goCollector) Collect(ch chan<- Metric) { // Collect base non-memory metrics. c.base.Collect(ch) if len(c.sampleBuf) == 0 { return } // Collect must be thread-safe, so prevent concurrent use of // sampleBuf elements. Just read into sampleBuf but write all the data // we get into our Metrics or MemStats. // // This lock also ensures that the Metrics we send out are all from // the same updates, ensuring their mutual consistency insofar as // is guaranteed by the runtime/metrics package. // // N.B. This locking is heavy-handed, but Collect is expected to be called // relatively infrequently. Also the core operation here, metrics.Read, // is fast (O(tens of microseconds)) so contention should certainly be // low, though channel operations and any allocations may add to that. c.mu.Lock() defer c.mu.Unlock() // Populate runtime/metrics sample buffer. metrics.Read(c.sampleBuf) // Collect all our runtime/metrics user chose to expose from sampleBuf (if any). for i, metric := range c.rmExposedMetrics { // We created samples for exposed metrics first in order, so indexes match. sample := c.sampleBuf[i] // N.B. switch on concrete type because it's significantly more efficient // than checking for the Counter and Gauge interface implementations. In // this case, we control all the types here. switch m := metric.(type) { case *counter: // Guard against decreases. This should never happen, but a failure // to do so will result in a panic, which is a harsh consequence for // a metrics collection bug. v0, v1 := m.get(), unwrapScalarRMValue(sample.Value) if v1 > v0 { m.Add(unwrapScalarRMValue(sample.Value) - m.get()) } m.Collect(ch) case *gauge: m.Set(unwrapScalarRMValue(sample.Value)) m.Collect(ch) case *batchHistogram: m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name)) m.Collect(ch) default: panic("unexpected metric type") } } if c.msMetricsEnabled { // ms is a dummy MemStats that we populate ourselves so that we can // populate the old metrics from it if goMemStatsCollection is enabled. var ms runtime.MemStats memStatsFromRM(&ms, c.sampleMap) for _, i := range c.msMetrics { ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms)) } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *VMCollector) Collect(ch chan<- prometheus.Metric) {\n\tfor _, m := range c.getMetrics() {\n\t\tch <- m\n\t}\n}", "func (c *MetricsCollector) Collect(ch chan<- prometheus.Metric) {\n\tfor _, s := range c.status {\n\t\ts.RLock()\n\t\tdefer s.RUnlock()\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.verify,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(s.VerifyRestore),\n\t\t\t\"verify_restore\",\n\t\t\ts.BackupService,\n\t\t\ts.StorageService,\n\t\t)\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.verify,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(s.VerifyDiff),\n\t\t\t\"verify_diff\",\n\t\t\ts.BackupService,\n\t\t\ts.StorageService,\n\t\t)\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.verify,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(s.VerifyChecksum),\n\t\t\t\"verify_checksum\",\n\t\t\ts.BackupService,\n\t\t\ts.StorageService,\n\t\t)\n\t}\n\n}", "func (c *Collector) Collect(ch chan<- prometheus.Metric) {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\tc.totalScrapes.Inc()\n\terr := c.getDadataBalance()\n\tif err != nil {\n\t\tc.failedBalanceScrapes.Inc()\n\t}\n\terr = c.getDadataStats()\n\tif err != nil {\n\t\tc.failedStatsScrapes.Inc()\n\t}\n\n\tch <- c.totalScrapes\n\tch <- c.failedBalanceScrapes\n\tch <- c.failedStatsScrapes\n\tch <- c.CurrentBalance\n\tch <- c.ServicesClean\n\tch <- c.ServicesMerging\n\tch <- c.ServicesSuggestions\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\tif err := e.scrape(); err != nil {\n\t\tlog.Error(err)\n\t\tnomad_up.Set(0)\n\t\tch <- nomad_up\n\t\treturn\n\t}\n\n\tch <- nomad_up\n\tch <- metric_uptime\n\tch <- metric_request_response_time_total\n\tch <- metric_request_response_time_avg\n\n\tfor _, metric := range metric_request_status_count_current {\n\t\tch <- metric\n\t}\n\tfor _, metric := range metric_request_status_count_total {\n\t\tch <- metric\n\t}\n}", "func (c *collector) Collect(ch chan<- prometheus.Metric) {\n\tc.m.Lock()\n\tfor _, m := range c.metrics {\n\t\tch <- m.metric\n\t}\n\tc.m.Unlock()\n}", "func (c *OrchestratorCollector) Collect(ch chan<- prometheus.Metric) {\n\tc.mutex.Lock() // To protect metrics from concurrent collects\n\tdefer c.mutex.Unlock()\n\n\tstats, err := c.orchestratorClient.GetMetrics()\n\tif err != nil {\n\t\tc.upMetric.Set(serviceDown)\n\t\tch <- c.upMetric\n\t\tlog.Printf(\"Error getting Orchestrator stats: %v\", err)\n\t\treturn\n\t}\n\n\tc.upMetric.Set(serviceUp)\n\tch <- c.upMetric\n\n\tch <- prometheus.MustNewConstMetric(c.metrics[\"cluter_size\"],\n\t\tprometheus.GaugeValue, float64(len(stats.Status.Details.AvailableNodes)))\n\tch <- prometheus.MustNewConstMetric(c.metrics[\"is_active_node\"],\n\t\tprometheus.GaugeValue, boolToFloat64(stats.Status.Details.IsActiveNode))\n\tch <- prometheus.MustNewConstMetric(c.metrics[\"problems\"],\n\t\tprometheus.GaugeValue, float64(len(stats.Problems)))\n\tch <- prometheus.MustNewConstMetric(c.metrics[\"last_failover_id\"],\n\t\tprometheus.CounterValue, float64(stats.LastFailoverID))\n\tch <- prometheus.MustNewConstMetric(c.metrics[\"is_healthy\"],\n\t\tprometheus.GaugeValue, boolToFloat64(stats.Status.Details.Healthy))\n\tch <- prometheus.MustNewConstMetric(c.metrics[\"failed_seeds\"],\n\t\tprometheus.CounterValue, float64(stats.FailedSeeds))\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mu.Lock()\n\tdefer e.mu.Unlock()\n\n\tfor _, cc := range e.collectors {\n\t\tcc.Collect(ch)\n\t}\n}", "func (c *Exporter) Collect(ch chan<- 
prometheus.Metric) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tfor _, cc := range c.collectors {\n\t\tcc.Collect(ch)\n\t}\n}", "func (c *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tfor _, cc := range c.collectors {\n\t\tcc.Collect(ch)\n\t}\n}", "func (h *Metrics) Collect(in chan<- prometheus.Metric) {\n\th.duration.Collect(in)\n\th.totalRequests.Collect(in)\n\th.requestSize.Collect(in)\n\th.responseSize.Collect(in)\n\th.handlerStatuses.Collect(in)\n\th.responseTime.Collect(in)\n}", "func (o *requestMetrics) Collect(ch chan<- prometheus.Metric) {\n\tmetricFamilies, err := o.stStore.GetPromDirectMetrics()\n\tif err != nil {\n\t\tklog.Errorf(\"fetch prometheus metrics failed: %v\", err)\n\t\treturn\n\t}\n\to.handleMetrics(metricFamilies, ch)\n}", "func (c *Collector) Collect(ch chan<- prometheus.Metric) {\n\tc.mut.RLock()\n\tdefer c.mut.RUnlock()\n\n\tif c.inner != nil {\n\t\tc.inner.Collect(ch)\n\t}\n}", "func (c *auditdCollector) Collect(ch chan<- prometheus.Metric) {\n\n\tfor _, i := range c.metrics {\n\t\tch <- prometheus.MustNewConstMetric(i.desc, i.valType, i.eval(c.stats))\n\t}\n\n}", "func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) {\n\toomCountByHost, ramUsageByHost := c.ReallyExpensiveAssessmentOfTheSystemState()\n\tfor host, oomCount := range oomCountByHost {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.OOMCountDesc,\n\t\t\tprometheus.CounterValue,\n\t\t\tfloat64(oomCount),\n\t\t\thost,\n\t\t)\n\t}\n\tfor host, ramUsage := range ramUsageByHost {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.RAMUsageDesc,\n\t\t\tprometheus.GaugeValue,\n\t\t\tramUsage,\n\t\t\thost,\n\t\t)\n\t}\n}", "func (collector *Collector) Collect(ch chan<- prometheus.Metric) {\n\tch <- prometheus.MustNewConstMetric(collector.incidentsCreatedCount, prometheus.CounterValue, collector.storage.GetIncidentsCreatedCount())\n}", "func (collector *collector) Collect(ch chan<- prometheus.Metric) {\n\tcontainerNames, err := collector.server.GetContainerNames()\n\tif err != nil {\n\t\tcollector.logger.Printf(\"Can't query container names: %s\", err)\n\t\treturn\n\t}\n\n\tfor _, containerName := range containerNames {\n\t\tstate, _, err := collector.server.GetContainerState(containerName)\n\t\tif err != nil {\n\t\t\tcollector.logger.Printf(\n\t\t\t\t\"Can't query container state for `%s`: %s\", containerName, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tcollector.collectContainerMetrics(ch, containerName, state)\n\t}\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.withCollectors(func(cs []prometheus.Collector) {\n\t\tfor _, c := range cs {\n\t\t\tc.Collect(ch)\n\t\t}\n\t})\n}", "func (o *observer) Collect(ch chan<- prometheus.Metric) {\n\to.updateError.Collect(ch)\n\to.verifyError.Collect(ch)\n\to.expiration.Collect(ch)\n}", "func (c *CephExporter) Collect(ch chan<- prometheus.Metric) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tfor _, cc := range c.collectors {\n\t\tcc.Collect(ch)\n\t}\n}", "func (coll WmiCollector) Collect(ch chan<- prometheus.Metric) {\n\texecute(coll.collector, ch)\n}", "func (m *Client) Collect(ch chan<- prometheus.Metric) {\n\tm.storeMu.Lock()\n\tdefer m.storeMu.Unlock()\n\n\tch <- prometheus.MustNewConstMetric(m.storeValuesDesc, prometheus.GaugeValue, float64(len(m.store)))\n\n\tfor k, v := range m.store {\n\t\tch <- prometheus.MustNewConstMetric(m.storeSizesDesc, prometheus.GaugeValue, float64(len(v.value)), k)\n\t}\n}", "func (collector *MetricsCollector) Collect(ch chan<- prometheus.Metric) 
{\n\tfilterMetricsByKind := func(kind string, orgMetrics []constMetric) (filteredMetrics []constMetric) {\n\t\tfor _, metric := range orgMetrics {\n\t\t\tif metric.kind == kind {\n\t\t\t\tfilteredMetrics = append(filteredMetrics, metric)\n\t\t\t}\n\t\t}\n\t\treturn filteredMetrics\n\t}\n\tcollector.defMetrics.reset()\n\tfor k := range collector.metrics {\n\t\tcounters := filterMetricsByKind(config.KeyMetricTypeCounter, collector.metrics[k])\n\t\tgauges := filterMetricsByKind(config.KeyMetricTypeGauge, collector.metrics[k])\n\t\thistograms := filterMetricsByKind(config.KeyMetricTypeHistogram, collector.metrics[k])\n\t\tcollectCounters(counters, collector.defMetrics, ch)\n\t\tcollectGauges(gauges, collector.defMetrics, ch)\n\t\tcollectHistograms(histograms, collector.defMetrics, ch)\n\t\tcollector.cache.Reset()\n\t}\n\tcollector.defMetrics.collectDefaultMetrics(ch)\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\t// Protect metrics from concurrent collects.\n\te.mutex.Lock()\n\tdefer e.mutex.Unlock()\n\n\t// Scrape metrics from Tankerkoenig API.\n\tif err := e.scrape(ch); err != nil {\n\t\te.logger.Printf(\"error: cannot scrape tankerkoenig api: %v\", err)\n\t}\n\n\t// Collect metrics.\n\te.up.Collect(ch)\n\te.scrapeDuration.Collect(ch)\n\te.failedScrapes.Collect(ch)\n\te.totalScrapes.Collect(ch)\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock()\n\tdefer e.mutex.Unlock()\n\n\te.scrape()\n\n\te.up.Collect(ch)\n\te.totalScrapes.Collect(ch)\n\te.exchangeStatus.Collect(ch)\n\te.ltp.Collect(ch)\n\te.bestBid.Collect(ch)\n\te.bestAsk.Collect(ch)\n\te.bestBidSize.Collect(ch)\n\te.bestAskSize.Collect(ch)\n\te.totalBidDepth.Collect(ch)\n\te.totalAskDepth.Collect(ch)\n\te.volume.Collect(ch)\n\te.volumeByProduct.Collect(ch)\n}", "func (c *metricbeatCollector) Collect(ch chan<- prometheus.Metric) {\n\n\tfor _, i := range c.metrics {\n\t\tch <- prometheus.MustNewConstMetric(i.desc, i.valType, i.eval(c.stats))\n\t}\n\n}", "func (pc *PrometheusCollector) Collect(ch chan<- prometheus.Metric) {\n\tpc.attempts.Collect(ch)\n\tpc.errors.Collect(ch)\n\tpc.successes.Collect(ch)\n\tpc.failures.Collect(ch)\n\tpc.rejects.Collect(ch)\n\tpc.shortCircuits.Collect(ch)\n\tpc.timeouts.Collect(ch)\n\tpc.fallbackSuccesses.Collect(ch)\n\tpc.fallbackFailures.Collect(ch)\n\tpc.totalDuration.Collect(ch)\n\tpc.runDuration.Collect(ch)\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\tif err := e.collect(ch); err != nil {\n\t\tglog.Error(fmt.Sprintf(\"Error collecting stats: %s\", err))\n\t}\n\treturn\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\tif err := e.collect(ch); err != nil {\n\t\tlog.Errorf(\"Error scraping ingestor: %s\", err)\n\t}\n\treturn\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock()\n\tdefer e.mutex.Unlock()\n\n\tfor _, vec := range e.gauges {\n\t\tvec.Reset()\n\t}\n\n\tdefer func() { ch <- e.up }()\n\n\t// If we fail at any point in retrieving GPU status, we fail 0\n\te.up.Set(1)\n\n\te.GetTelemetryFromNVML()\n\n\tfor _, vec := range e.gauges {\n\t\tvec.Collect(ch)\n\t}\n}", "func (c *beatCollector) Collect(ch chan<- prometheus.Metric) {\n\n\tfor _, i := range c.metrics {\n\t\tch <- prometheus.MustNewConstMetric(i.desc, i.valType, i.eval(c.stats))\n\t}\n\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) 
{\n\tok := e.collectPeersMetric(ch)\n\tok = e.collectLeaderMetric(ch) && ok\n\tok = e.collectNodesMetric(ch) && ok\n\tok = e.collectMembersMetric(ch) && ok\n\tok = e.collectMembersWanMetric(ch) && ok\n\tok = e.collectServicesMetric(ch) && ok\n\tok = e.collectHealthStateMetric(ch) && ok\n\tok = e.collectKeyValues(ch) && ok\n\n\tif ok {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tup, prometheus.GaugeValue, 1.0,\n\t\t)\n\t} else {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tup, prometheus.GaugeValue, 0.0,\n\t\t)\n\t}\n}", "func (cpuCollector *CPUCollector) Collect() {\n\tcpuCollector.cpuStats.GetCPUStats()\n\n\tcpuCollector.cpuMetrics.cpuTotal.Set(float64(cpuCollector.cpuStats.Total))\n\tcpuCollector.cpuMetrics.cupIdle.Set(float64(cpuCollector.cpuStats.Idle))\n\tcpuCollector.cpuMetrics.cpuUtilization.Set(cpuCollector.cpuStats.Utilization)\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\tup := e.scrape(ch)\n\n\tch <- prometheus.MustNewConstMetric(artifactoryUp, prometheus.GaugeValue, up)\n\tch <- e.totalScrapes\n\tch <- e.jsonParseFailures\n}", "func (c *prometheusCollector) Collect(ch chan<- prometheus.Metric) {\n\tvar stats = c.db.Stats()\n\n\tch <- prometheus.MustNewConstMetric(c.maxOpenConnections, prometheus.GaugeValue, float64(stats.MaxOpenConnections))\n\tch <- prometheus.MustNewConstMetric(c.openConnections, prometheus.GaugeValue, float64(stats.OpenConnections))\n\tch <- prometheus.MustNewConstMetric(c.inUse, prometheus.GaugeValue, float64(stats.InUse))\n\tch <- prometheus.MustNewConstMetric(c.idle, prometheus.GaugeValue, float64(stats.Idle))\n\tch <- prometheus.MustNewConstMetric(c.waitCount, prometheus.CounterValue, float64(stats.WaitCount))\n\tch <- prometheus.MustNewConstMetric(c.waitDuration, prometheus.CounterValue, float64(stats.WaitDuration))\n\tch <- prometheus.MustNewConstMetric(c.maxIdleClosed, prometheus.CounterValue, float64(stats.MaxIdleClosed))\n\tch <- prometheus.MustNewConstMetric(c.maxIdleTimeClosed, prometheus.CounterValue, float64(stats.MaxIdleTimeClosed))\n\tch <- prometheus.MustNewConstMetric(c.maxLifetimeClosed, prometheus.CounterValue, float64(stats.MaxLifetimeClosed))\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\te.zpool.getStatus()\n\te.poolUsage.Set(float64(e.zpool.capacity))\n\te.providersOnline.Set(float64(e.zpool.online))\n\te.providersFaulted.Set(float64(e.zpool.faulted))\n\n\tch <- e.poolUsage\n\tch <- e.providersOnline\n\tch <- e.providersFaulted\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\tif err := e.collect(ch); err != nil {\n\t\tlog.Errorf(\"Error scraping: %s\", err)\n\t}\n\treturn\n}", "func (c *filebeatCollector) Collect(ch chan<- prometheus.Metric) {\n\n\tfor _, i := range c.metrics {\n\t\tch <- prometheus.MustNewConstMetric(i.desc, i.valType, i.eval(c.stats))\n\t}\n\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\tif err := e.scrape(ch); err != nil {\n\t\tlog.Printf(\"Error scraping nightscout url: %s\", err)\n\t}\n\n\te.statusNightscout.Collect(ch)\n\n\treturn\n}", "func (c *analysisRunCollector) Collect(ch chan<- prometheus.Metric) {\n\tanalysisRuns, err := 
c.store.List(labels.NewSelector())\n\tif err != nil {\n\t\tlog.Warnf(\"Failed to collect analysisRuns: %v\", err)\n\t\treturn\n\t}\n\tfor _, ar := range analysisRuns {\n\t\tcollectAnalysisRuns(ch, ar)\n\t}\n}", "func (d *decorator) Collect(in chan<- prometheus.Metric) {\n\td.duration.Collect(in)\n\td.requests.Collect(in)\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tfor _, db := range e.dbs {\n\t\t// logger.Log(\"Scraping\", db.String())\n\t\tgo e.scrapeDatabase(db)\n\t}\n\te.mutex.Lock()\n\tdefer e.mutex.Unlock()\n\te.cpuPercent.Collect(ch)\n\te.dataIO.Collect(ch)\n\te.logIO.Collect(ch)\n\te.memoryPercent.Collect(ch)\n\te.workPercent.Collect(ch)\n\te.sessionPercent.Collect(ch)\n\te.storagePercent.Collect(ch)\n\te.dbUp.Collect(ch)\n\te.up.Set(1)\n}", "func (c *solarCollector) Collect(ch chan<- prometheus.Metric) {\n\tc.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer c.mutex.Unlock()\n\tif err := c.collect(ch); err != nil {\n\t\tlog.Printf(\"Error getting solar controller data: %s\", err)\n\t\tc.scrapeFailures.Inc()\n\t\tc.scrapeFailures.Collect(ch)\n\t}\n\treturn\n}", "func (b *EBPFTelemetry) Collect(ch chan<- prometheus.Metric) {\n\tb.getHelpersTelemetry(ch)\n\tb.getMapsTelemetry(ch)\n}", "func (c *OSCollector) Collect(ch chan<- prometheus.Metric) {\n\tif desc, err := c.collect(ch); err != nil {\n\t\tlog.Println(\"[ERROR] failed collecting os metrics:\", desc, err)\n\t\treturn\n\t}\n}", "func (p *plug) Collect(ch chan<- prometheus.Metric) {\n\tp.doStats(ch, doMetric)\n}", "func (sc *SlurmCollector) Collect(ch chan<- prometheus.Metric) {\n\tsc.mutex.Lock()\n\tdefer sc.mutex.Unlock()\n\n\tlog.Debugf(\"Time since last scrape: %f seconds\", time.Since(sc.lastScrape).Seconds())\n\tif time.Since(sc.lastScrape).Seconds() > float64(sc.scrapeInterval) {\n\t\tsc.updateDynamicJobIds()\n\t\tvar err error\n\t\tsc.sshClient, err = sc.sshConfig.NewClient()\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Creating SSH client: %s\", err.Error())\n\t\t\treturn\n\t\t}\n\t\tdefer sc.sshClient.Close()\n\t\tlog.Infof(\"Collecting metrics from Slurm...\")\n\t\tsc.trackedJobs = make(map[string]bool)\n\t\tif sc.targetJobIds == \"\" {\n\t\t\t// sc.collectQueue()\n\t\t} else {\n\t\t\tsc.collectAcct()\n\t\t}\n\t\tif !sc.skipInfra {\n\t\t\tsc.collectInfo()\n\t\t}\n\t\tsc.lastScrape = time.Now()\n\t\tsc.delJobs()\n\n\t}\n\n\tsc.updateMetrics(ch)\n}", "func Collect(metrics []Metric, c CloudWatchService, namespace string) {\n\tid, err := GetInstanceID()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfor _, metric := range metrics {\n\t\tmetric.Collect(id, c, namespace)\n\t}\n}", "func (c *Collector) Collect(ch chan<- prometheus.Metric) {\n\tip := os.Getenv(\"DYNO_INSTANCE\")\n\tif ip == \"\" {\n\t\tlogg.Error(\"could not get ip address from env variable: DYNO_INSTANCE\")\n\t}\n\n\ttoken := os.Getenv(\"DYNO_TOKEN\")\n\tif token == \"\" {\n\t\tlogg.Error(\"could not get token from env variable: DYNO_TOKEN\")\n\t}\n\n\tvar rack, dc string\n\tir, err := c.dyno.Info()\n\tif err != nil {\n\t\tlogg.Error(err.Error())\n\t} else {\n\t\track = ir.Rack\n\t\tdc = ir.DC\n\n\t\tch <- c.uptime.mustNewConstMetric(float64(ir.Uptime), rack, dc, token, ip)\n\t\tch <- c.clientConnections.mustNewConstMetric(float64(ir.Pool.ClientConnections), rack, dc, token, ip)\n\t\tch <- c.clientReadRequests.mustNewConstMetric(float64(ir.Pool.ClientReadRequests), rack, dc, token, ip)\n\t\tch <- c.clientWriteRequests.mustNewConstMetric(float64(ir.Pool.ClientWriteRequests), rack, dc, token, ip)\n\t\tch <- 
c.clientDroppedRequests.mustNewConstMetric(float64(ir.Pool.ClientDroppedRequests), rack, dc, token, ip)\n\t}\n\n\tstateVal := 1 // until proven otherwise\n\tstateStr := \"unknown\" // always have a value for the state label\n\n\tstate, err := c.dyno.GetState()\n\tif err != nil {\n\t\tstateVal = 0\n\t\tlogg.Error(err.Error())\n\t} else {\n\t\tstateStr = string(state)\n\t}\n\n\tif state != Normal {\n\t\tstateVal = 0\n\t}\n\tch <- c.state.mustNewConstMetric(float64(stateVal), stateStr, rack, dc, token, ip)\n\n\tsize, err := c.dyno.Backend.DBSize()\n\tif err != nil {\n\t\tlogg.Error(err.Error())\n\t} else {\n\t\tch <- c.dbSize.mustNewConstMetric(float64(size), rack, dc, token, ip)\n\t}\n}", "func (e *exporter) Collect(ch chan<- prometheus.Metric) {\n\twg := sync.WaitGroup{}\n\twg.Add(len(e.Collectors))\n\tfor name, c := range e.Collectors {\n\t\tgo func(name string, c Collector) {\n\t\t\texecute(name, c, ch)\n\t\t\twg.Done()\n\t\t}(name, c)\n\t}\n\twg.Wait()\n}", "func Collect() (result map[string]interface{}, err error) {\n\tresult = make(map[string]interface{})\n\n\tfor _, collector := range collectors {\n\t\tif shouldCollect(collector) {\n\t\t\tc, err := collector.Collect()\n\t\t\tif err != nil {\n\t\t\t\tlog.Warnf(\"[%s] %s\", collector.Name(), err)\n\t\t\t}\n\t\t\tif c != nil {\n\t\t\t\tresult[collector.Name()] = c\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tjunosTotalScrapeCount++\n\tch <- prometheus.MustNewConstMetric(junosDesc[\"ScrapesTotal\"], prometheus.CounterValue, junosTotalScrapeCount)\n\n\twg := &sync.WaitGroup{}\n\tfor _, collector := range e.Collectors {\n\t\twg.Add(1)\n\t\tgo e.runCollector(ch, collector, wg)\n\t}\n\twg.Wait()\n}", "func (n LXCCollector) Collect(ch chan<- prometheus.Metric) {\n\twg := sync.WaitGroup{}\n\twg.Add(len(n.collectors))\n\tfor name, c := range n.collectors {\n\t\tgo func(name string, c collector.Collector) {\n\t\t\texecute(name, c, ch)\n\t\t\twg.Done()\n\t\t}(name, c)\n\t}\n\twg.Wait()\n\tscrapeDurations.Collect(ch)\n}", "func (o *OSDCollector) Collect(ch chan<- prometheus.Metric) {\n\tif err := o.collectOSDPerf(); err != nil {\n\t\tlog.Println(\"failed collecting osd perf stats:\", err)\n\t}\n\n\tif err := o.collectOSDDump(); err != nil {\n\t\tlog.Println(\"failed collecting osd dump:\", err)\n\t}\n\n\tif err := o.collectOSDDF(); err != nil {\n\t\tlog.Println(\"failed collecting osd metrics:\", err)\n\t}\n\n\tif err := o.collectOSDTreeDown(ch); err != nil {\n\t\tlog.Println(\"failed collecting osd metrics:\", err)\n\t}\n\n\tfor _, metric := range o.collectorList() {\n\t\tmetric.Collect(ch)\n\t}\n\n\tif err := o.collectOSDScrubState(ch); err != nil {\n\t\tlog.Println(\"failed collecting osd scrub state:\", err)\n\t}\n}", "func (c *Client) Collect(ch chan<- prometheus.Metric) {\n\tc.metrics.functionInvocation.Collect(ch)\n\tc.metrics.functionsHistogram.Collect(ch)\n\tc.metrics.queueHistogram.Collect(ch)\n\tc.metrics.functionInvocationStarted.Collect(ch)\n\tc.metrics.serviceReplicasGauge.Reset()\n\tfor _, service := range c.services {\n\t\tvar serviceName string\n\t\tif len(service.Namespace) > 0 {\n\t\t\tserviceName = fmt.Sprintf(\"%s.%s\", service.Name, service.Namespace)\n\t\t} else {\n\t\t\tserviceName = service.Name\n\t\t}\n\t\tc.metrics.serviceReplicasGauge.\n\t\t\tWithLabelValues(serviceName).\n\t\t\tSet(float64(service.Replicas))\n\t}\n\tc.metrics.serviceReplicasGauge.Collect(ch)\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from 
concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\t// Reset metrics.\n\tfor _, vec := range e.gauges {\n\t\tvec.Reset()\n\t}\n\n\tfor _, vec := range e.counters {\n\t\tvec.Reset()\n\t}\n\n\tresp, err := e.client.Get(e.URI)\n\tif err != nil {\n\t\te.up.Set(0)\n\t\tlog.Printf(\"Error while querying Elasticsearch: %v\", err)\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\tlog.Printf(\"Failed to read ES response body: %v\", err)\n\t\te.up.Set(0)\n\t\treturn\n\t}\n\n\te.up.Set(1)\n\n\tvar all_stats NodeStatsResponse\n\terr = json.Unmarshal(body, &all_stats)\n\n\tif err != nil {\n\t\tlog.Printf(\"Failed to unmarshal JSON into struct: %v\", err)\n\t\treturn\n\t}\n\n\t// Regardless of whether we're querying the local host or the whole\n\t// cluster, here we can just iterate through all nodes found.\n\n\tfor node, stats := range all_stats.Nodes {\n\t\tlog.Printf(\"Processing node %v\", node)\n\t\t// GC Stats\n\t\tfor collector, gcstats := range stats.JVM.GC.Collectors {\n\t\t\te.counters[\"jvm_gc_collection_count\"].WithLabelValues(all_stats.ClusterName, stats.Name, collector).Set(float64(gcstats.CollectionCount))\n\t\t\te.counters[\"jvm_gc_collection_time_in_millis\"].WithLabelValues(all_stats.ClusterName, stats.Name, collector).Set(float64(gcstats.CollectionTime))\n\t\t}\n\n\t\t// Breaker stats\n\t\tfor breaker, bstats := range stats.Breakers {\n\t\t\te.gauges[\"breakers_estimated_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name, breaker).Set(float64(bstats.EstimatedSize))\n\t\t\te.gauges[\"breakers_limit_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name, breaker).Set(float64(bstats.LimitSize))\n\t\t}\n\n\t\t// JVM Memory Stats\n\t\te.gauges[\"jvm_mem_heap_committed_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.HeapCommitted))\n\t\te.gauges[\"jvm_mem_heap_used_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.HeapUsed))\n\t\te.gauges[\"jvm_mem_heap_max_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.HeapMax))\n\t\te.gauges[\"jvm_mem_non_heap_committed_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.NonHeapCommitted))\n\t\te.gauges[\"jvm_mem_non_heap_used_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.JVM.Mem.NonHeapUsed))\n\n\t\t// Indices Stats\n\t\te.gauges[\"indices_fielddata_evictions\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FieldData.Evictions))\n\t\te.gauges[\"indices_fielddata_memory_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FieldData.MemorySize))\n\t\te.gauges[\"indices_filter_cache_evictions\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FilterCache.Evictions))\n\t\te.gauges[\"indices_filter_cache_memory_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.FilterCache.MemorySize))\n\n\t\te.gauges[\"indices_docs_count\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Docs.Count))\n\t\te.gauges[\"indices_docs_deleted\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Docs.Deleted))\n\n\t\te.gauges[\"indices_segments_memory_in_bytes\"].WithLabelValues(all_stats.ClusterName, 
stats.Name).Set(float64(stats.Indices.Segments.Memory))\n\n\t\te.gauges[\"indices_store_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Store.Size))\n\t\te.counters[\"indices_store_throttle_time_in_millis\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Store.ThrottleTime))\n\n\t\te.counters[\"indices_flush_total\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Flush.Total))\n\t\te.counters[\"indices_flush_time_in_millis\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Indices.Flush.Time))\n\n\t\t// Transport Stats\n\t\te.counters[\"transport_rx_count\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.RxCount))\n\t\te.counters[\"transport_rx_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.RxSize))\n\t\te.counters[\"transport_tx_count\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.TxCount))\n\t\te.counters[\"transport_tx_size_in_bytes\"].WithLabelValues(all_stats.ClusterName, stats.Name).Set(float64(stats.Transport.TxSize))\n\t}\n\n\t// Report metrics.\n\tch <- e.up\n\n\tfor _, vec := range e.counters {\n\t\tvec.Collect(ch)\n\t}\n\n\tfor _, vec := range e.gauges {\n\t\tvec.Collect(ch)\n\t}\n}", "func (p *Metrics) Collect(c chan<- prometheus.Metric) {\n\t//rlockCollect(c,&p.mucout,p.counters)\n\t//rlockCollect(c,&p.mugau,p.gauges)\n\t//rlockCollect(c,&p.muhist,p.historams)\n\t//rlockCollect(c,&p.musumm,p.summaries)\n\tp.rlockCollectCounter(c)\n\tp.rlockCollectGauge(c)\n\tp.rlockCollectHistorams(c)\n\tp.rlockCollectSummaries(c)\n}", "func (b Blackbox) Collect(metrics chan<- prometheus.Metric) {\n\tb.fetchReferenceDiscoveryMetrics.Collect(metrics)\n\tb.httpPostMetrics.Collect(metrics)\n\tb.wantedRefs.Collect(metrics)\n}", "func (w *Writer) Collect(ch chan<- prometheus.Metric) {\n\tw.kafkaWriteStatus.Collect(ch)\n\tw.queuedForWrites.Collect(ch)\n}", "func (m httpReferenceDiscoveryMetrics) Collect(metrics chan<- prometheus.Metric) {\n\tm.firstPacket.Collect(metrics)\n\tm.totalTime.Collect(metrics)\n\tm.advertisedRefs.Collect(metrics)\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\tup, result := e.scrape(ch)\n\n\tch <- e.totalScrapes\n\tch <- e.jsonParseFailures\n\tch <- prometheus.MustNewConstMetric(iqAirUp, prometheus.GaugeValue, up)\n\tch <- prometheus.MustNewConstMetric(iqAirCO2, prometheus.GaugeValue, float64(result.CO2))\n\tch <- prometheus.MustNewConstMetric(iqAirP25, prometheus.GaugeValue, float64(result.P25))\n\tch <- prometheus.MustNewConstMetric(iqAirP10, prometheus.GaugeValue, float64(result.P10))\n\tch <- prometheus.MustNewConstMetric(iqAirTemp, prometheus.GaugeValue, float64(result.Temperature))\n\tch <- prometheus.MustNewConstMetric(iqAirHumidity, prometheus.GaugeValue, float64(result.Humidity))\n}", "func (c collector) Collect(ch chan<- prometheus.Metric) {\n\tvar wg sync.WaitGroup\n\n\t// We don't bail out on errors because those can happen if there is a race condition between\n\t// the destruction of a container and us getting to read the cgroup data. 
We just don't report\n\t// the values we don't get.\n\n\tcollectors := []func(string, *regexp.Regexp){\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tnuma, err := cgroups.GetNumaStats(cgroupPath(\"memory\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateNumaStatMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], numa)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect NUMA stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tmemory, err := cgroups.GetMemoryUsage(cgroupPath(\"memory\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateMemoryUsageMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], memory)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect memory usage stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tmigrate, err := cgroups.GetCPUSetMemoryMigrate(cgroupPath(\"cpuset\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateMemoryMigrateMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], migrate)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect memory migration stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tcpuAcctUsage, err := cgroups.GetCPUAcctStats(cgroupPath(\"cpuacct\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateCPUAcctUsageMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], cpuAcctUsage)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect CPU accounting stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\thugeTlbUsage, err := cgroups.GetHugetlbUsage(cgroupPath(\"hugetlb\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateHugeTlbUsageMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], hugeTlbUsage)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect hugetlb stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tblkioDeviceUsage, err := cgroups.GetBlkioThrottleBytes(cgroupPath(\"blkio\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateBlkioDeviceUsageMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], blkioDeviceUsage)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect blkio stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t}\n\n\tcontainerIDRegexp := regexp.MustCompile(`[a-z0-9]{64}`)\n\n\tfor _, path := range walkCgroups() {\n\t\twg.Add(len(collectors))\n\t\tfor _, fn := range collectors {\n\t\t\tgo fn(path, containerIDRegexp)\n\t\t}\n\t}\n\n\t// We need to wait so that the response channel doesn't get closed.\n\twg.Wait()\n}", "func (c *MosquittoCounter) Collect(ch chan<- prometheus.Metric) {\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.Desc,\n\t\tprometheus.CounterValue,\n\t\tc.counter.value,\n\t)\n}", "func (coll WmiCollector) Collect(ch chan<- prometheus.Metric) {\n\tdefer trace()()\n\twg := sync.WaitGroup{}\n\twg.Add(len(coll.collectors))\n\tfor name, c := range coll.collectors {\n\t\tgo func(name string, c collector.Collector) {\n\t\t\texecute(name, c, ch)\n\t\t\twg.Done()\n\t\t}(name, c)\n\t}\n\twg.Wait()\n\tscrapeDurations.Collect(ch)\n}", "func (c *MemoryCollector) Collect(ch chan<- prometheus.Metric) error {\n\tif desc, err := c.collect(ch); err != nil {\n\t\tlog.Error(\"failed collecting memory metrics:\", desc, err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func (c *DiskCache) Collect(metrics chan<- prometheus.Metric) 
{\n\tc.requestTotals.Collect(metrics)\n\tc.missTotals.Collect(metrics)\n\tc.bytesStoredtotals.Collect(metrics)\n\tc.bytesFetchedtotals.Collect(metrics)\n\tc.bytesLoserTotals.Collect(metrics)\n\tc.errTotal.Collect(metrics)\n\tc.walkerRemovalTotal.Collect(metrics)\n\tc.walkerErrorTotal.Collect(metrics)\n\tc.walkerEmptyDirTotal.Collect(metrics)\n\tc.walkerEmptyDirRemovalTotal.Collect(metrics)\n}", "func (p *Collector) Collect(c chan<- prometheus.Metric) {\n\tp.Sink.mu.Lock()\n\tdefer p.Sink.mu.Unlock()\n\n\texpire := p.Sink.expiration != 0\n\tnow := time.Now()\n\tfor k, v := range p.Sink.gauges {\n\t\tlast := p.Sink.updates[k]\n\t\tif expire && last.Add(p.Sink.expiration).Before(now) {\n\t\t\tdelete(p.Sink.updates, k)\n\t\t\tdelete(p.Sink.gauges, k)\n\t\t} else {\n\t\t\tv.Collect(c)\n\t\t}\n\t}\n\tfor k, v := range p.Sink.summaries {\n\t\tlast := p.Sink.updates[k]\n\t\tif expire && last.Add(p.Sink.expiration).Before(now) {\n\t\t\tdelete(p.Sink.updates, k)\n\t\t\tdelete(p.Sink.summaries, k)\n\t\t} else {\n\t\t\tv.Collect(c)\n\t\t}\n\t}\n\tfor k, v := range p.Sink.counters {\n\t\tlast := p.Sink.updates[k]\n\t\tif expire && last.Add(p.Sink.expiration).Before(now) {\n\t\t\tdelete(p.Sink.updates, k)\n\t\t\tdelete(p.Sink.counters, k)\n\t\t} else {\n\t\t\tv.Collect(c)\n\t\t}\n\t}\n}", "func (a collectorAdapter) Collect(ch chan<- prometheus.Metric) {\n\tif err := a.Update(ch); err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to update collector: %v\", err))\n\t}\n}", "func (w *HotCache) CollectMetrics() {\n\tw.writeFlow.CollectMetrics(\"write\")\n\tw.readFlow.CollectMetrics(\"read\")\n}", "func (c *grpcClientManagerCollector) Collect(ch chan<- prometheus.Metric) {\n\tfor _, con := range c.cm.Metrics().Connections {\n\t\tl := []string{con.Target}\n\t\tch <- prometheus.MustNewConstMetric(connectionStateDesc, prometheus.GaugeValue, float64(con.State), l...)\n\t}\n}", "func (r *RGWCollector) Collect(ch chan<- prometheus.Metric, version *Version) {\n\tif !r.background {\n\t\tr.logger.WithField(\"background\", r.background).Debug(\"collecting RGW GC stats\")\n\t\terr := r.collect()\n\t\tif err != nil {\n\t\t\tr.logger.WithField(\"background\", r.background).WithError(err).Error(\"error collecting RGW GC stats\")\n\t\t}\n\t}\n\n\tfor _, metric := range r.collectorList() {\n\t\tmetric.Collect(ch)\n\t}\n}", "func (c *TeamsCollector) Collect(ch chan<- prometheus.Metric) {\n\n\tteams := getTotalTeams()\n\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.totalTeamsGaugeDesc,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(teams),\n\t)\n}", "func Collect(mon Monitor) map[string]float64 {\n\trv := make(map[string]float64)\n\tmon.Stats(func(name string, val float64) {\n\t\trv[name] = val\n\t})\n\treturn rv\n}", "func (k *KubernetesCollector) Collect(ch chan<- prometheus.Metric) error {\n\tvar metric prometheus.Metric\n\n\tlistOptions := metav1.ListOptions{\n\t\tLabelSelector: labels.Everything().String(),\n\t\tFieldSelector: fields.Everything().String(),\n\t}\n\tnodes, err := k.client.CoreV1().Nodes().List(context.TODO(), listOptions)\n\tif err != nil {\n\t\treturn trace.Wrap(err, \"failed to query nodes: %v\", err)\n\t}\n\n\tfor _, item := range nodes.Items {\n\t\tfor _, condition := range item.Status.Conditions {\n\t\t\tif condition.Type != v1.NodeReady {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif condition.Status == v1.ConditionTrue {\n\t\t\t\tif metric, err = k.nodeIsReady.newConstMetric(1.0, item.Name); err != nil {\n\t\t\t\t\treturn trace.Wrap(err, \"failed to create prometheus metric: %v\", err)\n\t\t\t\t}\n\t\t\t} else 
{\n\t\t\t\tif metric, err = k.nodeIsReady.newConstMetric(0.0, item.Name); err != nil {\n\t\t\t\t\treturn trace.Wrap(err, \"failed to create prometheus metric: %v\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tch <- metric\n\t\t}\n\t}\n\n\treturn nil\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tvar (\n\t\tdata *Data\n\t\terr error\n\t)\n\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\te.resetGaugeVecs() // Clean starting point\n\n\tvar endpointOfAPI []string\n\tif strings.HasSuffix(rancherURL, \"v3\") || strings.HasSuffix(rancherURL, \"v3/\") {\n\t\tendpointOfAPI = endpointsV3\n\t} else {\n\t\tendpointOfAPI = endpoints\n\t}\n\n\tcacheExpired := e.IsCacheExpired()\n\n\t// Range over the pre-configured endpoints array\n\tfor _, p := range endpointOfAPI {\n\t\tif cacheExpired {\n\t\t\tdata, err = e.gatherData(e.rancherURL, e.resourceLimit, e.accessKey, e.secretKey, p, ch)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Error getting JSON from URL %s\", p)\n\t\t\t\treturn\n\t\t\t}\n\t\t\te.cache[p] = data\n\t\t} else {\n\t\t\td, ok := e.cache[p]\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdata = d\n\t\t}\n\n\t\tif err := e.processMetrics(data, p, e.hideSys, ch); err != nil {\n\t\t\tlog.Errorf(\"Error scraping rancher url: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tlog.Infof(\"Metrics successfully processed for %s\", p)\n\t}\n\n\tif cacheExpired {\n\t\te.RenewCache()\n\t}\n\n\tfor _, m := range e.gaugeVecs {\n\t\tm.Collect(ch)\n\t}\n}", "func (c *SystemCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {\n\tif desc, err := c.collect(ctx, ch); err != nil {\n\t\t_ = level.Error(c.logger).Log(\"failed collecting system metrics\", \"desc\", desc, \"err\", err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func (m *ClientMetrics) Collect(ch chan<- prometheus.Metric) {\n\tm.clientHandledSummary.Collect(ch)\n}", "func (pc *NginxProcessesMetricsCollector) Collect(ch chan<- prometheus.Metric) {\n\tpc.updateWorkerProcessCount()\n\tpc.workerProcessTotal.Collect(ch)\n}", "func (m *Monitoring) collect() {\n\tfor {\n\t\tevents, ok := <-m.ch\n\t\tif !ok {\n\t\t\tlog.Printf(\"event channel is closed\")\n\t\t\treturn\n\t\t}\n\n\t\tif err := m.w.Write(context.Background(), events); err != nil {\n\t\t\tlog.Printf(\"failed to write metric events %+v: %v\", events, err)\n\t\t}\n\t}\n\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tvar up float64 = 1\n\n\tglobalMutex.Lock()\n\tdefer globalMutex.Unlock()\n\n\tif e.config.resetStats && !globalResetExecuted {\n\t\t// Its time to try to reset the stats\n\t\tif e.resetStatsSemp1() {\n\t\t\tlevel.Info(e.logger).Log(\"msg\", \"Statistics successfully reset\")\n\t\t\tglobalResetExecuted = true\n\t\t\tup = 1\n\t\t} else {\n\t\t\tup = 0\n\t\t}\n\t}\n\n\tif e.config.details {\n\t\tif up > 0 {\n\t\t\tup = e.getClientSemp1(ch)\n\t\t}\n\t\tif up > 0 {\n\t\t\tup = e.getQueueSemp1(ch)\n\t\t}\n\t\tif up > 0 && e.config.scrapeRates {\n\t\t\tup = e.getQueueRatesSemp1(ch)\n\t\t}\n\t} else { // Basic\n\t\tif up > 0 {\n\t\t\tup = e.getRedundancySemp1(ch)\n\t\t}\n\t\tif up > 0 {\n\t\t\tup = e.getSpoolSemp1(ch)\n\t\t}\n\t\tif up > 0 {\n\t\t\tup = e.getHealthSemp1(ch)\n\t\t}\n\t\tif up > 0 {\n\t\t\tup = e.getVpnSemp1(ch)\n\t\t}\n\t}\n\n\tch <- prometheus.MustNewConstMetric(solaceUp, prometheus.GaugeValue, up)\n}", "func (dc *daemonsetCollector) Collect(ch chan<- prometheus.Metric) {\n\tdss, err := dc.store.List()\n\tif err != nil {\n\t\tScrapeErrorTotalMetric.With(prometheus.Labels{\"resource\": 
\"daemonset\"}).Inc()\n\t\tglog.Errorf(\"listing daemonsets failed: %s\", err)\n\t\treturn\n\t}\n\tScrapeErrorTotalMetric.With(prometheus.Labels{\"resource\": \"daemonset\"}).Add(0)\n\n\tResourcesPerScrapeMetric.With(prometheus.Labels{\"resource\": \"daemonset\"}).Observe(float64(len(dss)))\n\tfor _, d := range dss {\n\t\tdc.collectDaemonSet(ch, d)\n\t}\n\n\tglog.V(4).Infof(\"collected %d daemonsets\", len(dss))\n}", "func (c *InterfacesCollector) Collect(ch chan<- prometheus.Metric) {\n\tfor _, m := range c.collectors() {\n\t\tm.Collect(ch)\n\t}\n}", "func (collector *Metrics) Collect(ch chan<- prometheus.Metric) {\n\n\tcollectedIssues, err := fetchJiraIssues()\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn\n\t}\n\n\tfor _, issue := range collectedIssues.Issues {\n\t\tcreatedTimestamp := convertToUnixTime(issue.Fields.Created)\n\t\tch <- prometheus.MustNewConstMetric(collector.issue, prometheus.CounterValue, createdTimestamp, issue.Fields.Status.Name, issue.Fields.Project.Name, issue.Key, issue.Fields.Assignee.Name, issue.Fields.Location.Name, issue.Fields.Priority.Name, issue.Fields.Level.Name, issue.Fields.RequestType.Name, issue.Fields.Feedback, issue.Fields.Urgency.Name, issue.Fields.IssueType.Name, issue.Fields.Reporter.Name, issue.Fields.Satisfaction)\n\t}\n}", "func (c *collector) Collect(ch chan<- prometheus.Metric) {\n\tc.mu.Lock()\n\t// Get the last views\n\tviews := c.views\n\t// Now clear them out for the next accumulation\n\tc.views = c.views[:0]\n\tc.mu.Unlock()\n\n\tif len(views) == 0 {\n\t\treturn\n\t}\n\n\t// seen is necessary because within each Collect cycle\n\t// if a Metric is sent to Prometheus with the same make up\n\t// that is \"name\" and \"labels\", it will error out.\n\tseen := make(map[prometheus.Metric]bool)\n\n\tfor _, vd := range views {\n\t\tfor _, row := range vd.Rows {\n\t\t\tmetric := c.toMetric(vd.View, row)\n\t\t\tif _, ok := seen[metric]; !ok && metric != nil {\n\t\t\t\tch <- metric\n\t\t\t\tseen[metric] = true\n\t\t\t}\n\t\t}\n\t}\n}", "func (m *ClientMetrics) Collect(ch chan<- prom.Metric) {\n\tm.clientStartedCounter.Collect(ch)\n\tm.clientHandledCounter.Collect(ch)\n\tm.clientStreamMsgReceived.Collect(ch)\n\tm.clientStreamMsgSent.Collect(ch)\n\tif m.clientHandledHistogramEnabled {\n\t\tm.clientHandledHistogram.Collect(ch)\n\t}\n}", "func (n NodeCollector) Collect(ch chan<- prometheus.Metric) {\n\twg := sync.WaitGroup{}\n\twg.Add(len(n.collectors))\n\tfor name, c := range n.collectors {\n\t\tgo func(name string, c collector.Collector) {\n\t\t\texecute(name, c, ch)\n\t\t\twg.Done()\n\t\t}(name, c)\n\t}\n\twg.Wait()\n\tscrapeDurations.Collect(ch)\n}", "func (k *KACollector) Collect(ch chan<- prometheus.Metric) {\n\tk.mutex.Lock()\n\tdefer k.mutex.Unlock()\n\n\tvar err error\n\tvar kaStats []KAStats\n\n\tif k.useJSON {\n\t\tkaStats, err = k.json()\n\t\tif err != nil {\n\t\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_up\"], prometheus.GaugeValue, 0)\n\t\t\tlog.Printf(\"keepalived_exporter: %v\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tkaStats, err = k.text()\n\t\tif err != nil {\n\t\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_up\"], prometheus.GaugeValue, 0)\n\t\t\tlog.Printf(\"keepalived_exporter: %v\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_up\"], prometheus.GaugeValue, 1)\n\n\tfor _, st := range kaStats {\n\t\tstate := \"\"\n\t\tif _, ok := state2string[st.Data.State]; ok {\n\t\t\tstate = state2string[st.Data.State]\n\t\t}\n\n\t\tch <- 
prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_advert_rcvd\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.AdvertRcvd), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_advert_sent\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.AdvertSent), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_become_master\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.BecomeMaster), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_release_master\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.ReleaseMaster), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_packet_len_err\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.PacketLenErr), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_advert_interval_err\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.AdvertIntervalErr), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_ip_ttl_err\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.AdvertIntervalErr), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_invalid_type_rcvd\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.InvalidTypeRcvd), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_addr_list_err\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.AddrListErr), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_invalid_authtype\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.InvalidAuthtype), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_authtype_mismatch\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.AuthtypeMismatch), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_auth_failure\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.AuthFailure), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_pri_zero_rcvd\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.PriZeroRcvd), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_pri_zero_sent\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.PriZeroSent), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t}\n\n\tif k.handle == nil {\n\t\treturn\n\t}\n\n\tsvcs, err := k.handle.GetServices()\n\tif err != nil {\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_up\"], prometheus.GaugeValue, 0)\n\t\tlog.Printf(\"keepalived_exporter: services: %v\", err)\n\t\treturn\n\t}\n\n\tfor _, s := range svcs {\n\t\tdsts, err := k.handle.GetDestinations(s)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"keepalived_exporter: destinations: %v\", 
err)\n\t\t\tcontinue\n\t\t}\n\n\t\taddr := s.Address.String() + \":\" + strconv.Itoa(int(s.Port))\n\t\tproto := strconv.Itoa(int(s.Protocol))\n\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_vip_in_packets\"], prometheus.CounterValue,\n\t\t\tfloat64(s.Stats.PacketsIn), addr, proto)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_vip_out_packets\"], prometheus.CounterValue,\n\t\t\tfloat64(s.Stats.PacketsOut), addr, proto)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_vip_in_bytes\"], prometheus.CounterValue,\n\t\t\tfloat64(s.Stats.BytesIn), addr, proto)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_vip_out_bytes\"], prometheus.CounterValue,\n\t\t\tfloat64(s.Stats.BytesOut), addr, proto)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_vip_conn\"], prometheus.CounterValue,\n\t\t\tfloat64(s.Stats.Connections), addr, proto)\n\n\t\tfor _, d := range dsts {\n\t\t\taddr := d.Address.String() + \":\" + strconv.Itoa(int(d.Port))\n\n\t\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_rs_in_packets\"], prometheus.CounterValue,\n\t\t\t\tfloat64(d.Stats.PacketsIn), addr, proto)\n\t\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_rs_out_packets\"], prometheus.CounterValue,\n\t\t\t\tfloat64(d.Stats.PacketsOut), addr, proto)\n\t\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_rs_in_bytes\"], prometheus.CounterValue,\n\t\t\t\tfloat64(d.Stats.BytesIn), addr, proto)\n\t\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_rs_out_bytes\"], prometheus.CounterValue,\n\t\t\t\tfloat64(d.Stats.BytesOut), addr, proto)\n\t\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_rs_conn\"], prometheus.CounterValue,\n\t\t\t\tfloat64(d.Stats.Connections), addr, proto)\n\t\t}\n\t}\n}", "func (y *YarnMetrics) Collect(ch chan<- prometheus.Metric) {\n\ty.metricsLock.Lock()\n\tdefer y.metricsLock.Unlock()\n\tif y.metrics != nil {\n\t\tch <- prometheus.MustNewConstMetric(y.containerStatusDesc, prometheus.GaugeValue,\n\t\t\tfloat64(y.metrics.ContainersLaunched), y.nodeName, \"launched\")\n\t\tch <- prometheus.MustNewConstMetric(y.containerStatusDesc, prometheus.GaugeValue,\n\t\t\tfloat64(y.metrics.ContainersCompleted), y.nodeName, \"completed\")\n\t\tch <- prometheus.MustNewConstMetric(y.containerStatusDesc, prometheus.GaugeValue,\n\t\t\tfloat64(y.metrics.ContainersFailed), y.nodeName, \"failed\")\n\t\tch <- prometheus.MustNewConstMetric(y.containerStatusDesc, prometheus.GaugeValue,\n\t\t\tfloat64(y.metrics.ContainersKilled), y.nodeName, \"killed\")\n\t\tch <- prometheus.MustNewConstMetric(y.containerStatusDesc, prometheus.GaugeValue,\n\t\t\tfloat64(y.metrics.ContainersRunning), y.nodeName, \"running\")\n\t\tch <- prometheus.MustNewConstMetric(y.containerStatusDesc, prometheus.GaugeValue,\n\t\t\tfloat64(y.metrics.ContainersIniting), y.nodeName, \"initing\")\n\t}\n\tif y.nodeStatus != nil {\n\t\tch <- prometheus.MustNewConstMetric(y.nodeStatusDesc, prometheus.GaugeValue,\n\t\t\ty.nodeStatus.NodeHealthyFloat, y.nodeName)\n\t}\n}", "func (c *Collector) Collect(ch chan<- prometheus.Metric) {\n\tsess, err := sessions.CreateAWSSession()\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn\n\t}\n\n\t// Init WaitGroup. 
Without a WaitGroup the channel we write\n\t// results to will close before the goroutines finish\n\tvar wg sync.WaitGroup\n\twg.Add(len(c.Scrapers))\n\n\t// Iterate through all scrapers and invoke the scrape\n\tfor _, scraper := range c.Scrapers {\n\t\t// Wrape the scrape invocation in a goroutine, but we need to pass\n\t\t// the scraper into the function explicitly to re-scope the variable\n\t\t// the goroutine accesses. If we don't do this, we can sometimes hit\n\t\t// a case where the scraper reports results twice and the collector panics\n\t\tgo func(scraper *Scraper) {\n\t\t\t// Done call deferred until end of the scrape\n\t\t\tdefer wg.Done()\n\n\t\t\tlog.Debugf(\"Running scrape: %s\", scraper.ID)\n\t\t\tscrapeResults := scraper.Scrape(sess)\n\n\t\t\t// Iterate through scrape results and send the metric\n\t\t\tfor key, results := range scrapeResults {\n\t\t\t\tfor _, result := range results {\n\t\t\t\t\tch <- prometheus.MustNewConstMetric(scraper.Metrics[key].metric, result.Type, result.Value, result.Labels...)\n\t\t\t\t}\n\t\t\t}\n\t\t\tlog.Debugf(\"Scrape completed: %s\", scraper.ID)\n\t\t}(scraper)\n\t}\n\t// Wait\n\twg.Wait()\n}", "func (c *transportNodeCollector) Collect(ch chan<- prometheus.Metric) {\n\ttransportNodeMetrics := c.generateTransportNodeMetrics()\n\tfor _, metric := range transportNodeMetrics {\n\t\tch <- metric\n\t}\n}", "func (c *CadvisorCollector) Collect(ch chan<- datapoint.Datapoint) {\n\tc.collectMachineInfo(ch)\n\tc.collectVersionInfo(ch)\n\tc.collectContainersInfo(ch)\n\t//c.errors.Collect(ch)\n}", "func (e *UwsgiExporter) Collect(ch chan<- prometheus.Metric) {\n\tstartTime := time.Now()\n\terr := e.execute(ch)\n\td := time.Since(startTime).Seconds()\n\n\tif err != nil {\n\t\tlog.Errorf(\"ERROR: scrape failed after %fs: %s\", d, err)\n\t\te.uwsgiUp.Set(0)\n\t\te.scrapeDurations.WithLabelValues(\"error\").Observe(d)\n\t} else {\n\t\tlog.Debugf(\"OK: scrape successful after %fs.\", d)\n\t\te.uwsgiUp.Set(1)\n\t\te.scrapeDurations.WithLabelValues(\"success\").Observe(d)\n\t}\n\n\te.uwsgiUp.Collect(ch)\n\te.scrapeDurations.Collect(ch)\n}", "func (co *VMICollector) Collect(ch chan<- prometheus.Metric) {\n\tcachedObjs := co.vmiInformer.GetIndexer().List()\n\tif len(cachedObjs) == 0 {\n\t\tlog.Log.V(4).Infof(\"No VMIs detected\")\n\t\treturn\n\t}\n\n\tvmis := make([]*k6tv1.VirtualMachineInstance, len(cachedObjs))\n\n\tfor i, obj := range cachedObjs {\n\t\tvmis[i] = obj.(*k6tv1.VirtualMachineInstance)\n\t}\n\n\tco.updateVMIsPhase(vmis, ch)\n\tco.updateVMIMetrics(vmis, ch)\n\treturn\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tresp, err := e.Pihole.GetMetrics()\n\tif err != nil {\n\t\tlog.Errorf(\"Pihole error: %s\", err.Error())\n\t\treturn\n\t}\n\tlog.Debugf(\"PiHole metrics: %#v\", resp)\n\tch <- prometheus.MustNewConstMetric(\n\t\tdomainsBeingBlocked, prometheus.CounterValue, float64(resp.DomainsBeingBlocked))\n\n\tch <- prometheus.MustNewConstMetric(\n\t\tdnsQueries, prometheus.CounterValue, float64(resp.DNSQueriesToday))\n\n\tch <- prometheus.MustNewConstMetric(\n\t\tadsBlocked, prometheus.CounterValue, float64(resp.AdsBlockedToday))\n\n\tch <- prometheus.MustNewConstMetric(\n\t\tadsPercentage, prometheus.CounterValue, float64(resp.AdsPercentageToday))\n\n\tfor k, v := range resp.Querytypes {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tqueryTypes, prometheus.CounterValue, v, k)\n\t}\n\tfor k, v := range resp.TopQueries {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\ttopQueries, prometheus.CounterValue, float64(v), k)\n\t}\n\tfor 
k, v := range resp.TopAds {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\ttopAds, prometheus.CounterValue, float64(v), k)\n\n\t}\n\tfor k, v := range resp.TopSources {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\ttopSources, prometheus.CounterValue, float64(v), k)\n\t}\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tupValue := 1\n\n\tif err := e.collect(ch); err != nil {\n\t\tlog.Printf(\"Error scraping clickhouse: %s\", err)\n\t\te.scrapeFailures.Inc()\n\t\te.scrapeFailures.Collect(ch)\n\n\t\tupValue = 0\n\t}\n\n\tch <- prometheus.MustNewConstMetric(\n\t\tprometheus.NewDesc(\n\t\t\tprometheus.BuildFQName(namespace, \"\", \"up\"),\n\t\t\t\"Was the last query of ClickHouse successful.\",\n\t\t\tnil, nil,\n\t\t),\n\t\tprometheus.GaugeValue, float64(upValue),\n\t)\n\n}", "func (c *UPSCollector) Collect(ch chan<- prometheus.Metric) {\n\tif desc, err := c.collect(ch); err != nil {\n\t\tlog.Printf(\"[ERROR] failed collecting UPS metric %v: %v\", desc, err)\n\t\tch <- prometheus.NewInvalidMetric(desc, err)\n\t\treturn\n\t}\n}", "func (dms *MemoryMetricsCollector) CollectedMetrics() []operation.MetricOperation {\n\treturn dms.metrics\n}", "func (c *StatsCollector) Collect(metricChannel chan<- prometheus.Metric) {\n\t// read all stats from Kamailio\n\tif completeStatMap, err := c.fetchStats(); err == nil {\n\t\t// and produce various prometheus.Metric for well-known stats\n\t\tproduceMetrics(completeStatMap, metricChannel)\n\t\t// produce prometheus.Metric objects for scripted stats (if any)\n\t\tconvertScriptedMetrics(completeStatMap, metricChannel)\n\t} else {\n\t\t// something went wrong\n\t\t// TODO: add a error metric\n\t\tlog.Error(\"Could not fetch values from kamailio\", err)\n\t}\n}", "func (c *MSCluster_ClusterCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {\n\tvar dst []MSCluster_Cluster\n\tq := queryAll(&dst, c.logger)\n\tif err := wmi.QueryNamespace(q, &dst, \"root/MSCluster\"); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, v := range dst {\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.AddEvictDelay,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.AddEvictDelay),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.AdminAccessPoint,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.AdminAccessPoint),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.AutoAssignNodeSite,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.AutoAssignNodeSite),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.AutoBalancerLevel,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.AutoBalancerLevel),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.AutoBalancerMode,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.AutoBalancerMode),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.BackupInProgress,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.BackupInProgress),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.BlockCacheSize,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.BlockCacheSize),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusSvcHangTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusSvcHangTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusSvcRegroupOpeningTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusSvcRegroupOpeningTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- 
prometheus.MustNewConstMetric(\n\t\t\tc.ClusSvcRegroupPruningTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusSvcRegroupPruningTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusSvcRegroupStageTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusSvcRegroupStageTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusSvcRegroupTickInMilliseconds,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusSvcRegroupTickInMilliseconds),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusterEnforcedAntiAffinity,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusterEnforcedAntiAffinity),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusterFunctionalLevel,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusterFunctionalLevel),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusterGroupWaitDelay,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusterGroupWaitDelay),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusterLogLevel,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusterLogLevel),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusterLogSize,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusterLogSize),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusterUpgradeVersion,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusterUpgradeVersion),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.CrossSiteDelay,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.CrossSiteDelay),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.CrossSiteThreshold,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.CrossSiteThreshold),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.CrossSubnetDelay,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.CrossSubnetDelay),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.CrossSubnetThreshold,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.CrossSubnetThreshold),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.CsvBalancer,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.CsvBalancer),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DatabaseReadWriteMode,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DatabaseReadWriteMode),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DefaultNetworkRole,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DefaultNetworkRole),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DetectedCloudPlatform,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DetectedCloudPlatform),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DetectManagedEvents,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DetectManagedEvents),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DetectManagedEventsThreshold,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DetectManagedEventsThreshold),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DisableGroupPreferredOwnerRandomization,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DisableGroupPreferredOwnerRandomization),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- 
prometheus.MustNewConstMetric(\n\t\t\tc.DrainOnShutdown,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DrainOnShutdown),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DynamicQuorumEnabled,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DynamicQuorumEnabled),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.EnableSharedVolumes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.EnableSharedVolumes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.FixQuorum,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.FixQuorum),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.GracePeriodEnabled,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.GracePeriodEnabled),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.GracePeriodTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.GracePeriodTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.GroupDependencyTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.GroupDependencyTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.HangRecoveryAction,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.HangRecoveryAction),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.IgnorePersistentStateOnStartup,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.IgnorePersistentStateOnStartup),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.LogResourceControls,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.LogResourceControls),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.LowerQuorumPriorityNodeId,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.LowerQuorumPriorityNodeId),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.MaxNumberOfNodes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.MaxNumberOfNodes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.MessageBufferLength,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.MessageBufferLength),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.MinimumNeverPreemptPriority,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.MinimumNeverPreemptPriority),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.MinimumPreemptorPriority,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.MinimumPreemptorPriority),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.NetftIPSecEnabled,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.NetftIPSecEnabled),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.PlacementOptions,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.PlacementOptions),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.PlumbAllCrossSubnetRoutes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.PlumbAllCrossSubnetRoutes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.PreventQuorum,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.PreventQuorum),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.QuarantineDuration,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.QuarantineDuration),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.QuarantineThreshold,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.QuarantineThreshold),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- 
prometheus.MustNewConstMetric(\n\t\t\tc.QuorumArbitrationTimeMax,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.QuorumArbitrationTimeMax),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.QuorumArbitrationTimeMin,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.QuorumArbitrationTimeMin),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.QuorumLogFileSize,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.QuorumLogFileSize),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.QuorumTypeValue,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.QuorumTypeValue),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.RequestReplyTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.RequestReplyTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ResiliencyDefaultPeriod,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ResiliencyDefaultPeriod),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ResiliencyLevel,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ResiliencyLevel),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ResourceDllDeadlockPeriod,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ResourceDllDeadlockPeriod),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.RootMemoryReserved,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.RootMemoryReserved),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.RouteHistoryLength,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.RouteHistoryLength),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DBusTypes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DBusTypes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DCacheDesiredState,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DCacheDesiredState),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DCacheFlashReservePercent,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DCacheFlashReservePercent),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DCachePageSizeKBytes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DCachePageSizeKBytes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DEnabled,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DEnabled),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DIOLatencyThreshold,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DIOLatencyThreshold),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DOptimizations,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DOptimizations),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.SameSubnetDelay,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.SameSubnetDelay),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.SameSubnetThreshold,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.SameSubnetThreshold),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.SecurityLevel,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.SecurityLevel),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.SecurityLevelForStorage,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.SecurityLevelForStorage),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- 
prometheus.MustNewConstMetric(\n\t\t\tc.SharedVolumeVssWriterOperationTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.SharedVolumeVssWriterOperationTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ShutdownTimeoutInMinutes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ShutdownTimeoutInMinutes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.UseClientAccessNetworksForSharedVolumes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.UseClientAccessNetworksForSharedVolumes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.WitnessDatabaseWriteTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.WitnessDatabaseWriteTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.WitnessDynamicWeight,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.WitnessDynamicWeight),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.WitnessRestartInterval,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.WitnessRestartInterval),\n\t\t\tv.Name,\n\t\t)\n\n\t}\n\n\treturn nil\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tlog.Infof(\"Syno exporter starting\")\n\tif e.Client == nil {\n\t\tlog.Errorf(\"Syno client not configured.\")\n\t\treturn\n\t}\n\terr := e.Client.Connect()\n\tif err != nil {\n\t\tlog.Errorln(\"Can't connect to Synology for SNMP: %s\", err)\n\t\treturn\n\t}\n\tdefer e.Client.SNMP.Conn.Close()\n\n\te.collectSystemMetrics(ch)\n\te.collectCPUMetrics(ch)\n\te.collectLoadMetrics(ch)\n\te.collectMemoryMetrics(ch)\n\te.collectNetworkMetrics(ch)\n\te.collectDiskMetrics(ch)\n\n\tlog.Infof(\"Syno exporter finished\")\n}", "func (c *libbeatCollector) Collect(ch chan<- prometheus.Metric) {\n\n\tfor _, i := range c.metrics {\n\t\tch <- prometheus.MustNewConstMetric(i.desc, i.valType, i.eval(c.stats))\n\t}\n\n\t// output.type with dynamic label\n\tch <- prometheus.MustNewConstMetric(libbeatOutputType, prometheus.CounterValue, float64(1), c.stats.LibBeat.Output.Type)\n\n}" ]
[ "0.75799054", "0.7552912", "0.7490085", "0.7468596", "0.74625045", "0.74528396", "0.7384188", "0.7378881", "0.7378881", "0.73669255", "0.73514974", "0.7350174", "0.7339614", "0.7323222", "0.73095244", "0.73002285", "0.72855175", "0.7246163", "0.72458863", "0.72411984", "0.72260153", "0.7214689", "0.72078705", "0.72051287", "0.719325", "0.71842724", "0.7183484", "0.7181023", "0.7154243", "0.7152048", "0.7135174", "0.7121614", "0.71170133", "0.71053797", "0.71021974", "0.70899564", "0.70837855", "0.7068985", "0.70682025", "0.7059467", "0.70449585", "0.7043916", "0.70349306", "0.70162034", "0.7008637", "0.6990686", "0.69877774", "0.69539785", "0.6953755", "0.6949643", "0.6948488", "0.6941953", "0.6941404", "0.6935818", "0.6929173", "0.6926596", "0.6919168", "0.690593", "0.6904899", "0.6903479", "0.6896567", "0.6893332", "0.6882284", "0.68677104", "0.6862643", "0.6817013", "0.6803477", "0.6792187", "0.67878646", "0.67862517", "0.67706937", "0.6770642", "0.676279", "0.6758765", "0.6730963", "0.67304385", "0.6715963", "0.671229", "0.66938907", "0.6689646", "0.6687877", "0.667838", "0.66590154", "0.66589415", "0.6655753", "0.66376376", "0.66339564", "0.66265273", "0.66222286", "0.6619698", "0.6619674", "0.6616932", "0.66119444", "0.6605918", "0.66037166", "0.65989083", "0.6597719", "0.6595419", "0.6579363", "0.6562006" ]
0.6678508
81
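The tail of the record above is dominated by collector code that emits one gauge per cluster through repeated `prometheus.MustNewConstMetric` calls. A minimal, runnable sketch of that pattern, assuming an illustrative collector, metric name, and label value that are not taken from the dataset:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// exampleCollector mirrors the MustNewConstMetric pattern above:
// one gauge series per cluster, labeled by the cluster name.
type exampleCollector struct {
	desc *prometheus.Desc
}

func (c *exampleCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

func (c *exampleCollector) Collect(ch chan<- prometheus.Metric) {
	// In the original code the value comes from a queried property,
	// e.g. float64(v.QuorumLogFileSize); a constant stands in here.
	ch <- prometheus.MustNewConstMetric(
		c.desc,
		prometheus.GaugeValue,
		42,          // property value as float64
		"cluster-a", // the v.Name label
	)
}

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(&exampleCollector{
		desc: prometheus.NewDesc(
			"example_cluster_property", // illustrative name
			"Illustrative per-cluster gauge.",
			[]string{"name"}, nil,
		),
	})
	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName(), len(mf.GetMetric()))
	}
}
```

Gathering from a dedicated registry keeps the sketch self-contained; in a real exporter the collector would be registered with the default registry and served over `/metrics`.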
unwrapScalarRMValue unwraps a runtime/metrics value that is assumed to be scalar and returns the equivalent float64 value. Panics if the value is not scalar.
func unwrapScalarRMValue(v metrics.Value) float64 {
	switch v.Kind() {
	case metrics.KindUint64:
		return float64(v.Uint64())
	case metrics.KindFloat64:
		return v.Float64()
	case metrics.KindBad:
		// Unsupported metric.
		//
		// This should never happen because we always populate our metric
		// set from the runtime/metrics package.
		panic("unexpected unsupported metric")
	default:
		// Unsupported metric kind.
		//
		// This should never happen because we check for this during initialization
		// and flag and filter metrics whose kinds we don't understand.
		panic("unexpected unsupported metric kind")
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GetScalarValue(model ModelT, t TermT, val *int32) int32 {\n\treturn int32(C.yices_get_scalar_value(ymodel(model), C.term_t(t), (*C.int32_t)(val)))\n}", "func (r *Result) Scalar() (*model.Scalar, error) {\n\tif r.Err != nil {\n\t\treturn nil, r.Err\n\t}\n\tv, ok := r.Value.(*model.Scalar)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"query result is not a scalar\")\n\t}\n\treturn v, nil\n}", "func (ev *evaluator) evalScalar(e Expr) *model.Scalar {\n\tval := ev.eval(e)\n\tsv, ok := val.(*model.Scalar)\n\tif !ok {\n\t\tev.errorf(\"expected scalar but got %s\", documentedType(val.Type()))\n\t}\n\treturn sv\n}", "func ValGetScalar(model ModelT, yval *YvalT, val *int32, tau *TypeT) int32 {\n\treturn int32(C.yices_val_get_scalar(ymodel(model), (*C.yval_t)(yval), (*C.int32_t)(val), (*C.type_t)(tau)))\n}", "func SubtractScalar(A float64, x *Matrix) (*Matrix, error) {\n\tout, _ := AddScalar(-1*A, x)\n\treturn out, nil\n}", "func (mat Mat) SubtractScalar(s float32) Mat {\n return mat.AddScalar(-1 * s)\n}", "func toScalarMaybe(v starlark.Value) (interface{}, bool) {\n\tif num, ok := toIntMaybe(v); ok {\n\t\treturn num, true\n\t}\n\tif f, ok := toFloatMaybe(v); ok {\n\t\treturn f, true\n\t}\n\tif text, ok := toStrMaybe(v); ok {\n\t\treturn text, true\n\t}\n\tif b, ok := toBoolMaybe(v); ok {\n\t\treturn b, true\n\t}\n\treturn nil, false\n}", "func toScalarMaybe(v starlark.Value) (interface{}, bool) {\n\tif num, ok := toIntMaybe(v); ok {\n\t\treturn num, true\n\t}\n\tif f, ok := toFloatMaybe(v); ok {\n\t\treturn f, true\n\t}\n\tif text, ok := toStrMaybe(v); ok {\n\t\treturn text, true\n\t}\n\tif b, ok := toBoolMaybe(v); ok {\n\t\treturn b, true\n\t}\n\tif tim, ok := toTimeMaybe(v); ok {\n\t\treturn tim, true\n\t}\n\treturn nil, false\n}", "func (v Vec3) MulScalar(s float32) Vec3 {\n\treturn Vec3{v[0] * s, v[1] * s, v[2] * s}\n}", "func (v *Vector3) MulScalar(b float64) {\n\tv.X *= b\n\tv.Y *= b\n\tv.Z *= b\n}", "func (m Mat2f) SubScalar(s float32) Mat2f {\n\treturn Mat2f{\n\t\tm[0] - s, m[1] - s,\n\t\tm[2] - s, m[3] - s}\n}", "func (v Vector2) MulScalar(other float64) Vector {\r\n\treturn Vector2{\r\n\t\tv[0] * other,\r\n\t\tv[1] * other,\r\n\t}\r\n}", "func (mat Mat) DivideScalar(s float32) (Mat,error) {\n if s == 0 {\n return mat,&MatrixError{\"Divede By zero error.\"}\n }\n return mat.MultiplyScalar(1/s),nil\n}", "func (m Mat2f) MulScalar(s float32) Mat2f {\n\treturn Mat2f{\n\t\tm[0] * s, m[1] * s,\n\t\tm[2] * s, m[3] * s}\n}", "func funcScalar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {\n\tv := vals[0].(Vector)\n\tif len(v) != 1 {\n\t\treturn append(enh.Out, Sample{F: math.NaN()})\n\t}\n\treturn append(enh.Out, Sample{F: v[0].F})\n}", "func (s *Streaming) UncorrectedValue() float64 {\n\treturn s.c * s.normx * s.normy / s.n\n}", "func ConvertValue(v r.Value, to r.Type) r.Value {\n\tt := Type(v)\n\tif t == to {\n\t\treturn v\n\t}\n\tif !t.ConvertibleTo(to) {\n\t\t// reflect.Value does not allow conversions from/to complex types\n\t\tk := v.Kind()\n\t\tkto := to.Kind()\n\t\tif IsCategory(kto, r.Complex128) {\n\t\t\tif IsCategory(k, r.Int, r.Uint, r.Float64) {\n\t\t\t\ttemp := v.Convert(TypeOfFloat64).Float()\n\t\t\t\tv = r.ValueOf(complex(temp, 0.0))\n\t\t\t}\n\t\t} else if IsCategory(k, r.Complex128) {\n\t\t\tif IsCategory(k, r.Int, r.Uint, r.Float64) {\n\t\t\t\ttemp := real(v.Complex())\n\t\t\t\tv = r.ValueOf(temp)\n\t\t\t}\n\t\t}\n\t}\n\treturn v.Convert(to)\n}", "func (v *Vector3) DivideScalar(b float64) {\n\tv.X /= b\n\tv.Y /= b\n\tv.Z /= b\n}", "func (v Vec3i) 
DivScalar(scalar int32) Vec3i {\n\tif scalar != 0 {\n\t\treturn v.MulScalar(1 / scalar)\n\t} else {\n\t\treturn Vec3i{}\n\t}\n}", "func (n Float64Wrapper) Value() (Value, error) {\n\tif !n.Valid {\n\t\treturn nil, nil\n\t}\n\treturn n.Float64, nil\n}", "func (v Vec3i) MulScalar(s int32) Vec3i {\n\treturn Vec3i{v.X * s, v.Y * s, v.Z * s}\n}", "func (j *J) ParseScalar(s string) (iv.Scalar, error) {\n\tif _, err := strconv.ParseFloat(s, 64); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tif s[0] == '-' {\n\t\t\treturn \"_\" + s[1:], nil\n\t\t}\n\t}\n\treturn s, nil\n}", "func (a Vec2) DivScalar(b float64) Vec2 {\n\treturn Vec2{a.X / b, a.Y / b}\n}", "func (ref *UIElement) CriticalValueAsFloat64() (float64, error) {\n\treturn ref.Float64Attr(CriticalValueAttribute)\n}", "func (t *Dense) ModScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) {\n\tvar ret Tensor\n\tif t.oe != nil {\n\t\tif ret, err = t.oe.ModScalar(t, other, leftTensor, opts...); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"Unable to do ModScalar()\")\n\t\t}\n\t\tif retVal, err = assertDense(ret); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, opFail, \"ModScalar\")\n\t\t}\n\t\treturn\n\t}\n\n\tif moder, ok := t.e.(Moder); ok {\n\t\tif ret, err = moder.ModScalar(t, other, leftTensor, opts...); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"Unable to do ModScalar()\")\n\t\t}\n\t\tif retVal, err = assertDense(ret); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, opFail, \"ModScalar\")\n\t\t}\n\t\treturn\n\t}\n\treturn nil, errors.Errorf(\"Engine does not support ModScalar()\")\n}", "func ParseValue(namespace xmpregistry.Namespace, fieldName string, rawValue string) (parsedValue interface{}, err error) {\n\tdefer func() {\n\t\tif errRaw := recover(); errRaw != nil {\n\t\t\terr = log.Wrap(errRaw.(error))\n\t\t}\n\t}()\n\n\tft, found := namespace.Fields[fieldName]\n\tif found == false {\n\t\treturn nil, ErrChildFieldNotFound\n\t}\n\n\tnamespaceUri := namespace.Uri\n\n\tsft, ok := ft.(ScalarFieldType)\n\tif ok == false {\n\t\tlog.Panicf(\"scalar value field did not return a scalar parser: NS=[%s] FIELD=[%s] TYPE=[%v]\", namespaceUri, fieldName, reflect.TypeOf(ft))\n\t}\n\n\tparser := sft.GetValueParser(rawValue)\n\n\tparsedValue, err = parser.Parse()\n\tif err != nil {\n\t\tparseLogger.Warningf(nil, \"Could not parse value: NS=[%s] FIELD=[%s] VALUE=[%s] PARSER=[%v]\", namespaceUri, fieldName, rawValue, reflect.TypeOf(parser))\n\t\treturn nil, err\n\t}\n\n\treturn parsedValue, nil\n}", "func unwrapDouble(w *wrappers.DoubleValue) *float64 {\n\tif w == nil {\n\t\treturn nil\n\t}\n\n\tv := w.Value\n\treturn &v\n}", "func TermIsScalar(t TermT) bool {\n\treturn C.yices_term_is_scalar(C.term_t(t)) == C.int32_t(1)\n}", "func (s *Scalar) Decode(x []byte) error {\n\treturn s.s.FromCanonicalBytes(x)\n}", "func ExecuteScalarExpression(ctx context.Context, inputSchema *arrow.Schema, expression expr.Expression, partialInput compute.Datum) (compute.Datum, error) {\n\tif expression == nil {\n\t\treturn nil, arrow.ErrInvalid\n\t}\n\n\tbatch, err := makeExecBatch(ctx, inputSchema, partialInput)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\tfor _, v := range batch.Values {\n\t\t\tv.Release()\n\t\t}\n\t}()\n\n\treturn executeScalarBatch(ctx, batch, expression, GetExtensionIDSet(ctx))\n}", "func (a Vec2) MulScalar(b float64) Vec2 {\n\treturn Vec2{a.X * b, a.Y * b}\n}", "func (o *options) unmarshalScalar(dec *msgpack.Decoder, fd protoreflect.FieldDescriptor) (ret 
protoreflect.Value, err error) {\n\t// DecodeInterfaceLoose will return:\n\t// - int8, int16, and int32 are converted to int64,\n\t// - uint8, uint16, and uint32 are converted to uint64,\n\t// - float32 is converted to float64.\n\t// - []byte is converted to string.\n\tval, err := dec.DecodeInterfaceLoose()\n\tif err != nil {\n\t\terr = errors.Annotate(err, \"decoding scalar\").Err()\n\t\treturn\n\t}\n\n\tswitch fd.Kind() {\n\tcase protoreflect.BoolKind:\n\t\tswitch x := val.(type) {\n\t\tcase bool:\n\t\t\treturn protoreflect.ValueOfBool(x), nil\n\t\tcase uint64:\n\t\t\treturn protoreflect.ValueOfBool(x != 0), nil\n\t\tcase int64:\n\t\t\treturn protoreflect.ValueOfBool(x != 0), nil\n\t\t}\n\n\tcase protoreflect.EnumKind:\n\t\tswitch x := val.(type) {\n\t\tcase bool:\n\t\t\tif x {\n\t\t\t\treturn protoreflect.ValueOfEnum(1), nil\n\t\t\t}\n\t\t\treturn protoreflect.ValueOfEnum(0), nil\n\t\tcase uint64:\n\t\t\treturn protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil\n\t\tcase int64:\n\t\t\treturn protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil\n\t\t}\n\n\tcase protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:\n\t\tswitch x := val.(type) {\n\t\tcase bool:\n\t\t\tif x {\n\t\t\t\treturn protoreflect.ValueOfInt32(1), nil\n\t\t\t}\n\t\t\treturn protoreflect.ValueOfInt32(0), nil\n\t\tcase uint64:\n\t\t\treturn protoreflect.ValueOfInt32(int32(x)), nil\n\t\tcase int64:\n\t\t\treturn protoreflect.ValueOfInt32(int32(x)), nil\n\t\t}\n\n\tcase protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:\n\t\tswitch x := val.(type) {\n\t\tcase bool:\n\t\t\tif x {\n\t\t\t\treturn protoreflect.ValueOfInt64(1), nil\n\t\t\t}\n\t\t\treturn protoreflect.ValueOfInt64(0), nil\n\t\tcase uint64:\n\t\t\treturn protoreflect.ValueOfInt64(int64(x)), nil\n\t\tcase int64:\n\t\t\treturn protoreflect.ValueOfInt64(x), nil\n\t\t}\n\n\tcase protoreflect.Uint32Kind, protoreflect.Fixed32Kind:\n\t\tswitch x := val.(type) {\n\t\tcase bool:\n\t\t\tif x {\n\t\t\t\treturn protoreflect.ValueOfUint32(1), nil\n\t\t\t}\n\t\t\treturn protoreflect.ValueOfUint32(0), nil\n\t\tcase uint64:\n\t\t\treturn protoreflect.ValueOfUint32(uint32(x)), nil\n\t\tcase int64:\n\t\t\treturn protoreflect.ValueOfUint32(uint32(x)), nil\n\t\t}\n\n\tcase protoreflect.Uint64Kind, protoreflect.Fixed64Kind:\n\t\tswitch x := val.(type) {\n\t\tcase bool:\n\t\t\tif x {\n\t\t\t\treturn protoreflect.ValueOfUint64(1), nil\n\t\t\t}\n\t\t\treturn protoreflect.ValueOfUint64(0), nil\n\t\tcase uint64:\n\t\t\treturn protoreflect.ValueOfUint64(x), nil\n\t\tcase int64:\n\t\t\treturn protoreflect.ValueOfUint64(uint64(x)), nil\n\t\t}\n\n\tcase protoreflect.FloatKind:\n\t\tswitch x := val.(type) {\n\t\tcase uint64:\n\t\t\t// allowed, because lua will encode non-floatlike numbers as integers.\n\t\t\treturn protoreflect.ValueOfFloat32(float32(x)), nil\n\t\tcase int64:\n\t\t\t// allowed, because lua will encode non-floatlike negative numbers as integers.\n\t\t\treturn protoreflect.ValueOfFloat32(float32(x)), nil\n\t\tcase float32:\n\t\t\treturn protoreflect.ValueOfFloat32(x), nil\n\t\tcase float64:\n\t\t\treturn protoreflect.ValueOfFloat32(float32(x)), nil\n\t\t}\n\n\tcase protoreflect.DoubleKind:\n\t\tswitch x := val.(type) {\n\t\tcase uint64:\n\t\t\t// allowed, because lua will encode non-floatlike numbers as integers.\n\t\t\treturn protoreflect.ValueOfFloat64(float64(x)), nil\n\t\tcase int64:\n\t\t\t// allowed, because lua will encode non-floatlike negative numbers as integers.\n\t\t\treturn 
protoreflect.ValueOfFloat64(float64(x)), nil\n\t\tcase float32:\n\t\t\treturn protoreflect.ValueOfFloat64(float64(x)), nil\n\t\tcase float64:\n\t\t\treturn protoreflect.ValueOfFloat64(x), nil\n\t\t}\n\n\tcase protoreflect.StringKind, protoreflect.BytesKind:\n\t\tvar checkIntern bool\n\t\tvar internIdx int\n\t\tswitch x := val.(type) {\n\t\tcase string:\n\t\t\treturn protoreflect.ValueOf(val), nil\n\t\tcase uint64:\n\t\t\tcheckIntern = true\n\t\t\tinternIdx = int(x)\n\t\tcase int64:\n\t\t\tcheckIntern = true\n\t\t\tinternIdx = int(x)\n\t\t}\n\n\t\tif checkIntern {\n\t\t\tif internIdx < len(o.internUnmarshalTable) {\n\t\t\t\treturn protoreflect.ValueOfString(o.internUnmarshalTable[internIdx]), nil\n\t\t\t}\n\t\t\terr = errors.Reason(\"interned string has index out of bounds: %d\", internIdx).Err()\n\t\t\treturn\n\t\t}\n\t}\n\n\terr = errors.Reason(\"bad type: expected %s, got %T\", fd.Kind(), val).Err()\n\treturn\n}", "func (q Quat) MulScalar(s float32) Quat {\n\treturn Quat{q.W * s, q.X * s, q.Y * s, q.Z * s}\n}", "func (to Scalar) Convert(value Value) (Value, error) {\n\tif to.ID() == value.Type().ID() {\n\t\treturn value, nil\n\t}\n\n\tif to.ID() == StringID || to.ID() == BytesID {\n\t\t// If we are converting to a string or bytes, simply use MarshalText\n\t\tr, err := value.MarshalText()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tv := String(r)\n\t\treturn &v, nil\n\t}\n\n\tu := ValueForType(to.ID())\n\t// Otherwise we check if the conversion is defined.\n\tswitch v := value.(type) {\n\tcase *Bytes:\n\t\t// Bytes convert the same way as strings, as bytes denote an untyped value which is almost\n\t\t// always a string.\n\t\tif err := u.UnmarshalText([]byte(*v)); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\tcase *String:\n\t\t// If the value is a string, then we can always Unmarshal it using the unmarshaller\n\t\tif err := u.UnmarshalText([]byte(*v)); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\tcase *Int32:\n\t\tc, ok := u.(int32Unmarshaler)\n\t\tif !ok {\n\t\t\treturn nil, cantConvert(to, v)\n\t\t}\n\t\tif err := c.fromInt(int32(*v)); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\tcase *Float:\n\t\tc, ok := u.(floatUnmarshaler)\n\t\tif !ok {\n\t\t\treturn nil, cantConvert(to, v)\n\t\t}\n\t\tif err := c.fromFloat(float64(*v)); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\tcase *Bool:\n\t\tc, ok := u.(boolUnmarshaler)\n\t\tif !ok {\n\t\t\treturn nil, cantConvert(to, v)\n\t\t}\n\t\tif err := c.fromBool(bool(*v)); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\tcase *Time:\n\t\tc, ok := u.(timeUnmarshaler)\n\t\tif !ok {\n\t\t\treturn nil, cantConvert(to, v)\n\t\t}\n\t\tif err := c.fromTime(v.Time); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\tcase *Date:\n\t\tc, ok := u.(dateUnmarshaler)\n\t\tif !ok {\n\t\t\treturn nil, cantConvert(to, v)\n\t\t}\n\t\tif err := c.fromDate(*v); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\tdefault:\n\t\treturn nil, cantConvert(to, v)\n\t}\n\treturn u, nil\n}", "func vectorMulScalar(v []*operation.Scalar, s *operation.Scalar) []*operation.Scalar {\n\tresult := make([]*operation.Scalar, len(v))\n\tfor i := range v {\n\t\tresult[i] = new(operation.Scalar).Mul(v[i], s)\n\t}\n\treturn result\n}", "func normalizeValue(v sqltypes.Value, coll collations.ID) sqltypes.Value {\n\ttyp := v.Type()\n\tif typ == sqltypes.VarChar && coll == collations.CollationBinaryID {\n\t\treturn sqltypes.NewVarBinary(string(v.Raw()))\n\t}\n\tif typ == sqltypes.Float32 || typ == sqltypes.Float64 {\n\t\tvar bitsize = 64\n\t\tif typ == sqltypes.Float32 
{\n\t\t\tbitsize = 32\n\t\t}\n\t\tf, err := strconv.ParseFloat(v.RawStr(), bitsize)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn sqltypes.MakeTrusted(typ, format.FormatFloat(f))\n\t}\n\treturn v\n}", "func (v *Vec3i) ClampScalar(minVal, maxVal int32) {\n\tv.Clamp(NewVec3iScalar(minVal), NewVec3iScalar(maxVal))\n}", "func IsScalar(v Value) bool {\n\tswitch v.(type) {\n\tcase String:\n\t\treturn true\n\tcase Number:\n\t\treturn true\n\tcase Boolean:\n\t\treturn true\n\tcase Null:\n\t\treturn true\n\t}\n\treturn false\n}", "func (to Scalar) Convert(value TypeValue) (TypeValue, error) {\n\tif to.ID() == stringID || to.ID() == bytesID {\n\t\t// If we are converting to a string or bytes, simply use MarshalText\n\t\tr, err := value.MarshalText()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn String(r), nil\n\t}\n\n\tu := to.Unmarshaler\n\t// Otherwise we check if the conversion is defined.\n\tswitch v := value.(type) {\n\tcase Bytes:\n\t\t// Bytes convert the same way as strings, as bytes denote an untyped value which is almost\n\t\t// always a string.\n\t\treturn u.FromText([]byte(v))\n\n\tcase String:\n\t\t// If the value is a string, then we can always Unmarshal it using the unmarshaller\n\t\treturn u.FromText([]byte(v))\n\n\tcase Int32:\n\t\tc, ok := u.(int32Unmarshaler)\n\t\tif !ok {\n\t\t\treturn nil, cantConvert(to, v)\n\t\t}\n\t\treturn c.fromInt(int32(v))\n\n\tcase Float:\n\t\tc, ok := u.(floatUnmarshaler)\n\t\tif !ok {\n\t\t\treturn nil, cantConvert(to, v)\n\t\t}\n\t\treturn c.fromFloat(float64(v))\n\n\tcase Bool:\n\t\tc, ok := u.(boolUnmarshaler)\n\t\tif !ok {\n\t\t\treturn nil, cantConvert(to, v)\n\t\t}\n\t\treturn c.fromBool(bool(v))\n\n\tcase time.Time:\n\t\tc, ok := u.(timeUnmarshaler)\n\t\tif !ok {\n\t\t\treturn nil, cantConvert(to, v)\n\t\t}\n\t\treturn c.fromTime(v)\n\n\tcase Date:\n\t\tc, ok := u.(dateUnmarshaler)\n\t\tif !ok {\n\t\t\treturn nil, cantConvert(to, v)\n\t\t}\n\t\treturn c.fromDate(v)\n\n\tdefault:\n\t\treturn nil, cantConvert(to, v)\n\t}\n}", "func (irgb *InstanceRuntimeGroupBy) Float64(ctx context.Context) (_ float64, err error) {\n\tvar v []float64\n\tif v, err = irgb.Float64s(ctx); err != nil {\n\t\treturn\n\t}\n\tswitch len(v) {\n\tcase 1:\n\t\treturn v[0], nil\n\tcase 0:\n\t\terr = &NotFoundError{instanceruntime.Label}\n\tdefault:\n\t\terr = fmt.Errorf(\"ent: InstanceRuntimeGroupBy.Float64s returned %d results when one was expected\", len(v))\n\t}\n\treturn\n}", "func (q Quat) SubScalar(s float32) Quat {\n\treturn Quat{q.W - s, q.X - s, q.Y - s, q.Z - s}\n}", "func valueAsFloat64(value *monitoring.TypedValue) float64 {\n\tif value == nil {\n\t\treturn 0\n\t}\n\tswitch {\n\tcase value.Int64Value != nil:\n\t\treturn float64(*value.Int64Value)\n\tcase value.DoubleValue != nil:\n\t\treturn *value.DoubleValue\n\tdefault:\n\t\treturn 0\n\t}\n}", "func (d *decoder) scalar(childKey string, value reflect.Value, def string) error {\n\tglobal := d.getGlobalProvider()\n\tvar val interface{}\n\n\t// For primitive values, just get the value and set it into the field\n\tif v2 := global.Get(childKey); v2.HasValue() {\n\t\tval = v2.Value()\n\t} else if def != \"\" {\n\t\tval = def\n\t}\n\n\treturn convert(childKey, &value, val)\n}", "func (gdt *Vector3) OperatorDivideScalar(b Real) Vector3 {\n\targ0 := gdt.getBase()\n\targ1 := b.getBase()\n\n\tret := C.go_godot_vector3_operator_divide_scalar(GDNative.api, arg0, arg1)\n\n\treturn Vector3{base: &ret}\n\n}", "func (q Quat) DivScalar(s float32) Quat {\n\treturn Quat{q.W / s, q.X / s, q.Y / s, q.Z / 
s}\n}", "func (gdt *Vector3) OperatorMultiplyScalar(b Real) Vector3 {\n\targ0 := gdt.getBase()\n\targ1 := b.getBase()\n\n\tret := C.go_godot_vector3_operator_multiply_scalar(GDNative.api, arg0, arg1)\n\n\treturn Vector3{base: &ret}\n\n}", "func ValueMul(a, b reflect.Value) (reflect.Value, error) {\n\taBkind := GetBaseKind(a)\n\tbBkind := GetBaseKind(b)\n\n\tswitch aBkind {\n\tcase reflect.Int64:\n\t\tswitch bBkind {\n\t\tcase reflect.Int64:\n\t\t\treturn reflect.ValueOf(a.Int() * b.Int()), nil\n\t\tcase reflect.Uint64:\n\t\t\treturn reflect.ValueOf(a.Int() * int64(b.Uint())), nil\n\t\tcase reflect.Float64:\n\t\t\treturn reflect.ValueOf(float64(a.Int()) * b.Float()), nil\n\t\tdefault:\n\t\t\treturn reflect.ValueOf(nil), errors.Errorf(\"Can not do multiplication math operator between %s and %s\", a.Kind().String(), b.Kind().String())\n\t\t}\n\tcase reflect.Uint64:\n\t\tswitch bBkind {\n\t\tcase reflect.Int64:\n\t\t\treturn reflect.ValueOf(int64(a.Uint()) * b.Int()), nil\n\t\tcase reflect.Uint64:\n\t\t\treturn reflect.ValueOf(a.Uint() * b.Uint()), nil\n\t\tcase reflect.Float64:\n\t\t\treturn reflect.ValueOf(float64(a.Uint()) * b.Float()), nil\n\t\tdefault:\n\t\t\treturn reflect.ValueOf(nil), errors.Errorf(\"Can not do multiplication math operator between %s and %s\", a.Kind().String(), b.Kind().String())\n\t\t}\n\tcase reflect.Float64:\n\t\tswitch bBkind {\n\t\tcase reflect.Int64:\n\t\t\treturn reflect.ValueOf(a.Float() * float64(b.Int())), nil\n\t\tcase reflect.Uint64:\n\t\t\treturn reflect.ValueOf(a.Float() * float64(b.Uint())), nil\n\t\tcase reflect.Float64:\n\t\t\treturn reflect.ValueOf(a.Float() * b.Float()), nil\n\t\tdefault:\n\t\t\treturn reflect.ValueOf(nil), errors.Errorf(\"Can not do multiplication math operator between %s and %s\", a.Kind().String(), b.Kind().String())\n\t\t}\n\tdefault:\n\t\treturn reflect.ValueOf(nil), errors.Errorf(\"Can not do multiplication math operator between %s and %s\", a.Kind().String(), b.Kind().String())\n\t}\n}", "func Mat3MultiplyScalar(out, a []float64, b float64) []float64 {\n\tout[0] = a[0] * b\n\tout[1] = a[1] * b\n\tout[2] = a[2] * b\n\tout[3] = a[3] * b\n\tout[4] = a[4] * b\n\tout[5] = a[5] * b\n\tout[6] = a[6] * b\n\tout[7] = a[7] * b\n\tout[8] = a[8] * b\n\treturn out\n}", "func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd protoreflect.FieldDescriptor) (val protoreflect.Value, n int, err error) {\n\tswitch fd.Kind() {\n\tcase protoreflect.BoolKind:\n\t\tif wtyp != protowire.VarintType {\n\t\t\treturn val, 0, errUnknown\n\t\t}\n\t\tv, n := protowire.ConsumeVarint(b)\n\t\tif n < 0 {\n\t\t\treturn val, 0, errDecode\n\t\t}\n\t\treturn protoreflect.ValueOfBool(protowire.DecodeBool(v)), n, nil\n\tcase protoreflect.EnumKind:\n\t\tif wtyp != protowire.VarintType {\n\t\t\treturn val, 0, errUnknown\n\t\t}\n\t\tv, n := protowire.ConsumeVarint(b)\n\t\tif n < 0 {\n\t\t\treturn val, 0, errDecode\n\t\t}\n\t\treturn protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), n, nil\n\tcase protoreflect.Int32Kind:\n\t\tif wtyp != protowire.VarintType {\n\t\t\treturn val, 0, errUnknown\n\t\t}\n\t\tv, n := protowire.ConsumeVarint(b)\n\t\tif n < 0 {\n\t\t\treturn val, 0, errDecode\n\t\t}\n\t\treturn protoreflect.ValueOfInt32(int32(v)), n, nil\n\tcase protoreflect.Sint32Kind:\n\t\tif wtyp != protowire.VarintType {\n\t\t\treturn val, 0, errUnknown\n\t\t}\n\t\tv, n := protowire.ConsumeVarint(b)\n\t\tif n < 0 {\n\t\t\treturn val, 0, errDecode\n\t\t}\n\t\treturn protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), n, 
nil\n\tcase protoreflect.Uint32Kind:\n\t\tif wtyp != protowire.VarintType {\n\t\t\treturn val, 0, errUnknown\n\t\t}\n\t\tv, n := protowire.ConsumeVarint(b)\n\t\tif n < 0 {\n\t\t\treturn val, 0, errDecode\n\t\t}\n\t\treturn protoreflect.ValueOfUint32(uint32(v)), n, nil\n\tcase protoreflect.Int64Kind:\n\t\tif wtyp != protowire.VarintType {\n\t\t\treturn val, 0, errUnknown\n\t\t}\n\t\tv, n := protowire.ConsumeVarint(b)\n\t\tif n < 0 {\n\t\t\treturn val, 0, errDecode\n\t\t}\n\t\treturn protoreflect.ValueOfInt64(int64(v)), n, nil\n\tcase protoreflect.Sint64Kind:\n\t\tif wtyp != protowire.VarintType {\n\t\t\treturn val, 0, errUnknown\n\t\t}\n\t\tv, n := protowire.ConsumeVarint(b)\n\t\tif n < 0 {\n\t\t\treturn val, 0, errDecode\n\t\t}\n\t\treturn protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), n, nil\n\tcase protoreflect.Uint64Kind:\n\t\tif wtyp != protowire.VarintType {\n\t\t\treturn val, 0, errUnknown\n\t\t}\n\t\tv, n := protowire.ConsumeVarint(b)\n\t\tif n < 0 {\n\t\t\treturn val, 0, errDecode\n\t\t}\n\t\treturn protoreflect.ValueOfUint64(v), n, nil\n\tcase protoreflect.Sfixed32Kind:\n\t\tif wtyp != protowire.Fixed32Type {\n\t\t\treturn val, 0, errUnknown\n\t\t}\n\t\tv, n := protowire.ConsumeFixed32(b)\n\t\tif n < 0 {\n\t\t\treturn val, 0, errDecode\n\t\t}\n\t\treturn protoreflect.ValueOfInt32(int32(v)), n, nil\n\tcase protoreflect.Fixed32Kind:\n\t\tif wtyp != protowire.Fixed32Type {\n\t\t\treturn val, 0, errUnknown\n\t\t}\n\t\tv, n := protowire.ConsumeFixed32(b)\n\t\tif n < 0 {\n\t\t\treturn val, 0, errDecode\n\t\t}\n\t\treturn protoreflect.ValueOfUint32(uint32(v)), n, nil\n\tcase protoreflect.FloatKind:\n\t\tif wtyp != protowire.Fixed32Type {\n\t\t\treturn val, 0, errUnknown\n\t\t}\n\t\tv, n := protowire.ConsumeFixed32(b)\n\t\tif n < 0 {\n\t\t\treturn val, 0, errDecode\n\t\t}\n\t\treturn protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), n, nil\n\tcase protoreflect.Sfixed64Kind:\n\t\tif wtyp != protowire.Fixed64Type {\n\t\t\treturn val, 0, errUnknown\n\t\t}\n\t\tv, n := protowire.ConsumeFixed64(b)\n\t\tif n < 0 {\n\t\t\treturn val, 0, errDecode\n\t\t}\n\t\treturn protoreflect.ValueOfInt64(int64(v)), n, nil\n\tcase protoreflect.Fixed64Kind:\n\t\tif wtyp != protowire.Fixed64Type {\n\t\t\treturn val, 0, errUnknown\n\t\t}\n\t\tv, n := protowire.ConsumeFixed64(b)\n\t\tif n < 0 {\n\t\t\treturn val, 0, errDecode\n\t\t}\n\t\treturn protoreflect.ValueOfUint64(v), n, nil\n\tcase protoreflect.DoubleKind:\n\t\tif wtyp != protowire.Fixed64Type {\n\t\t\treturn val, 0, errUnknown\n\t\t}\n\t\tv, n := protowire.ConsumeFixed64(b)\n\t\tif n < 0 {\n\t\t\treturn val, 0, errDecode\n\t\t}\n\t\treturn protoreflect.ValueOfFloat64(math.Float64frombits(v)), n, nil\n\tcase protoreflect.StringKind:\n\t\tif wtyp != protowire.BytesType {\n\t\t\treturn val, 0, errUnknown\n\t\t}\n\t\tv, n := protowire.ConsumeBytes(b)\n\t\tif n < 0 {\n\t\t\treturn val, 0, errDecode\n\t\t}\n\t\tif strs.EnforceUTF8(fd) && !utf8.Valid(v) {\n\t\t\treturn protoreflect.Value{}, 0, errors.InvalidUTF8(string(fd.FullName()))\n\t\t}\n\t\treturn protoreflect.ValueOfString(string(v)), n, nil\n\tcase protoreflect.BytesKind:\n\t\tif wtyp != protowire.BytesType {\n\t\t\treturn val, 0, errUnknown\n\t\t}\n\t\tv, n := protowire.ConsumeBytes(b)\n\t\tif n < 0 {\n\t\t\treturn val, 0, errDecode\n\t\t}\n\t\treturn protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), n, nil\n\tcase protoreflect.MessageKind:\n\t\tif wtyp != protowire.BytesType {\n\t\t\treturn val, 0, errUnknown\n\t\t}\n\t\tv, n := protowire.ConsumeBytes(b)\n\t\tif n < 0 
{\n\t\t\treturn val, 0, errDecode\n\t\t}\n\t\treturn protoreflect.ValueOfBytes(v), n, nil\n\tcase protoreflect.GroupKind:\n\t\tif wtyp != protowire.StartGroupType {\n\t\t\treturn val, 0, errUnknown\n\t\t}\n\t\tv, n := protowire.ConsumeGroup(fd.Number(), b)\n\t\tif n < 0 {\n\t\t\treturn val, 0, errDecode\n\t\t}\n\t\treturn protoreflect.ValueOfBytes(v), n, nil\n\tdefault:\n\t\treturn val, 0, errUnknown\n\t}\n}", "func Scalar(tst *testing.T, msg string, tol, res, correct float64) {\n\tCheckAndPrint(tst, msg, tol, math.Abs(res-correct))\n}", "func (v *Vec3i) SetMulScalar(s int32) {\n\tv.X *= s\n\tv.Y *= s\n\tv.Z *= s\n}", "func Real(x Value) Value {\n\tif _, ok := x.(*ratVal); ok {\n\t\treturn x\n\t}\n\treturn constant.Real(x)\n}", "func (r *Result) Float64() float64 {\n\tif r.Error != nil {\n\t\treturn 0\n\t}\n\n\treturn convert.ToFloat64(r.Value)\n}", "func TestRemoteEvalEmptyScalarResponse(t *testing.T) {\n\tdefaultLimits := defaultLimitsTestConfig()\n\tlimits, err := validation.NewOverrides(defaultLimits, nil)\n\trequire.NoError(t, err)\n\n\tcli := mockClient{\n\t\thandleFn: func(ctx context.Context, in *httpgrpc.HTTPRequest, opts ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {\n\t\t\t// this is somewhat bleeding the abstraction, but it's more idiomatic/readable than constructing\n\t\t\t// the expected JSON response by hand\n\t\t\tresp := loghttp.QueryResponse{\n\t\t\t\tStatus: loghttp.QueryStatusSuccess,\n\t\t\t\tData: loghttp.QueryResponseData{\n\t\t\t\t\tResultType: loghttp.ResultTypeScalar,\n\t\t\t\t\tResult: loghttp.Scalar{},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tout, err := json.Marshal(resp)\n\t\t\trequire.NoError(t, err)\n\n\t\t\treturn &httpgrpc.HTTPResponse{\n\t\t\t\tCode: http.StatusOK,\n\t\t\t\tHeaders: nil,\n\t\t\t\tBody: out,\n\t\t\t}, nil\n\t\t},\n\t}\n\n\tev, err := NewRemoteEvaluator(cli, limits, log.Logger, prometheus.NewRegistry())\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tctx = user.InjectOrgID(ctx, \"test\")\n\n\tres, err := ev.Eval(ctx, \"sum(rate({foo=\\\"bar\\\"}[5m]))\", time.Now())\n\trequire.NoError(t, err)\n\trequire.Empty(t, res.Data)\n}", "func (t *Dense) PowScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) {\n\tvar ret Tensor\n\tif t.oe != nil {\n\t\tif ret, err = t.oe.PowScalar(t, other, leftTensor, opts...); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"Unable to do PowScalar()\")\n\t\t}\n\t\tif retVal, err = assertDense(ret); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, opFail, \"PowScalar\")\n\t\t}\n\t\treturn\n\t}\n\n\tif power, ok := t.e.(Power); ok {\n\t\tif ret, err = power.PowScalar(t, other, leftTensor, opts...); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"Unable to do PowScalar()\")\n\t\t}\n\t\tif retVal, err = assertDense(ret); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, opFail, \"PowScalar\")\n\t\t}\n\t\treturn\n\t}\n\treturn nil, errors.Errorf(\"Engine does not support PowScalar()\")\n}", "func (irs *InstanceRuntimeSelect) Float64(ctx context.Context) (_ float64, err error) {\n\tvar v []float64\n\tif v, err = irs.Float64s(ctx); err != nil {\n\t\treturn\n\t}\n\tswitch len(v) {\n\tcase 1:\n\t\treturn v[0], nil\n\tcase 0:\n\t\terr = &NotFoundError{instanceruntime.Label}\n\tdefault:\n\t\terr = fmt.Errorf(\"ent: InstanceRuntimeSelect.Float64s returned %d results when one was expected\", len(v))\n\t}\n\treturn\n}", "func (t *Dense) SubScalar(other interface{}, leftTensor bool, opts ...FuncOpt) (retVal *Dense, err error) {\n\tvar ret Tensor\n\tif t.oe != nil {\n\t\tif ret, 
err = t.oe.SubScalar(t, other, leftTensor, opts...); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"Unable to do SubScalar()\")\n\t\t}\n\t\tif retVal, err = assertDense(ret); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, opFail, \"SubScalar\")\n\t\t}\n\t\treturn\n\t}\n\n\tif suber, ok := t.e.(Suber); ok {\n\t\tif ret, err = suber.SubScalar(t, other, leftTensor, opts...); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"Unable to do SubScalar()\")\n\t\t}\n\t\tif retVal, err = assertDense(ret); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, opFail, \"SubScalar\")\n\t\t}\n\t\treturn\n\t}\n\treturn nil, errors.Errorf(\"Engine does not support SubScalar()\")\n}", "func (c *Color) MulScalar(scalar float64) *Color {\n\treturn NewColor(\n\t\tc.r*scalar,\n\t\tc.g*scalar,\n\t\tc.b*scalar,\n\t)\n}", "func (n NullFloat32) Value() (driver.Value, error) {\n\tif !n.Valid {\n\t\treturn nil, nil\n\t}\n\treturn float64(n.Float32), nil\n}", "func ScalarDiv(scalar float64, matrix [][]float64) ([][]float64, error) {\n\tvar result [][]float64\n\n\tif scalar == 0 {\n\t\treturn nil, errors.New(\"cannot divide by zero\")\n\t}\n\n\tfor i, row := range matrix {\n\t\tif i == 0 {\n\t\t\tresult = matrix\n\t\t}\n\t\tfor j, val := range row {\n\t\t\tresult[i][j] = val / scalar\n\t\t}\n\t}\n\n\treturn result, nil\n}", "func ExecuteScalarSubstrait(ctx context.Context, expression *expr.Extended, partialInput compute.Datum) (compute.Datum, error) {\n\tif expression == nil {\n\t\treturn nil, arrow.ErrInvalid\n\t}\n\n\tvar toExecute expr.Expression\n\n\tswitch len(expression.ReferredExpr) {\n\tcase 0:\n\t\treturn nil, fmt.Errorf(\"%w: no referred expression to execute\", arrow.ErrInvalid)\n\tcase 1:\n\t\tif toExecute = expression.ReferredExpr[0].GetExpr(); toExecute == nil {\n\t\t\treturn nil, fmt.Errorf(\"%w: measures not implemented\", arrow.ErrNotImplemented)\n\t\t}\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"%w: only single referred expression implemented\", arrow.ErrNotImplemented)\n\t}\n\n\treg := GetExtensionRegistry(ctx)\n\tset := NewExtensionSet(expr.NewExtensionRegistry(expression.Extensions, &extensions.DefaultCollection), reg)\n\tsc, err := ToArrowSchema(expression.BaseSchema, set)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ExecuteScalarExpression(WithExtensionIDSet(ctx, set), sc, toExecute, partialInput)\n}", "func (g *Graph) DivScalar(x1 Node, x2 Node) Node {\n\treturn g.NewOperator(fn.NewDivScalar(x1, x2), x1, x2)\n}", "func NewScalar() *Scalar {\n\treturn (&Scalar{}).Zero()\n}", "func Float() Scalar {\n\treturn floatTypeInstance\n}", "func (v Vec3i) SubScalar(s int32) Vec3i {\n\treturn Vec3i{v.X - s, v.Y - s, v.Z - s}\n}", "func (n NullFloat64) Value() (driver.Value, error) {\n\tif n == 0 {\n\t\treturn nil, nil\n\t}\n\treturn float64(n), nil\n}", "func (m Mat2f) AddScalar(s float32) Mat2f {\n\treturn Mat2f{\n\t\tm[0] + s, m[1] + s,\n\t\tm[2] + s, m[3] + s}\n}", "func NewRandomScalar() (*r255.Scalar, error) {\n\ts := [64]byte{}\n\t_, err := rand.Read(s[:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tss := r255.NewScalar()\n\treturn ss.FromUniformBytes(s[:]), nil\n}", "func MultiplyScalar(a NumberArray, scalar float64) NumberArray {\n\tresultingMatrix, _ := NewMatrix(a.GetRows(), a.GetColumns())\n\tfor i := 0; i < a.GetRows(); i++ {\n\t\tfor j := 0; j < a.GetColumns(); j++ {\n\t\t\toperandA, _ := a.GetValue(i, j)\n\t\t\tresultingMatrix.SetValue(i, j, operandA*scalar)\n\t\t}\n\t}\n\treturn resultingMatrix\n}", "func (r *renderer) toTruthyValue(v reflect.Value) (reflect.Value, 
error) {\n\tswitch v.Kind() {\n\tcase reflect.Bool:\n\t\tif !v.Bool() {\n\t\t\treturn reflect.Value{}, nil\n\t\t}\n\t\treturn v, nil\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\tif v.Int() == 0 {\n\t\t\treturn reflect.Value{}, nil\n\t\t}\n\t\treturn v, nil\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:\n\t\tif v.Uint() == 0 {\n\t\t\treturn reflect.Value{}, nil\n\t\t}\n\t\treturn v, nil\n\tcase reflect.Float32, reflect.Float64:\n\t\tif math.Float64bits(v.Float()) == 0 {\n\t\t\treturn reflect.Value{}, nil\n\t\t}\n\t\treturn v, nil\n\tcase reflect.Complex64, reflect.Complex128:\n\t\tc := v.Complex()\n\t\tif math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 {\n\t\t\treturn reflect.Value{}, nil\n\t\t}\n\t\treturn v, nil\n\tcase reflect.String:\n\t\tif v.Len() == 0 {\n\t\t\treturn reflect.Value{}, nil\n\t\t}\n\t\treturn v, nil\n\tcase reflect.Array, reflect.Slice:\n\t\tif v.IsNil() || v.Len() == 0 {\n\t\t\treturn reflect.Value{}, nil\n\t\t}\n\t\treturn v, nil\n\tcase reflect.Func:\n\t\tif v.IsNil() {\n\t\t\treturn reflect.Value{}, nil\n\t\t}\n\t\tt := v.Type()\n\t\tisArity0 := t.NumIn() == 0 && t.NumOut() == 1\n\t\tif isArity0 {\n\t\t\tv = v.Call(nil)[0]\n\t\t\tif v.Kind() != reflect.String {\n\t\t\t\treturn r.toTruthyValue(v)\n\t\t\t}\n\t\t\ttree, err := parse.Parse(\"lambda\", v.String(), parse.DefaultLeftDelim, parse.DefaultRightDelim)\n\t\t\tif err != nil {\n\t\t\t\treturn reflect.Value{}, nil\n\t\t\t}\n\t\t\ts, err := r.renderToString(tree)\n\t\t\tif err != nil {\n\t\t\t\treturn reflect.Value{}, nil\n\t\t\t}\n\t\t\treturn r.toTruthyValue(reflect.ValueOf(s))\n\t\t}\n\t\tisArity1 := t.NumIn() == 1 && t.In(0).Kind() == reflect.String && t.NumOut() == 1 && t.Out(0).Kind() == reflect.String\n\t\tif isArity1 {\n\t\t\treturn v, nil\n\t\t}\n\t\treturn reflect.Value{}, nil\n\tcase reflect.Ptr, reflect.Interface:\n\t\treturn r.toTruthyValue(indirect(v))\n\tcase reflect.Map:\n\t\tif v.IsNil() {\n\t\t\treturn reflect.Value{}, nil\n\t\t}\n\t\treturn v, nil\n\tcase reflect.Struct:\n\t\treturn v, nil\n\tcase reflect.Invalid:\n\t\treturn reflect.Value{}, nil\n\tdefault:\n\t\treturn reflect.Value{}, nil\n\t}\n}", "func IsValueScalar(v reflect.Value) bool {\n\tif IsNilOrInvalidValue(v) {\n\t\treturn false\n\t}\n\tif IsValuePtr(v) {\n\t\tif v.IsNil() {\n\t\t\treturn false\n\t\t}\n\t\tv = v.Elem()\n\t}\n\treturn !IsValueStruct(v) && !IsValueMap(v) && !IsValueSlice(v)\n}", "func (ref *UIElement) CriticalValueAsFloat32() (float32, error) {\n\treturn ref.Float32Attr(CriticalValueAttribute)\n}", "func CoerceNumber(v Value) float64 {\n\tswitch vc := v.(type) {\n\tcase SafeValue:\n\t\treturn CoerceNumber(vc.Value())\n\tcase Number:\n\t\treturn vc.Number()\n\tcase uint:\n\t\treturn float64(vc)\n\tcase uint8:\n\t\treturn float64(vc)\n\tcase uint16:\n\t\treturn float64(vc)\n\tcase uint32:\n\t\treturn float64(vc)\n\tcase uint64:\n\t\treturn float64(vc)\n\tcase int:\n\t\treturn float64(vc)\n\tcase int8:\n\t\treturn float64(vc)\n\tcase int16:\n\t\treturn float64(vc)\n\tcase int32:\n\t\treturn float64(vc)\n\tcase int64:\n\t\treturn float64(vc)\n\tcase float32:\n\t\treturn float64(vc)\n\tcase float64:\n\t\treturn vc\n\tcase decimal.Decimal:\n\t\tf, _ := vc.Float64()\n\t\treturn f\n\tcase Stringer:\n\t\treturn stringToFloat(vc.String())\n\tcase string:\n\t\treturn stringToFloat(vc)\n\tcase Boolean:\n\t\tif vc.Boolean() {\n\t\t\treturn 1\n\t\t}\n\tcase bool:\n\t\tif vc {\n\t\t\treturn 1\n\t\t}\n\t}\n\treturn 0\n}", 
"func (v *Vec3i) SetSubScalar(s int32) {\n\tv.X -= s\n\tv.Y -= s\n\tv.Z -= s\n}", "func (f FieldConstraints) convertScalarAtPath(path document.Path, v document.Value, conversionFn ConversionFunc) (document.Value, error) {\n\tfor _, fc := range f {\n\t\tif !fc.Path.IsEqual(path) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// check if the constraint enforce a particular type\n\t\t// and if so convert the value to the new type.\n\t\tif fc.Type != 0 {\n\t\t\tnewV, err := conversionFn(v, fc.Path, fc.Type)\n\t\t\tif err != nil {\n\t\t\t\treturn v, err\n\t\t\t}\n\n\t\t\treturn newV, nil\n\t\t}\n\t\tbreak\n\t}\n\n\t// no constraint have been found for this path.\n\t// check if this is an integer and convert it to double.\n\tif v.Type == document.IntegerValue {\n\t\tnewV, _ := v.CastAsDouble()\n\t\treturn newV, nil\n\t}\n\n\treturn v, nil\n}", "func (mat Mat) MultiplyScalar(s float32) Mat {\n var temp [][]float32\n for i := 0 ; i < mat.Shape[0]; i++ {\n temp_r := make([]float32, mat.Shape[1])\n for j := 0 ; j < mat.Shape[1]; j++ {\n temp_r[j] = mat.Value[i][j] * s\n }\n temp = append(temp, temp_r)\n }\n return Mat{temp, mat.Shape}\n}", "func getValueOf(x interface{}) reflect.Value {\n\tvalue := reflect.ValueOf(x)\n\n\tif value.Kind() == reflect.Float64 {\n\t\tasfloat := value.Float()\n\t\tasint := int(asfloat)\n\t\tif float64(asint) == asfloat {\n\t\t\tvalue = reflect.ValueOf(asint)\n\t\t}\n\t}\n\n\treturn value\n}", "func (ns Float32) Value() (driver.Value, error) {\n\tif !ns.Valid {\n\t\treturn nil, nil\n\t}\n\treturn float64(ns.Float32), nil\n}", "func ValueDiv(a, b reflect.Value) (reflect.Value, error) {\n\taBkind := GetBaseKind(a)\n\tbBkind := GetBaseKind(b)\n\n\tswitch aBkind {\n\tcase reflect.Int64:\n\t\tswitch bBkind {\n\t\tcase reflect.Int64:\n\t\t\treturn reflect.ValueOf(a.Int() / b.Int()), nil\n\t\tcase reflect.Uint64:\n\t\t\treturn reflect.ValueOf(a.Int() / int64(b.Uint())), nil\n\t\tcase reflect.Float64:\n\t\t\treturn reflect.ValueOf(float64(a.Int()) / b.Float()), nil\n\t\tdefault:\n\t\t\treturn reflect.ValueOf(nil), errors.Errorf(\"Can not do division math operator between %s and %s\", a.Kind().String(), b.Kind().String())\n\t\t}\n\tcase reflect.Uint64:\n\t\tswitch bBkind {\n\t\tcase reflect.Int64:\n\t\t\treturn reflect.ValueOf(int64(a.Uint()) / b.Int()), nil\n\t\tcase reflect.Uint64:\n\t\t\treturn reflect.ValueOf(a.Uint() / b.Uint()), nil\n\t\tcase reflect.Float64:\n\t\t\treturn reflect.ValueOf(float64(a.Uint()) / b.Float()), nil\n\t\tdefault:\n\t\t\treturn reflect.ValueOf(nil), errors.Errorf(\"Can not do division math operator between %s and %s\", a.Kind().String(), b.Kind().String())\n\t\t}\n\tcase reflect.Float64:\n\t\tswitch bBkind {\n\t\tcase reflect.Int64:\n\t\t\treturn reflect.ValueOf(a.Float() / float64(b.Int())), nil\n\t\tcase reflect.Uint64:\n\t\t\treturn reflect.ValueOf(a.Float() / float64(b.Uint())), nil\n\t\tcase reflect.Float64:\n\t\t\treturn reflect.ValueOf(a.Float() / b.Float()), nil\n\t\tdefault:\n\t\t\treturn reflect.ValueOf(nil), errors.Errorf(\"Can not do division math operator between %s and %s\", a.Kind().String(), b.Kind().String())\n\t\t}\n\tdefault:\n\t\treturn reflect.ValueOf(nil), errors.Errorf(\"Can not do division math operator between %s and %s\", a.Kind().String(), b.Kind().String())\n\t}\n}", "func (n NullFloat64) Value() (driver.Value, error) {\n\tif n.V == nil {\n\t\treturn nil, nil\n\t}\n\n\treturn *n.V, nil\n}", "func (sf *ScalarFunction) Eval(row chunk.Row) (d types.Datum, err error) {\n\tvar (\n\t\tres interface{}\n\t\tisNull bool\n\t)\n\tswitch tp, evalType 
:= sf.GetType(), sf.GetType().EvalType(); evalType {\n\tcase types.ETInt:\n\t\tvar intRes int64\n\t\tintRes, isNull, err = sf.EvalInt(sf.GetCtx(), row)\n\t\tif mysql.HasUnsignedFlag(tp.Flag) {\n\t\t\tres = uint64(intRes)\n\t\t} else {\n\t\t\tres = intRes\n\t\t}\n\tcase types.ETString:\n\t\tres, isNull, err = sf.EvalString(sf.GetCtx(), row)\n\t}\n\n\tif isNull || err != nil {\n\t\td.SetNull()\n\t\treturn d, err\n\t}\n\td.SetValue(res, sf.RetType)\n\treturn\n}", "func (v *Vec3i) SetScalar(s int32) {\n\tv.X = s\n\tv.Y = s\n\tv.Z = s\n}", "func p256GetScalar(out []uint64, in []byte) {\n\tn := new(big.Int).SetBytes(in)\n\n\tif n.Cmp(p256.N) >= 0 {\n\t\tn.Mod(n, p256.N)\n\t}\n\tfromBig(out, n)\n}", "func Float64Value(obj Value) float64 {\n\tif p, ok := obj.(*Number); ok {\n\t\treturn p.Value\n\t}\n\treturn 0\n}", "func (vc *valueContainer) float64() floatValueContainer {\n\tnewVals := make([]float64, reflect.ValueOf(vc.slice).Len())\n\tisNull := vc.isNull\n\tswitch vc.slice.(type) {\n\tcase []float64:\n\t\tnewVals = vc.slice.([]float64)\n\n\tcase []string:\n\t\tarr := vc.slice.([]string)\n\t\tfor i := range arr {\n\t\t\tnewVals[i], isNull[i] = convertStringToFloat(arr[i], isNull[i])\n\t\t}\n\n\tcase [][]byte:\n\t\tarr := vc.slice.([][]byte)\n\t\tfor i := range arr {\n\t\t\tnewVals[i], isNull[i] = convertStringToFloat(string(arr[i]), isNull[i])\n\t\t}\n\n\tcase []time.Time, []civil.DateTime, []civil.Date, []civil.Time:\n\t\tfor i := range newVals {\n\t\t\tnewVals[i], isNull[i] = 0, true\n\t\t}\n\n\tcase []bool:\n\t\tarr := vc.slice.([]bool)\n\t\tfor i := range arr {\n\t\t\tnewVals[i] = convertBoolToFloat(arr[i])\n\t\t}\n\n\tcase []interface{}:\n\t\tarr := vc.slice.([]interface{})\n\t\tfor i := range arr {\n\t\t\tswitch arr[i].(type) {\n\t\t\tcase string:\n\t\t\t\tnewVals[i], isNull[i] = convertStringToFloat(arr[i].(string), isNull[i])\n\t\t\tcase float32, float64:\n\t\t\t\tnewVals[i] = reflect.ValueOf(arr[i]).Float()\n\t\t\tcase int, int8, int16, int32, int64:\n\t\t\t\tnewVals[i] = float64(reflect.ValueOf(arr[i]).Int())\n\t\t\tcase uint, uint8, uint16, uint32, uint64:\n\t\t\t\tnewVals[i] = float64(reflect.ValueOf(arr[i]).Uint())\n\t\t\tcase time.Time, civil.DateTime, civil.Date, civil.Time:\n\t\t\t\tnewVals[i], isNull[i] = 0, true\n\t\t\tcase bool:\n\t\t\t\tnewVals[i] = convertBoolToFloat(arr[i].(bool))\n\t\t\t}\n\t\t}\n\n\tcase []uint, []uint8, []uint16, []uint32, []uint64, []int, []int8, []int16, []int32, []int64, []float32:\n\t\td := reflect.ValueOf(vc.slice)\n\t\tfor i := 0; i < d.Len(); i++ {\n\t\t\tv := d.Index(i).Interface()\n\t\t\tnewVals[i], isNull[i] = convertStringToFloat(fmt.Sprint(v), isNull[i])\n\t\t}\n\tdefault:\n\t\tfor i := range newVals {\n\t\t\tnewVals[i] = 0\n\t\t\tisNull[i] = true\n\t\t}\n\t}\n\n\tret := floatValueContainer{\n\t\tisNull: isNull,\n\t\tslice: newVals,\n\t}\n\treturn ret\n}", "func ScalarDivision(m Multivector, d string) Multivector {\n\tm.E0.SetString(m.E0.Num().String() + \"/\" + d)\n\tm.E1.SetString(m.E1.Num().String() + \"/\" + d)\n\tm.E2.SetString(m.E2.Num().String() + \"/\" + d)\n\tm.E3.SetString(m.E3.Num().String() + \"/\" + d)\n\tm.E12.SetString(m.E12.Num().String() + \"/\" + d)\n\tm.E13.SetString(m.E13.Num().String() + \"/\" + d)\n\tm.E23.SetString(m.E23.Num().String() + \"/\" + d)\n\tm.E123.SetString(m.E123.Num().String() + \"/\" + d)\n\n\treturn m\n}", "func (urgb *UserRoleGroupBy) Float64(ctx context.Context) (_ float64, err error) {\n\tvar v []float64\n\tif v, err = urgb.Float64s(ctx); err != nil {\n\t\treturn\n\t}\n\tswitch len(v) {\n\tcase 
1:\n\t\treturn v[0], nil\n\tcase 0:\n\t\terr = &NotFoundError{userrole.Label}\n\tdefault:\n\t\terr = fmt.Errorf(\"ent: UserRoleGroupBy.Float64s returned %d results when one was expected\", len(v))\n\t}\n\treturn\n}", "func (g *interfaceGenerator) unmarshalPrimitiveScalar(accessor, typ, bufVar, typeCast string) {\n\tswitch typ {\n\tcase \"byte\":\n\t\tg.emit(\"*%s = %s(%s[0])\\n\", accessor, typeCast, bufVar)\n\tcase \"int8\", \"uint8\":\n\t\tg.emit(\"*%s = %s(%s(%s[0]))\\n\", accessor, typeCast, typ, bufVar)\n\tcase \"int16\", \"uint16\":\n\t\tg.recordUsedImport(\"usermem\")\n\t\tg.emit(\"*%s = %s(%s(usermem.ByteOrder.Uint16(%s[:2])))\\n\", accessor, typeCast, typ, bufVar)\n\tcase \"int32\", \"uint32\":\n\t\tg.recordUsedImport(\"usermem\")\n\t\tg.emit(\"*%s = %s(%s(usermem.ByteOrder.Uint32(%s[:4])))\\n\", accessor, typeCast, typ, bufVar)\n\tcase \"int64\", \"uint64\":\n\t\tg.recordUsedImport(\"usermem\")\n\t\tg.emit(\"*%s = %s(%s(usermem.ByteOrder.Uint64(%s[:8])))\\n\", accessor, typeCast, typ, bufVar)\n\tdefault:\n\t\tg.emit(\"// Explicilty cast to the underlying type before dispatching to\\n\")\n\t\tg.emit(\"// UnmarshalBytes, so we don't recursively call %s.UnmarshalBytes\\n\", accessor)\n\t\tg.emit(\"inner := (*%s)(%s)\\n\", typ, accessor)\n\t\tg.emit(\"inner.UnmarshalBytes(%s[:%s.SizeBytes()])\\n\", bufVar, accessor)\n\t}\n}", "func (r *Redis) Float64(reply interface{}, err error) (float64, error) {\n\treturn redigo.Float64(reply, err)\n}", "func ValueSub(a, b reflect.Value) (reflect.Value, error) {\n\taBkind := GetBaseKind(a)\n\tbBkind := GetBaseKind(b)\n\n\tswitch aBkind {\n\tcase reflect.Int64:\n\t\tswitch bBkind {\n\t\tcase reflect.Int64:\n\t\t\treturn reflect.ValueOf(a.Int() - b.Int()), nil\n\t\tcase reflect.Uint64:\n\t\t\treturn reflect.ValueOf(a.Int() - int64(b.Uint())), nil\n\t\tcase reflect.Float64:\n\t\t\treturn reflect.ValueOf(float64(a.Int()) - b.Float()), nil\n\t\tdefault:\n\t\t\treturn reflect.ValueOf(nil), errors.Errorf(\"Can not do subtraction math operator between %s and %s\", a.Kind().String(), b.Kind().String())\n\t\t}\n\tcase reflect.Uint64:\n\t\tswitch bBkind {\n\t\tcase reflect.Int64:\n\t\t\treturn reflect.ValueOf(int64(a.Uint()) - b.Int()), nil\n\t\tcase reflect.Uint64:\n\t\t\treturn reflect.ValueOf(a.Uint() - b.Uint()), nil\n\t\tcase reflect.Float64:\n\t\t\treturn reflect.ValueOf(float64(a.Uint()) - b.Float()), nil\n\t\tdefault:\n\t\t\treturn reflect.ValueOf(nil), errors.Errorf(\"Can not do subtraction math operator between %s and %s\", a.Kind().String(), b.Kind().String())\n\t\t}\n\tcase reflect.Float64:\n\t\tswitch bBkind {\n\t\tcase reflect.Int64:\n\t\t\treturn reflect.ValueOf(a.Float() - float64(b.Int())), nil\n\t\tcase reflect.Uint64:\n\t\t\treturn reflect.ValueOf(a.Float() - float64(b.Uint())), nil\n\t\tcase reflect.Float64:\n\t\t\treturn reflect.ValueOf(a.Float() - b.Float()), nil\n\t\tdefault:\n\t\t\treturn reflect.ValueOf(nil), errors.Errorf(\"Can not do subtraction math operator between %s and %s\", a.Kind().String(), b.Kind().String())\n\t\t}\n\tdefault:\n\t\treturn reflect.ValueOf(nil), errors.Errorf(\"Can not do subtraction math operator between %s and %s\", a.Kind().String(), b.Kind().String())\n\t}\n}", "func (v Float) ToLiteralValue() (schema.LiteralValue, error) {\n\tif !v.present {\n\t\treturn nil, nil\n\t}\n\treturn schema.LiteralNumber(v.v), nil\n}", "func ScalarC(tst *testing.T, msg string, tolNorm float64, res, correct complex128) {\n\tCheckAndPrint(tst, msg, tolNorm, cmplx.Abs(res-correct))\n}", "func (urs *UserRoleSelect) Float64(ctx 
context.Context) (_ float64, err error) {\n\tvar v []float64\n\tif v, err = urs.Float64s(ctx); err != nil {\n\t\treturn\n\t}\n\tswitch len(v) {\n\tcase 1:\n\t\treturn v[0], nil\n\tcase 0:\n\t\terr = &NotFoundError{userrole.Label}\n\tdefault:\n\t\terr = fmt.Errorf(\"ent: UserRoleSelect.Float64s returned %d results when one was expected\", len(v))\n\t}\n\treturn\n}", "func (n *Int64Wrapper) Value() (Value, error) {\n\tif !n.Valid {\n\t\treturn nil, nil\n\t}\n\treturn n.Int64, nil\n}", "func (mat Mat) AddScalar(s float32) Mat {\n var temp [][]float32\n for i := 0 ; i < mat.Shape[0]; i++ {\n temp_r := make([]float32, mat.Shape[1])\n for j := 0 ; j < mat.Shape[1]; j++ {\n temp_r[j] = mat.Value[i][j] + s\n }\n temp = append(temp, temp_r)\n }\n return Mat{temp, mat.Shape}\n}", "func (r *Decoder) Value() constant.Value {\n\tr.Sync(SyncValue)\n\tisComplex := r.Bool()\n\tval := r.scalar()\n\tif isComplex {\n\t\tval = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar()))\n\t}\n\treturn val\n}", "func (e *Engine) LtScalar(a tensor.Tensor, b interface{}, leftTensor bool, opts ...tensor.FuncOpt) (retVal tensor.Tensor, err error) {\n\tname := constructName1(a, leftTensor, \"lt\")\n\tif !e.HasFunc(name) {\n\t\treturn nil, errors.Errorf(\"Unable to perform LtScalar(). The tensor engine does not have the function %q\", name)\n\t}\n\n\tvar bMem tensor.Memory\n\tvar ok bool\n\tif bMem, ok = b.(tensor.Memory); !ok {\n\t\treturn nil, errors.Errorf(\"b has to be a tensor.Memory. Got %T instead\", b)\n\t}\n\n\tif err = unaryCheck(a); err != nil {\n\t\treturn nil, errors.Wrap(err, \"Basic checks failed for LtScalar\")\n\t}\n\n\tvar reuse tensor.DenseTensor\n\tvar safe, toReuse bool\n\tif reuse, safe, toReuse, _, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {\n\t\treturn nil, errors.Wrap(err, \"Unable to handle funcOpts\")\n\t}\n\n\tvar mem, memB cu.DevicePtr\n\tvar size int64\n\n\tswitch {\n\tcase toReuse:\n\t\tmem = cu.DevicePtr(reuse.Uintptr())\n\t\tmemA := cu.DevicePtr(a.Uintptr())\n\t\tmemSize := int64(a.MemSize())\n\t\te.memcpy(mem, memA, memSize)\n\n\t\tsize = int64(logicalSize(reuse.Shape()))\n\t\tretVal = reuse\n\tcase !safe:\n\t\tmem = cu.DevicePtr(a.Uintptr())\n\t\tretVal = a\n\t\tsize = int64(logicalSize(a.Shape()))\n\tdefault:\n\t\treturn nil, errors.New(\"Impossible state: A reuse tensor must be passed in, or the operation must be unsafe. Incr and safe operations are not supported\")\n\t}\n\n\tmemB = cu.DevicePtr(bMem.Uintptr())\n\tif !leftTensor {\n\t\tmem, memB = memB, mem\n\t}\n\n\tfn := e.f[name]\n\tgridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ := e.ElemGridSize(int(size))\n\targs := []unsafe.Pointer{\n\t\tunsafe.Pointer(&mem),\n\t\tunsafe.Pointer(&memB),\n\t\tunsafe.Pointer(&size),\n\t}\n\tlogf(\"gx %d, gy %d, gz %d | bx %d by %d, bz %d\", gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ)\n\tlogf(\"CUDADO %q, Mem: %v size %v, args %v\", name, mem, size, args)\n\tlogf(\"LaunchKernel Params. mem: %v. 
Size %v\", mem, size)\n\te.c.LaunchAndSync(fn, gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ, 0, cu.NoStream, args)\n\treturn\n}", "func (v Value) Float() (float64, error) {\n\tswitch v.Value.Kind() {\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn float64(v.Value.Int()), nil\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn v.Value.Float(), nil\n\t}\n\treturn 0, fmt.Errorf(\"Kind %s is not a float\", v.Value.Kind())\n}", "func (v Vector3) ScalarMult(scalar float64) Vector3 {\n\treturn Vector3{X: scalar * v.X, Y: scalar * v.Y, Z: scalar * v.Z}\n}", "func (c *curve) Scalar() kyber.Scalar {\n\treturn mod.NewInt64(0, &c.order.V)\n}" ]
[ "0.5818953", "0.5651207", "0.5634202", "0.56036615", "0.5513424", "0.5465434", "0.5297905", "0.52545047", "0.51847273", "0.5168473", "0.5146385", "0.5110239", "0.5108384", "0.5072312", "0.502686", "0.49844697", "0.49761152", "0.49742618", "0.49596387", "0.4956184", "0.49418354", "0.49091914", "0.49028656", "0.4882837", "0.48412305", "0.48327363", "0.48182908", "0.48096958", "0.47762293", "0.47732797", "0.47633153", "0.47578952", "0.4750263", "0.4742641", "0.47246614", "0.47117907", "0.46962768", "0.46739054", "0.46530232", "0.46425995", "0.46352616", "0.46224472", "0.4618038", "0.46126118", "0.46121353", "0.46119753", "0.4581244", "0.45598903", "0.45551845", "0.45551118", "0.45540252", "0.45342848", "0.45266584", "0.45261383", "0.4522485", "0.45210683", "0.4480288", "0.44652116", "0.44640768", "0.44553947", "0.44436994", "0.4442329", "0.4438592", "0.44336027", "0.443108", "0.4425615", "0.44210872", "0.4419073", "0.44086906", "0.44079843", "0.4384533", "0.4382131", "0.4381121", "0.4379256", "0.43634254", "0.43599233", "0.4351057", "0.43505815", "0.43504074", "0.43470782", "0.43448246", "0.4339172", "0.43384895", "0.43377835", "0.43294325", "0.43285552", "0.4327275", "0.43208036", "0.4318384", "0.4316808", "0.4299591", "0.4298208", "0.42973053", "0.42953083", "0.4294373", "0.42920592", "0.42857867", "0.42854148", "0.42742938", "0.4271523" ]
0.8668246
0
exactSumFor takes a runtime/metrics metric name (that is assumed to be of kind KindFloat64Histogram) and returns its exact sum and whether its exact sum exists. The runtime/metrics API for histograms doesn't currently expose exact sums, but some of the other metrics are in fact exact sums of histograms.
func (c *goCollector) exactSumFor(rmName string) float64 {
	sumName, ok := c.rmExactSumMapForHist[rmName]
	if !ok {
		return 0
	}
	s, ok := c.sampleMap[sumName]
	if !ok {
		return 0
	}
	return unwrapScalarRMValue(s.Value)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (*HistogramMetric) IsMetric() {}", "func ReadHistogramSum(histogram prometheus.Histogram) float64 {\n\tvar metric dto.Metric\n\tif err := histogram.Write(&metric); err != nil {\n\t\treturn math.NaN()\n\t}\n\treturn metric.Histogram.GetSampleSum()\n}", "func (*SummaryMetric) IsMetric() {}", "func (ms HistogramDataPoint) Sum() float64 {\n\treturn (*ms.orig).Sum\n}", "func (me TxsdAnimAdditionAttrsAccumulate) IsSum() bool { return me.String() == \"sum\" }", "func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {\n\tinVec := vals[0].(Vector)\n\n\tfor _, sample := range inVec {\n\t\t// Skip non-histogram samples.\n\t\tif sample.H == nil {\n\t\t\tcontinue\n\t\t}\n\t\tenh.Out = append(enh.Out, Sample{\n\t\t\tMetric: enh.DropMetricName(sample.Metric),\n\t\t\tF: sample.H.Sum,\n\t\t})\n\t}\n\treturn enh.Out\n}", "func (me TxsdAnimAdditionAttrsAdditive) IsSum() bool { return me.String() == \"sum\" }", "func (m *Map) ReduceFloat64Sum(reduce func(map[interface{}]interface{}) float64) float64 {\n\tresult := float64(0)\n\tsplits := m.splits\n\tfor i := 0; i < len(splits); i++ {\n\t\tresult += splits[i].reduceFloat64(reduce)\n\t}\n\treturn result\n}", "func TestNewBoundedSumFnTemp(t *testing.T) {\n\topts := []cmp.Option{\n\t\tcmpopts.EquateApprox(0, 1e-10),\n\t\tcmpopts.IgnoreUnexported(boundedSumFloat64Fn{}, boundedSumInt64Fn{}),\n\t}\n\tfor _, tc := range []struct {\n\t\tdesc string\n\t\tnoiseKind noise.Kind\n\t\tvKind reflect.Kind\n\t\taggregationEpsilon float64\n\t\taggregationDelta float64\n\t\tpartitionSelectionEpsilon float64\n\t\tpartitionSelectionDelta float64\n\t\tpreThreshold int64\n\t\tlower float64\n\t\tupper float64\n\t\twantErr bool\n\t\twant any\n\t}{\n\t\t{\"Laplace Float64\", noise.LaplaceNoise, reflect.Float64, 0.5, 0, 0.5, 1e-5, 0, 0, 10, false,\n\t\t\t&boundedSumFloat64Fn{\n\t\t\t\tNoiseEpsilon: 0.5,\n\t\t\t\tNoiseDelta: 0,\n\t\t\t\tPartitionSelectionEpsilon: 0.5,\n\t\t\t\tPartitionSelectionDelta: 1e-5,\n\t\t\t\tMaxPartitionsContributed: 17,\n\t\t\t\tLower: 0,\n\t\t\t\tUpper: 10,\n\t\t\t\tNoiseKind: noise.LaplaceNoise,\n\t\t\t\tPublicPartitions: false,\n\t\t\t}},\n\t\t{\"Gaussian Float64\", noise.GaussianNoise, reflect.Float64, 0.5, 1e-5, 0.5, 1e-5, 0, 0, 10, false,\n\t\t\t&boundedSumFloat64Fn{\n\t\t\t\tNoiseEpsilon: 0.5,\n\t\t\t\tNoiseDelta: 1e-5,\n\t\t\t\tPartitionSelectionEpsilon: 0.5,\n\t\t\t\tPartitionSelectionDelta: 1e-5,\n\t\t\t\tMaxPartitionsContributed: 17,\n\t\t\t\tLower: 0,\n\t\t\t\tUpper: 10,\n\t\t\t\tNoiseKind: noise.GaussianNoise,\n\t\t\t\tPublicPartitions: false,\n\t\t\t}},\n\t\t{\"Laplace Int64\", noise.LaplaceNoise, reflect.Int64, 0.5, 0, 0.5, 1e-5, 0, 0, 10, false,\n\t\t\t&boundedSumInt64Fn{\n\t\t\t\tNoiseEpsilon: 0.5,\n\t\t\t\tNoiseDelta: 0,\n\t\t\t\tPartitionSelectionEpsilon: 0.5,\n\t\t\t\tPartitionSelectionDelta: 1e-5,\n\t\t\t\tMaxPartitionsContributed: 17,\n\t\t\t\tLower: 0,\n\t\t\t\tUpper: 10,\n\t\t\t\tNoiseKind: noise.LaplaceNoise,\n\t\t\t\tPublicPartitions: false,\n\t\t\t}},\n\t\t{\"Gaussian Int64\", noise.GaussianNoise, reflect.Int64, 0.5, 1e-5, 0.5, 1e-5, 0, 0, 10, false,\n\t\t\t&boundedSumInt64Fn{\n\t\t\t\tNoiseEpsilon: 0.5,\n\t\t\t\tNoiseDelta: 1e-5,\n\t\t\t\tPartitionSelectionEpsilon: 0.5,\n\t\t\t\tPartitionSelectionDelta: 1e-5,\n\t\t\t\tMaxPartitionsContributed: 17,\n\t\t\t\tLower: 0,\n\t\t\t\tUpper: 10,\n\t\t\t\tNoiseKind: noise.GaussianNoise,\n\t\t\t\tPublicPartitions: false,\n\t\t\t}},\n\t\t{\"PreThreshold set Int64\", noise.GaussianNoise, reflect.Int64, 0.5, 1e-5, 0.5, 1e-5, 10, 0, 10, 
false,\n\t\t\t&boundedSumInt64Fn{\n\t\t\t\tNoiseEpsilon: 0.5,\n\t\t\t\tNoiseDelta: 1e-5,\n\t\t\t\tPartitionSelectionEpsilon: 0.5,\n\t\t\t\tPartitionSelectionDelta: 1e-5,\n\t\t\t\tPreThreshold: 10,\n\t\t\t\tMaxPartitionsContributed: 17,\n\t\t\t\tLower: 0,\n\t\t\t\tUpper: 10,\n\t\t\t\tNoiseKind: noise.GaussianNoise,\n\t\t\t\tPublicPartitions: false,\n\t\t\t}},\n\t\t{\"PreThreshold set Float64\", noise.GaussianNoise, reflect.Float64, 0.5, 1e-5, 0.5, 1e-5, 10, 0, 10, false,\n\t\t\t&boundedSumFloat64Fn{\n\t\t\t\tNoiseEpsilon: 0.5,\n\t\t\t\tNoiseDelta: 1e-5,\n\t\t\t\tPartitionSelectionEpsilon: 0.5,\n\t\t\t\tPartitionSelectionDelta: 1e-5,\n\t\t\t\tPreThreshold: 10,\n\t\t\t\tMaxPartitionsContributed: 17,\n\t\t\t\tLower: 0,\n\t\t\t\tUpper: 10,\n\t\t\t\tNoiseKind: noise.GaussianNoise,\n\t\t\t\tPublicPartitions: false,\n\t\t\t}},\n\t\t{\"lower > upper\", noise.GaussianNoise, reflect.Int64, 0.5, 1e-5, 0.5, 1e-5, 0, 10, 0, true, nil},\n\t\t{\"Float64 bounds that overflow when converted to int64\", noise.GaussianNoise, reflect.Int64, 0.5, 1e-5, 0.5, 1e-5, 0, 0, 1e100, true, nil},\n\t} {\n\t\tgot, err := newBoundedSumFnTemp(PrivacySpec{preThreshold: tc.preThreshold, testMode: Disabled},\n\t\t\tSumParams{\n\t\t\t\tAggregationEpsilon: tc.aggregationEpsilon,\n\t\t\t\tAggregationDelta: tc.aggregationDelta,\n\t\t\t\tPartitionSelectionParams: PartitionSelectionParams{Epsilon: tc.partitionSelectionEpsilon, Delta: tc.partitionSelectionDelta},\n\t\t\t\tMaxPartitionsContributed: 17,\n\t\t\t\tMinValue: tc.lower,\n\t\t\t\tMaxValue: tc.upper,\n\t\t\t}, tc.noiseKind, tc.vKind, false)\n\t\tif (err != nil) != tc.wantErr {\n\t\t\tt.Fatalf(\"With %s, got=%v, wantErr=%t\", tc.desc, err, tc.wantErr)\n\t\t}\n\t\tif diff := cmp.Diff(tc.want, got, opts...); diff != \"\" {\n\t\t\tt.Errorf(\"newBoundedSumFn mismatch for '%s' (-want +got):\\n%s\", tc.desc, diff)\n\t\t}\n\t}\n}", "func DoSum() float64", "func TestHistogramMatches(t *testing.T) {\n\th1 := Histogram{\n\t\tSchema: 3,\n\t\tCount: 61,\n\t\tSum: 2.7,\n\t\tZeroThreshold: 0.1,\n\t\tZeroCount: 42,\n\t\tPositiveSpans: []Span{\n\t\t\t{Offset: 0, Length: 4},\n\t\t\t{Offset: 10, Length: 3},\n\t\t},\n\t\tPositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},\n\t\tNegativeSpans: []Span{\n\t\t\t{Offset: 0, Length: 4},\n\t\t\t{Offset: 10, Length: 3},\n\t\t},\n\t\tNegativeBuckets: []int64{1, 2, -2, 1, -1, 0, 0},\n\t}\n\n\tequals := func(h1, h2 Histogram) {\n\t\trequire.True(t, h1.Equals(&h2))\n\t\trequire.True(t, h2.Equals(&h1))\n\t\th1f, h2f := h1.ToFloat(), h2.ToFloat()\n\t\trequire.True(t, h1f.Equals(h2f))\n\t\trequire.True(t, h2f.Equals(h1f))\n\t}\n\tnotEquals := func(h1, h2 Histogram) {\n\t\trequire.False(t, h1.Equals(&h2))\n\t\trequire.False(t, h2.Equals(&h1))\n\t\th1f, h2f := h1.ToFloat(), h2.ToFloat()\n\t\trequire.False(t, h1f.Equals(h2f))\n\t\trequire.False(t, h2f.Equals(h1f))\n\t}\n\n\th2 := h1.Copy()\n\tequals(h1, *h2)\n\n\t// Changed spans but same layout.\n\th2.PositiveSpans = append(h2.PositiveSpans, Span{Offset: 5})\n\th2.NegativeSpans = append(h2.NegativeSpans, Span{Offset: 2})\n\tequals(h1, *h2)\n\t// Adding empty spans in between.\n\th2.PositiveSpans[1].Offset = 6\n\th2.PositiveSpans = []Span{\n\t\th2.PositiveSpans[0],\n\t\t{Offset: 1},\n\t\t{Offset: 3},\n\t\th2.PositiveSpans[1],\n\t\th2.PositiveSpans[2],\n\t}\n\th2.NegativeSpans[1].Offset = 5\n\th2.NegativeSpans = []Span{\n\t\th2.NegativeSpans[0],\n\t\t{Offset: 2},\n\t\t{Offset: 3},\n\t\th2.NegativeSpans[1],\n\t\th2.NegativeSpans[2],\n\t}\n\tequals(h1, *h2)\n\n\t// All mismatches.\n\tnotEquals(h1, 
Histogram{})\n\n\th2.Schema = 1\n\tnotEquals(h1, *h2)\n\n\th2 = h1.Copy()\n\th2.Count++\n\tnotEquals(h1, *h2)\n\n\th2 = h1.Copy()\n\th2.Sum++\n\tnotEquals(h1, *h2)\n\n\th2 = h1.Copy()\n\th2.ZeroThreshold++\n\tnotEquals(h1, *h2)\n\n\th2 = h1.Copy()\n\th2.ZeroCount++\n\tnotEquals(h1, *h2)\n\n\t// Changing value of buckets.\n\th2 = h1.Copy()\n\th2.PositiveBuckets[len(h2.PositiveBuckets)-1]++\n\tnotEquals(h1, *h2)\n\th2 = h1.Copy()\n\th2.NegativeBuckets[len(h2.NegativeBuckets)-1]++\n\tnotEquals(h1, *h2)\n\n\t// Changing bucket layout.\n\th2 = h1.Copy()\n\th2.PositiveSpans[1].Offset++\n\tnotEquals(h1, *h2)\n\th2 = h1.Copy()\n\th2.NegativeSpans[1].Offset++\n\tnotEquals(h1, *h2)\n\n\t// Adding an empty bucket.\n\th2 = h1.Copy()\n\th2.PositiveSpans[0].Offset--\n\th2.PositiveSpans[0].Length++\n\th2.PositiveBuckets = append([]int64{0}, h2.PositiveBuckets...)\n\tnotEquals(h1, *h2)\n\th2 = h1.Copy()\n\th2.NegativeSpans[0].Offset--\n\th2.NegativeSpans[0].Length++\n\th2.NegativeBuckets = append([]int64{0}, h2.NegativeBuckets...)\n\tnotEquals(h1, *h2)\n\n\t// Adding new bucket.\n\th2 = h1.Copy()\n\th2.PositiveSpans = append(h2.PositiveSpans, Span{\n\t\tOffset: 1,\n\t\tLength: 1,\n\t})\n\th2.PositiveBuckets = append(h2.PositiveBuckets, 1)\n\tnotEquals(h1, *h2)\n\th2 = h1.Copy()\n\th2.NegativeSpans = append(h2.NegativeSpans, Span{\n\t\tOffset: 1,\n\t\tLength: 1,\n\t})\n\th2.NegativeBuckets = append(h2.NegativeBuckets, 1)\n\tnotEquals(h1, *h2)\n}", "func (h *Hash) Sum64() (uint64, bool) {\n\th64, ok := h.Hash.(hash.Hash64)\n\tif !ok {\n\t\treturn 0, false\n\t}\n\n\treturn h64.Sum64(), true\n}", "func (a *Assertions) AnyOfFloat64(target []float64, predicate PredicateOfFloat, userMessageComponents ...interface{}) bool {\n\ta.assertion()\n\tif didFail, message := shouldAnyOfFloat(target, predicate); didFail {\n\t\treturn a.fail(message, userMessageComponents...)\n\t}\n\treturn true\n}", "func SampleSum(value interface{}) (float64, bool) {\n\tswitch v := value.(type) {\n\tcase float64:\n\t\treturn v, true\n\tcase int64:\n\t\treturn float64(v), true\n\tcase uint64:\n\t\treturn float64(v), true\n\tdefault:\n\t\treturn 0, false\n\t}\n}", "func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }", "func (r *Registry) Contains(name string) bool {\n\tr.mtx.RLock()\n\t_, found := r.metricNames[name]\n\tr.mtx.RUnlock()\n\treturn found\n}", "func Sum(m types.Metadata) (string, error) {\n\t// Open the file.\n\tfile, err := os.Open(m.File())\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Defer closing it.\n\tdefer file.Close()\n\n\t// Make a new hash object.\n\thash := sha256.New()\n\n\t// Copy the file to the hasher object.\n\t_, err = io.Copy(hash, file)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Calculate the hash and return it.\n\treturn hex.EncodeToString(hash.Sum(nil)), err\n}", "func (s *UniformSample) Sum() int64 {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\treturn gometrics.SampleSum(s.values)\n}", "func (s *Uint64) ContainsAny(vals ...uint64) bool {\n\tfor _, v := range vals {\n\t\tif _, ok := s.m[v]; ok {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (m *MockHistogram) Equals(other Histogram) bool {\n\targs := m.Called()\n\treturn args.Bool(0)\n}", "func HistogramSummary(scope *Scope, tag tf.Output, values tf.Output) (summary tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"HistogramSummary\",\n\t\tInput: []tf.Input{\n\t\t\ttag, values,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}", "func (info 
Scalar) SumFloat64(e *Config, key *keys.Float64) {\n\tdata := &Float64Data{Info: &info, key: key}\n\te.subscribe(key, data.sum)\n}", "func TestSum(t *testing.T) {\n\ttestMap := [][]int{\n\t\t{1, 1, 2},\n\t\t{2, 2, 4},\n\t\t{4, 4, 8},\n\t\t{5, 15, 20},\n\t}\n\n\tfor _, v := range testMap {\n\t\tif i := Sum(v[0], v[1]); i != v[2] {\n\t\t\tt.Errorf(\"Error at Sum(%d, %d) returned %d\", v[0], v[1], i)\n\t\t}\n\t}\n}", "func TermIsSum(t TermT) bool {\n\treturn C.yices_term_is_sum(C.term_t(t)) == C.int32_t(1)\n}", "func (a *Assertions) AllOfFloat64(target []float64, predicate PredicateOfFloat, userMessageComponents ...interface{}) bool {\n\ta.assertion()\n\tif didFail, message := shouldAllOfFloat(target, predicate); didFail {\n\t\treturn a.fail(message, userMessageComponents...)\n\t}\n\treturn true\n}", "func (ms SummaryDataPoint) Sum() float64 {\n\treturn (*ms.orig).Sum\n}", "func TestGetSum(t *testing.T) {\n\tperson := Answer{10, []int{2, 3, 4, 1}}\n\n\ts := GetSum(person.result)\n\tif s != person.Guess {\n\t\tt.Error(\"your guess is false:\", person.Guess, \"should be:\", s)\n\t}\n}", "func (s Signals) MatchesAny(ctx context.Context, pullCtx pull.Context, tag string) (bool, string, error) {\n\tif !s.Enabled() {\n\t\treturn false, fmt.Sprintf(\"no %s signals provided to match against\", tag), nil\n\t}\n\n\tsignals := []Signal{\n\t\t&s.Labels,\n\t\t&s.CommentSubstrings,\n\t\t&s.Comments,\n\t\t&s.PRBodySubstrings,\n\t\t&s.Branches,\n\t\t&s.BranchPatterns,\n\t\t&s.AutoMerge,\n\t}\n\n\tfor _, signal := range signals {\n\t\tmatches, description, err := signal.Matches(ctx, pullCtx, tag)\n\t\tif err != nil {\n\t\t\treturn false, \"\", err\n\t\t}\n\n\t\tif matches {\n\t\t\treturn true, description, nil\n\t\t}\n\t}\n\n\treturn false, fmt.Sprintf(\"pull request does not match the %s\", tag), nil\n}", "func (d Digest) UsesDigestFunction(f Function) bool {\n\tdigestFunction, _, _, sizeBytesEnd := d.unpack()\n\treturn digestFunction == f.bareFunction.enumValue && d.value[sizeBytesEnd+1:] == f.instanceName.value\n}", "func (tf *TracingFingerprint) Sum() []byte {\n\treturn tf.Fingerprint.Sum()\n}", "func (h xxhasher) Sum64(key string) uint64 {\n\treturn xxhash.Sum64String(key)\n}", "func (fw *LocalClient) hashsum(relpath string) string {\n\n\tabspath := filepath.Join(fw.rootDir, relpath)\n\tcheck, err := utils.CheckSum(abspath)\n\tcheckErr(err)\n\treturn check\n}", "func (h *Handler) GetMetricSum(w http.ResponseWriter, r *http.Request) {\n\tvar b []byte\n\tkey := mux.Vars(r)[\"key\"]\n\n\tresp := response{}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tcachedVal, ok := h.MetricsCache.Get(key)\n\tif !ok {\n\t\tb, _ = json.Marshal(resp)\n\t\tw.Write(b)\n\t\treturn\n\t}\n\n\tcachedList := cachedVal.(*list.List)\n\tnewList := list.New()\n\n\tfor element := cachedList.Front(); element != nil; element = element.Next() {\n\t\tdata := element.Value.(metricData)\n\t\tmetricTime := data.time\n\t\tvalidMetricTime := metricTime.Add(h.InstrumentationTimeInSeconds * time.Second)\n\n\t\tif validMetricTime.After(time.Now()) {\n\t\t\tresp.Value = resp.Value + data.value\n\t\t\tdata := metricData{value: data.value, time: data.time}\n\n\t\t\tnewList.PushBack(data)\n\t\t} else {\n\t\t\th.MetricsCache.Set(key, newList, cache.NoExpiration)\n\t\t\tbreak\n\t\t}\n\t}\n\n\tb, _ = json.Marshal(resp)\n\tw.Write(b)\n\n\treturn\n}", "func IsUint64SumOverflow(a, b uint64) bool {\n\treturn math.MaxUint64-a < b\n}", "func (h *Hash) Sum() string {\n\tvar result [64]byte\n\thex.Encode(result[:], 
h.hmac.Sum(nil))\n\treturn string(result[:])\n}", "func (*SimpleMetric) IsMetric() {}", "func CalcBlobSum(chunks []string) string {\n\tvar blobsum string\n\tfor _, v := range chunks {\n\t\tblobsum += v\n\t}\n\treturn utils.GetDigestStr(blobsum)\n}", "func Sum(ctx context.Context, c *cli.Context, store storer) error {\n\tname := c.Args().First()\n\tif name == \"\" {\n\t\treturn action.ExitError(ctx, action.ExitUsage, nil, \"Usage: %s sha256 name\", c.App.Name)\n\t}\n\n\tif !strings.HasSuffix(name, Suffix) {\n\t\tname += Suffix\n\t}\n\n\tbuf, err := binaryGet(ctx, name, store)\n\tif err != nil {\n\t\treturn action.ExitError(ctx, action.ExitDecrypt, err, \"failed to read secret: %s\", err)\n\t}\n\n\th := sha256.New()\n\t_, _ = h.Write(buf)\n\tout.Yellow(ctx, \"%x\", h.Sum(nil))\n\n\treturn nil\n}", "func (d *RabinKarp64) Sum64() uint64 {\n\treturn uint64(d.value)\n}", "func (s *Int64) ContainsAny(vals ...int64) bool {\n\tfor _, v := range vals {\n\t\tif _, ok := s.m[v]; ok {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (bm *BlobsManifest) GetBlobSum() string {\n\treturn CalcBlobSum(bm.Chunks)\n}", "func TestSum(t *testing.T) {\n\tt.Run(\"correct matrix\", func(t *testing.T) {\n\t\ttestMatrix := getTestMatrix_Ok()\n\t\tgot, _ := Sum(testMatrix)\n\t\twant := \"45\"\n\n\t\tif got != want {\n\t\t\tt.Errorf(\"got %q want %q given, %q\", got, want, testMatrix)\n\t\t}\n\t})\n\n\tt.Run(\"nil matrix\", func(t *testing.T) {\n\t\ttestMatrix := getTestMatrix_Nil()\n\t\t_, got := Sum(testMatrix)\n\t\twant := errors.New(\"matrix is nil\")\n\n\t\tif got != want {\n\t\t\tt.Errorf(\"got %q want %q given, %q\", got, want, testMatrix)\n\t\t}\n\t})\n\n\tt.Run(\"empty matrix\", func(t *testing.T) {\n\t\ttestMatrix := getTestMatrix_Empty()\n\t\t_, got := Sum(testMatrix)\n\t\twant := errors.New(\"matrix has no elements\")\n\n\t\tif got != want {\n\t\t\tt.Errorf(\"got %q want %q given, %q\", got, want, testMatrix)\n\t\t}\n\t})\n\n\tt.Run(\"non square matrix\", func(t *testing.T) {\n\t\ttestMatrix := getTestMatrix_NonSquare()\n\t\t_, got := Sum(testMatrix)\n\t\twant := errors.New(\"matrix must be a square matrix\")\n\n\t\tif got != want {\n\t\t\tt.Errorf(\"got %q want %q given, %q\", got, want, testMatrix)\n\t\t}\n\t})\n\n\tt.Run(\"non integer matrix\", func(t *testing.T) {\n\t\ttestMatrix := getTestMatrix_NonInteger()\n\t\t_, got := Sum(testMatrix)\n\t\twant := errors.New(\"matrix must have only integer elements\")\n\n\t\tif got != want {\n\t\t\tt.Errorf(\"got %q want %q given, %q\", got, want, testMatrix)\n\t\t}\n\t})\n\n\tt.Run(\"big matrix\", func(t *testing.T) {\n\t\ttestMatrix := getTestMatrix_Big()\n\t\tgot, _ := Sum(testMatrix)\n\t\twant := \"19327352824\"\n\n\t\tif got != want {\n\t\t\tt.Errorf(\"got %q want %q given, %q\", got, want, testMatrix)\n\t\t}\n\t})\n}", "func WriteHistogramSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, values tf.Output) (o *tf.Operation) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"WriteHistogramSummary\",\n\t\tInput: []tf.Input{\n\t\t\twriter, step, tag, values,\n\t\t},\n\t}\n\treturn scope.AddOperation(opspec)\n}", "func sameHash(a, b func() hash.Hash) bool {\n\tha := a().Sum(make([]byte, 0))\n\thb := b().Sum(make([]byte, 0))\n\treturn bytes.Equal(ha, hb)\n}", "func (t *StandardTimer) Sum() int64 {\n\treturn t.histogram.Sum()\n}", "func (n NodePool) GetSum(attr string) float64 {\n\treturn float64(n.SumNodes) * n.VmType.GetAttrValue(attr)\n}", "func TestSummaSet(t *testing.T) {\n\tfor _, pair := range 
testsSUM {\n\t\tv := Summa(pair.values)\n\t\tif v != pair.result {\n\t\t\tt.Error(\n\t\t\t\t\"For\", pair.values,\n\t\t\t\t\"expected\", pair.result,\n\t\t\t\t\"got\", v,\n\t\t\t)\n\t\t}\n\t}\n}", "func (dec *XMASDecoder) FindWeaknessSumFor(target int) (int, error) {\n\tfor i := dec.PreambleSize; i < len(dec.Stream)-1; i++ {\n\t\tfor j := i + 1; j < len(dec.Stream); j++ {\n\t\t\tslice := dec.Stream[i:j]\n\t\t\tif sumOfSlice(slice) == target {\n\t\t\t\tminSoFar := slice[0]\n\t\t\t\tmaxSoFar := slice[0]\n\n\t\t\t\tfor _, n := range slice {\n\t\t\t\t\tminSoFar = min(minSoFar, n)\n\t\t\t\t\tmaxSoFar = max(maxSoFar, n)\n\t\t\t\t}\n\n\t\t\t\treturn minSoFar + maxSoFar, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn -1, errors.New(\"no weakness found\")\n}", "func (o *KubernetesAddonDefinitionAllOf) GetDigestOk() (*string, bool) {\n\tif o == nil || o.Digest == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Digest, true\n}", "func (te *TelemetryEmitter) emitHistogram(metric Metric, timestamp time.Time) error {\n\thist, ok := metric.value.(*dto.Histogram)\n\tif !ok {\n\t\treturn fmt.Errorf(\"unknown histogram metric type for %q: %T\", metric.name, metric.value)\n\t}\n\n\tif m, ok := te.deltaCalculator.CountMetric(metric.name+\".sum\", metric.attributes, hist.GetSampleSum(), timestamp); ok {\n\t\tte.harvester.RecordMetric(m)\n\t}\n\n\tmetricName := metric.name + \".buckets\"\n\tbuckets := make(histogram.Buckets, 0, len(hist.Bucket))\n\tfor _, b := range hist.GetBucket() {\n\t\tupperBound := b.GetUpperBound()\n\t\tcount := float64(b.GetCumulativeCount())\n\t\tif !math.IsInf(upperBound, 1) {\n\t\t\tbucketAttrs := copyAttrs(metric.attributes)\n\t\t\tbucketAttrs[\"histogram.bucket.upperBound\"] = upperBound\n\t\t\tif m, ok := te.deltaCalculator.CountMetric(metricName, bucketAttrs, count, timestamp); ok {\n\t\t\t\tte.harvester.RecordMetric(m)\n\t\t\t}\n\t\t}\n\t\tbuckets = append(\n\t\t\tbuckets,\n\t\t\thistogram.Bucket{\n\t\t\t\tUpperBound: upperBound,\n\t\t\t\tCount: count,\n\t\t\t},\n\t\t)\n\t}\n\n\tvar results error\n\tmetricName = metric.name + \".percentiles\"\n\tfor _, p := range te.percentiles {\n\t\tv, err := histogram.Percentile(p, buckets)\n\t\tif err != nil {\n\t\t\tif results == nil {\n\t\t\t\tresults = err\n\t\t\t} else {\n\t\t\t\tresults = fmt.Errorf(\"%v: %w\", err, results)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tpercentileAttrs := copyAttrs(metric.attributes)\n\t\tpercentileAttrs[\"percentile\"] = p\n\t\tte.harvester.RecordMetric(telemetry.Gauge{\n\t\t\tName: metricName,\n\t\t\tAttributes: percentileAttrs,\n\t\t\tValue: v,\n\t\t\tTimestamp: timestamp,\n\t\t})\n\t}\n\n\treturn results\n}", "func (fn *formulaFuncs) countSum(countText bool, args []formulaArg) (count, sum float64) {\n\tfor _, arg := range args {\n\t\tswitch arg.Type {\n\t\tcase ArgNumber:\n\t\t\tif countText || !arg.Boolean {\n\t\t\t\tsum += arg.Number\n\t\t\t\tcount++\n\t\t\t}\n\t\tcase ArgString:\n\t\t\tif !countText && (arg.Value() == \"TRUE\" || arg.Value() == \"FALSE\") {\n\t\t\t\tcontinue\n\t\t\t} else if countText && (arg.Value() == \"TRUE\" || arg.Value() == \"FALSE\") {\n\t\t\t\tnum := arg.ToBool()\n\t\t\t\tif num.Type == ArgNumber {\n\t\t\t\t\tcount++\n\t\t\t\t\tsum += num.Number\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tnum := arg.ToNumber()\n\t\t\tcount, sum = calcStringCountSum(countText, count, sum, num, arg)\n\t\tcase ArgList, ArgMatrix:\n\t\t\tcnt, summary := fn.countSum(countText, arg.ToList())\n\t\t\tsum += summary\n\t\t\tcount += cnt\n\t\t}\n\t}\n\treturn\n}", "func (s *Stat) GetSum() float64 {\n\treturn 
s.sum\n}", "func sliceSum(a []float64) float64", "func AnyFloat64(f func(float64, int) bool, input []float64) (output bool) {\n\toutput = false\n\tfor idx, data := range input {\n\t\toutput = output || f(data, idx)\n\t\tif output {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}", "func (o *NetworkElementSummaryAllOf) HasAlarmSummary() bool {\n\tif o != nil && o.AlarmSummary.IsSet() {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (d Digest64) Sum64() uint64 {\n\th1, _ := Digest128(d).Sum128()\n\treturn h1\n}", "func AnyUint64(f func(uint64, int) bool, input []uint64) (output bool) {\n\toutput = false\n\tfor idx, data := range input {\n\t\toutput = output || f(data, idx)\n\t\tif output {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}", "func TestSum(t *testing.T) {\n\trequire.Equal(t, int64(2), coverme.Sum(1, 1))\n}", "func (r *MFI) Sum() float64 {\n\treturn r.Value\n}", "func (ser *Series) AllEqual(other *Series) (bool, int) {\n\treturn ser.AllClose(other, 0.0)\n}", "func (imageName *ImageName) HasDigest() bool {\n\treturn strings.HasPrefix(imageName.Tag, \"sha256:\")\n}", "func (o *KubernetesAddonDefinitionAllOf) HasDigest() bool {\n\tif o != nil && o.Digest != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {\n\tq := vals[0].(Vector)[0].F\n\tinVec := vals[1].(Vector)\n\n\tif enh.signatureToMetricWithBuckets == nil {\n\t\tenh.signatureToMetricWithBuckets = map[string]*metricWithBuckets{}\n\t} else {\n\t\tfor _, v := range enh.signatureToMetricWithBuckets {\n\t\t\tv.buckets = v.buckets[:0]\n\t\t}\n\t}\n\n\tvar histogramSamples []Sample\n\n\tfor _, sample := range inVec {\n\t\t// We are only looking for conventional buckets here. Remember\n\t\t// the histograms for later treatment.\n\t\tif sample.H != nil {\n\t\t\thistogramSamples = append(histogramSamples, sample)\n\t\t\tcontinue\n\t\t}\n\n\t\tupperBound, err := strconv.ParseFloat(\n\t\t\tsample.Metric.Get(model.BucketLabel), 64,\n\t\t)\n\t\tif err != nil {\n\t\t\t// Oops, no bucket label or malformed label value. Skip.\n\t\t\t// TODO(beorn7): Issue a warning somehow.\n\t\t\tcontinue\n\t\t}\n\t\tenh.lblBuf = sample.Metric.BytesWithoutLabels(enh.lblBuf, labels.BucketLabel)\n\t\tmb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)]\n\t\tif !ok {\n\t\t\tsample.Metric = labels.NewBuilder(sample.Metric).\n\t\t\t\tDel(excludedLabels...).\n\t\t\t\tLabels()\n\n\t\t\tmb = &metricWithBuckets{sample.Metric, nil}\n\t\t\tenh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb\n\t\t}\n\t\tmb.buckets = append(mb.buckets, bucket{upperBound, sample.F})\n\n\t}\n\n\t// Now deal with the histograms.\n\tfor _, sample := range histogramSamples {\n\t\t// We have to reconstruct the exact same signature as above for\n\t\t// a conventional histogram, just ignoring any le label.\n\t\tenh.lblBuf = sample.Metric.Bytes(enh.lblBuf)\n\t\tif mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)]; ok && len(mb.buckets) > 0 {\n\t\t\t// At this data point, we have conventional histogram\n\t\t\t// buckets and a native histogram with the same name and\n\t\t\t// labels. 
Do not evaluate anything.\n\t\t\t// TODO(beorn7): Issue a warning somehow.\n\t\t\tdelete(enh.signatureToMetricWithBuckets, string(enh.lblBuf))\n\t\t\tcontinue\n\t\t}\n\n\t\tenh.Out = append(enh.Out, Sample{\n\t\t\tMetric: enh.DropMetricName(sample.Metric),\n\t\t\tF: histogramQuantile(q, sample.H),\n\t\t})\n\t}\n\n\tfor _, mb := range enh.signatureToMetricWithBuckets {\n\t\tif len(mb.buckets) > 0 {\n\t\t\tenh.Out = append(enh.Out, Sample{\n\t\t\t\tMetric: mb.metric,\n\t\t\t\tF: bucketQuantile(q, mb.buckets),\n\t\t\t})\n\t\t}\n\t}\n\n\treturn enh.Out\n}", "func (ms HistogramDataPoint) SetSum(v float64) {\n\t(*ms.orig).Sum = v\n}", "func (t *SBF) Has(hashes []uint64) bool {\n t.mutex.RLock()\n\n has := false\n\n for _, pf := range t.plainFilters {\n if pf.Has(hashes) {\n has = true\n break\n }\n }\n\n t.mutex.RUnlock()\n\n atomic.AddUint64(&t.checks, 1)\n if has {\n atomic.AddUint64(&t.hits, 1)\n } else {\n atomic.AddUint64(&t.misses, 1)\n }\n return has\n}", "func (it *emptyIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { return 0, nil }", "func (s *Recommendation) SetAllMatchesSum(v float64) *Recommendation {\n\ts.AllMatchesSum = &v\n\treturn s\n}", "func (ds *Dataset) Sum() float64 {\n\tds.mustNotEmpty()\n\treturn ds.total\n}", "func AliquotSum(n int64) (sum int64) {\n\n\tsum = -n\n\tvar bound int64\n\tif n < (1 << 8) { // quick bit shift for small numbers\n\t\tbound = isqrt(n, 8)\n\t} else {\n\t\tbound = sqrt(n)\n\t}\n\n\tfor i := int64(1); i <= bound; i++ {\n\n\t\tif sum > n {\n\t\t\treturn\n\t\t}\n\n\t\tif n%i == 0 {\n\t\t\tf1, f2 := i, n/i\n\t\t\tif f1 == f2 {\n\t\t\t\tsum += f1\n\t\t\t} else {\n\t\t\t\tsum = sum + f1 + f2\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}", "func (m *MetricsManager) AddHistogram(name, help string, labelNames []string, buckets []float64) error {\n\tvar allLabels sort.StringSlice\n\tfor k := range m.commonLabels {\n\t\tallLabels = append(allLabels, k)\n\t}\n\tallLabels = append(allLabels, labelNames...)\n\tallLabels.Sort()\n\n\topts := prometheus.HistogramOpts{\n\t\tName: name,\n\t\tHelp: help,\n\t}\n\tif buckets != nil {\n\t\topts.Buckets = buckets\n\t}\n\n\tmetric := prometheus.NewHistogramVec(opts, allLabels)\n\tif err := prometheus.Register(metric); err != nil {\n\t\treturn err\n\t}\n\n\tpartialMetric, err := metric.CurryWith(m.commonLabels)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm.histograms[name] = &Histogram{\n\t\tcreationTime: time.Now(),\n\t\tmetric: partialMetric,\n\t}\n\treturn nil\n}", "func (s Series) Sum(force bool) (float64, error) {\n\tif s.elements.Len() == 0 || s.Type() == String || s.Type() == Bool {\n\t\treturn math.NaN(), nil\n\t}\n\tsFloat, err := s.Float(force)\n\tif err != nil {\n\t\treturn math.NaN(), err\n\t}\n\tsum := sFloat[0]\n\tfor i := 1; i < len(sFloat); i++ {\n\t\telem := sFloat[i]\n\t\tsum += elem\n\t}\n\treturn sum, nil\n}", "func (a *Sum) BigVal() (*big.Float, bool) {\n\tif a.nans > 0 {\n\t\treturn nil, true\n\t}\n\t// Handle infs.\n\tif a.minusInfs != 0 {\n\t\tif a.plusInfs != 0 {\n\t\t\t// (+Inf) + (-Inf) => NaN.\n\t\t\treturn nil, true\n\t\t}\n\t\treturn big.NewFloat(math.Inf(-1)), false\n\t}\n\tif a.plusInfs != 0 {\n\t\treturn big.NewFloat(math.Inf(1)), false\n\t}\n\tvar q bfAdder\n\t// end at exponentBits-1 to ignore nans and infs which were handled above.\n\tfor i := 0; i < 1<<exponentBits-1; i++ {\n\t\tsign := 1.0\n\t\thi := a.mantissaHi[i]\n\t\tlo := a.mantissaLo[i]\n\t\tif lo == 0 && hi == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif hi < 0 {\n\t\t\tsign = -1\n\t\t\thi = -hi\n\t\t\thi--\n\t\t\tlo = 
-lo\n\t\t}\n\t\texp := uint64(i)\n\t\tif exp == 0 {\n\t\t\texp = 1 // Handling subnormals\n\t\t}\n\t\tmantissa := lo & (1<<mantissaBits - 1)\n\t\tif mantissa != 0 {\n\t\t\t// ints between -2^(mantissaBits+1) and 2^(mantissaBits+1) can be represented as floats.\n\t\t\tu := big.NewFloat(float64(mantissa) * sign)\n\t\t\tu.SetMantExp(u, int(exp)-exponentBias-mantissaBits)\n\t\t\tq.Add(u)\n\t\t}\n\n\t\tmantissa = lo >> (mantissaBits)\n\t\tmantissa |= uint64(hi) << (64 - mantissaBits)\n\n\t\tif mantissa != 0 {\n\t\t\tu := big.NewFloat(float64(mantissa) * sign)\n\t\t\tu.SetMantExp(u, int(exp)-exponentBias)\n\t\t\tq.Add(u)\n\t\t}\n\t}\n\treturn q.BigVal(), false\n}", "func CompareSummary(a Summary, b Summary) (c SummaryCompare, err error) {\n\tif len(a.Histogram) != len(b.Histogram) {\n\t\treturn SummaryCompare{}, fmt.Errorf(\"len(a.Histogram) %d != len(b.Histogram) %d\", len(a.Histogram), len(b.Histogram))\n\t}\n\n\tc = SummaryCompare{\n\t\tA: a,\n\t\tB: b,\n\t}\n\n\t// e.g. \"A\" 100, \"B\" 50 == -50%\n\t// e.g. \"A\" 50, \"B\" 100 == 100%\n\tdeltaP50 := float64(b.P50) - float64(a.P50)\n\tdeltaP50 /= float64(a.P50)\n\tdeltaP50 *= 100.0\n\tdeltaP50 = convertInvalid(deltaP50)\n\n\tdeltaP90 := float64(b.P90) - float64(a.P90)\n\tdeltaP90 /= float64(a.P90)\n\tdeltaP90 *= 100.0\n\tdeltaP90 = convertInvalid(deltaP90)\n\n\tdeltaP99 := float64(b.P99) - float64(a.P99)\n\tdeltaP99 /= float64(a.P99)\n\tdeltaP99 *= 100.0\n\tdeltaP99 = convertInvalid(deltaP99)\n\n\tdeltaP999 := float64(b.P999) - float64(a.P999)\n\tdeltaP999 /= float64(a.P999)\n\tdeltaP999 *= 100.0\n\tdeltaP999 = convertInvalid(deltaP999)\n\n\tdeltaP9999 := float64(b.P9999) - float64(a.P9999)\n\tdeltaP9999 /= float64(a.P9999)\n\tdeltaP9999 *= 100.0\n\tdeltaP9999 = convertInvalid(deltaP9999)\n\n\tc.P50DeltaPercent = deltaP50\n\tc.P90DeltaPercent = deltaP90\n\tc.P99DeltaPercent = deltaP99\n\tc.P999DeltaPercent = deltaP999\n\tc.P9999DeltaPercent = deltaP9999\n\n\treturn c, nil\n}", "func (d *digest) Sum64() uint64 { return d.crc }", "func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {\n\tif len(vals[0].(Matrix)[0].Floats) > 0 && len(vals[0].(Matrix)[0].Histograms) > 0 {\n\t\t// TODO(zenador): Add warning for mixed floats and histograms.\n\t\treturn enh.Out\n\t}\n\tif len(vals[0].(Matrix)[0].Floats) == 0 {\n\t\t// The passed values only contain histograms.\n\t\treturn aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram {\n\t\t\tsum := s.Histograms[0].H.Copy()\n\t\t\tfor _, h := range s.Histograms[1:] {\n\t\t\t\t// The histogram being added must have\n\t\t\t\t// an equal or larger schema.\n\t\t\t\tif h.H.Schema >= sum.Schema {\n\t\t\t\t\tsum.Add(h.H)\n\t\t\t\t} else {\n\t\t\t\t\tsum = h.H.Copy().Add(sum)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn sum\n\t\t})\n\t}\n\treturn aggrOverTime(vals, enh, func(s Series) float64 {\n\t\tvar sum, c float64\n\t\tfor _, f := range s.Floats {\n\t\t\tsum, c = kahanSumInc(f.F, sum, c)\n\t\t}\n\t\tif math.IsInf(sum, 0) {\n\t\t\treturn sum\n\t\t}\n\t\treturn sum + c\n\t})\n}", "func reduceSum(key string, values []interface{}, r *reducer) {\n\tvar n float64\n\tfor _, v := range values {\n\t\tn += v.(float64)\n\t}\n\tr.emit(key, n)\n}", "func (rm RowsMap) SumFloat(field string, multiple int) int {\n\tsum := 0\n\tfor _, v := range rm {\n\t\tsum += int(math.Round(v.Float64(field) * float64(multiple)))\n\t}\n\treturn sum\n}", "func FindMatchedSum(inputs []int, sum int) (int, int) {\n\thashmap := make(map[int]int)\n\n\tu.Debug(\"- BEGIN inputs%+v\\n\", inputs)\n\tfor i 
:= 0; i < len(inputs); i++ {\n\t\tvar value = inputs[i]\n\t\tvar target = sum - value\n\t\tif index, ok := hashmap[target]; ok {\n\t\t\tu.Debug(\"- FOUND inputs[%v] == %v, in hash %+v\\n\\n\", i, value, hashmap)\n\t\t\treturn index, i\n\t\t}\n\n\t\thashmap[value] = i\n\t\tu.Debug(\"- ADDED inputs[%v] == %v, to hash %+v\\n\", i, value, hashmap)\n\t}\n\n\tu.Debug(\"\\n\")\n\treturn -1, -1\n}", "func TestFloatsMatch(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\tfor _, tc := range []struct {\n\t\tf1, f2 string\n\t\tmatch bool\n\t}{\n\t\t{f1: \"NaN\", f2: \"+Inf\", match: false},\n\t\t{f1: \"+Inf\", f2: \"+Inf\", match: true},\n\t\t{f1: \"NaN\", f2: \"NaN\", match: true},\n\t\t{f1: \"+Inf\", f2: \"-Inf\", match: false},\n\t\t{f1: \"-0.0\", f2: \"0.0\", match: true},\n\t\t{f1: \"0.0\", f2: \"NaN\", match: false},\n\t\t{f1: \"123.45\", f2: \"12.345\", match: false},\n\t\t{f1: \"0.1234567890123456\", f2: \"0.1234567890123455\", match: true},\n\t\t{f1: \"0.1234567890123456\", f2: \"0.1234567890123457\", match: true},\n\t\t{f1: \"-0.1234567890123456\", f2: \"0.1234567890123456\", match: false},\n\t\t{f1: \"-0.1234567890123456\", f2: \"-0.1234567890123455\", match: true},\n\t} {\n\t\tmatch, err := floatsMatch(tc.f1, tc.f2)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif match != tc.match {\n\t\t\tt.Fatalf(\"wrong result on %v\", tc)\n\t\t}\n\t}\n}", "func hasPathSum(u *TreeNode, sum int) bool {\r\n\t// base\r\n\tif u == nil {\r\n\t\treturn false\r\n\t}\r\n\tif u.Left == nil && u.Right == nil && u.Val == sum {\r\n\t\treturn true\r\n\t}\r\n\r\n\t// recursion\r\n\tl := hasPathSum(u.Left, sum-u.Val)\r\n\tr := hasPathSum(u.Right, sum-u.Val)\r\n\treturn l == true || r == true\r\n}", "func readFloat64(exp *testExporter, metric monitoring.Metric, tags []tag.Tag) float64 {\n\texp.Lock()\n\tdefer exp.Unlock()\n\tfor _, r := range exp.rows[metric.Name()] {\n\t\tif !reflect.DeepEqual(r.Tags, tags) {\n\t\t\tcontinue\n\t\t}\n\t\tif sd, ok := r.Data.(*view.SumData); ok {\n\t\t\treturn sd.Value\n\t\t}\n\t}\n\treturn 0\n}", "func almostEquals(a, b float64) bool {\n\treturn math.Abs(a-b) < threshold\n}", "func Sum(a float64, b float64) float64 {\n\treturn a + b\n}", "func Sum(vals ...float64) float64 {\n\tsum := 0.0\n\tfor _, v := range vals {\n\t\tsum += v\n\t}\n\treturn sum\n}", "func (iDB *InfluxDB) GroupedSum(response *client.Response, ft FilterType) (map[string]float64, error) {\n\tsums := make(map[string]float64)\n\tif len(response.Results[0].Series) == 0 {\n\t\tsums[\"\"] = 0\n\t\treturn sums, nil\n\t}\n\n\tfor _, s := range response.Results[0].Series {\n\t\tjsonCount, ok := s.Values[0][1].(json.Number)\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"influx result is not json.Number, cannot proceed\")\n\t\t}\n\t\tsum, err := jsonCount.Float64()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"unable to parse influx count [%f]\", sum))\n\t\t}\n\t\tsums[s.Tags[ft.column()]] = sum\n\t}\n\n\treturn sums, nil\n}", "func (m mathUtil) Sum(values ...float64) float64 {\n\tvar total float64\n\tfor _, v := range values {\n\t\ttotal += v\n\t}\n\treturn total\n}", "func (s *NumSeries) Sum() float64 { return s.sum }", "func (fn *formulaFuncs) SUM(argsList *list.List) formulaArg {\n\tvar sum float64\n\tfor arg := argsList.Front(); arg != nil; arg = arg.Next() {\n\t\ttoken := arg.Value.(formulaArg)\n\t\tswitch token.Type {\n\t\tcase ArgError:\n\t\t\treturn token\n\t\tcase ArgString:\n\t\t\tif num := token.ToNumber(); num.Type == ArgNumber {\n\t\t\t\tsum += num.Number\n\t\t\t}\n\t\tcase 
ArgNumber:\n\t\t\tsum += token.Number\n\t\tcase ArgMatrix:\n\t\t\tfor _, row := range token.Matrix {\n\t\t\t\tfor _, value := range row {\n\t\t\t\t\tif num := value.ToNumber(); num.Type == ArgNumber {\n\t\t\t\t\t\tsum += num.Number\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn newNumberFormulaArg(sum)\n}", "func allHashesEqual(usl []fleet.UnitStatus) (bool, error) {\n\tuhis, err := groupUnitHashInfos(usl)\n\tif err != nil {\n\t\treturn false, maskAny(err)\n\t}\n\n\tfor _, uhi1 := range uhis {\n\t\tfor _, uhi2 := range uhis {\n\t\t\tif uhi1.Base != uhi2.Base {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif uhi1.Hash != uhi2.Hash {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn true, nil\n}", "func TestSumAndAvg(t *testing.T) {\n\n\tdataset := dparval.ValueCollection{\n\t\tdparval.NewValue(map[string]interface{}{\n\t\t\t\"name\": \"marty\",\n\t\t\t\"score\": 20.0,\n\t\t}),\n\t\tdparval.NewValue(map[string]interface{}{\n\t\t\t\"name\": \"gerald\",\n\t\t\t\"score\": nil,\n\t\t}),\n\t\tdparval.NewValue(map[string]interface{}{\n\t\t\t\"name\": \"steve\",\n\t\t}),\n\t\tdparval.NewValue(map[string]interface{}{\n\t\t\t\"name\": \"siri\",\n\t\t\t\"score\": \"thirty\",\n\t\t}),\n\t\tdparval.NewValue(map[string]interface{}{\n\t\t\t\"name\": \"deep\",\n\t\t\t\"score\": 10.0,\n\t\t}),\n\t\tdparval.NewValue(map[string]interface{}{\n\t\t\t\"name\": \"ketaki\",\n\t\t\t\"score\": \"false\",\n\t\t}),\n\t\tdparval.NewValue(map[string]interface{}{\n\t\t\t\"name\": \"pratap\",\n\t\t\t\"score\": []interface{}{5.5},\n\t\t}),\n\t\tdparval.NewValue(map[string]interface{}{\n\t\t\t\"name\": \"karen\",\n\t\t\t\"score\": map[string]interface{}{\"score\": 5.5},\n\t\t}),\n\t}\n\n\ttests := AggregateTestSet{\n\t\t// test expression (eliminiates null and missing)\n\t\t{\n\t\t\tNewFunctionCall(\"SUM\", FunctionArgExpressionList{NewFunctionArgExpression(NewProperty(\"score\"))}),\n\t\t\tdparval.NewValue(30.0),\n\t\t},\n\t\t{\n\t\t\tNewFunctionCall(\"AVG\", FunctionArgExpressionList{NewFunctionArgExpression(NewProperty(\"score\"))}),\n\t\t\tdparval.NewValue(15.0),\n\t\t},\n\t}\n\n\ttests.Run(t, dataset)\n\n}", "func (s Signals) MatchesAll(ctx context.Context, pullCtx pull.Context, tag string) (bool, string, error) {\n\tif !s.Enabled() {\n\t\treturn false, fmt.Sprintf(\"no %s signals provided to match against\", tag), nil\n\t}\n\n\tsignals := []Signal{\n\t\t&s.Labels,\n\t\t&s.CommentSubstrings,\n\t\t&s.Comments,\n\t\t&s.PRBodySubstrings,\n\t\t&s.Branches,\n\t\t&s.BranchPatterns,\n\t\t&s.MaxCommits,\n\t\t&s.AutoMerge,\n\t}\n\n\tfor _, signal := range signals {\n\t\tif signal.Enabled() {\n\t\t\tmatches, _, err := signal.Matches(ctx, pullCtx, tag)\n\t\t\tif err != nil {\n\t\t\t\treturn false, \"\", err\n\t\t\t}\n\n\t\t\tif !matches {\n\t\t\t\treturn false, fmt.Sprintf(\"pull request does not match all %s signals\", tag), nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn true, fmt.Sprintf(\"pull request matches all %s signals\", tag), nil\n}", "func Sum(data []byte, h0, h1, h2, h3, h4 uint32, desiredLen int) [Size]byte {\n\tvar d digest\n\td.Reset(h0, h1, h2, h3, h4)\n\td.Write(data)\n\treturn d.checkSum(desiredLen)\n}", "func (f *Filter) MatchAny(key [KeySize]byte, data [][]byte) bool {\n\tif len(data) == 0 {\n\t\treturn false\n\t}\n\n\t// Create a filter bitstream.\n\tb := newBitReader(f.filterNData[4:])\n\n\t// Create an uncompressed filter of the search values.\n\tvar values *[]uint64\n\tif v := matchPool.Get(); v != nil {\n\t\tvalues = v.(*[]uint64)\n\t\t*values = (*values)[:0]\n\t} else {\n\t\tvs := make([]uint64, 0, 
len(data))\n\t\tvalues = &vs\n\t}\n\tdefer matchPool.Put(values)\n\tk0 := binary.LittleEndian.Uint64(key[0:8])\n\tk1 := binary.LittleEndian.Uint64(key[8:16])\n\tfor _, d := range data {\n\t\tv := siphash.Hash(k0, k1, d) % f.modulusNP\n\t\t*values = append(*values, v)\n\t}\n\tsort.Sort((*uint64s)(values))\n\n\t// Zip down the filters, comparing values until we either run out of\n\t// values to compare in one of the filters or we reach a matching\n\t// value.\n\tvar lastValue1, lastValue2 uint64\n\tlastValue2 = (*values)[0]\n\ti := 1\n\tfor lastValue1 != lastValue2 {\n\t\t// Check which filter to advance to make sure we're comparing\n\t\t// the right values.\n\t\tswitch {\n\t\tcase lastValue1 > lastValue2:\n\t\t\t// Advance filter created from search terms or return\n\t\t\t// false if we're at the end because nothing matched.\n\t\t\tif i < len(*values) {\n\t\t\t\tlastValue2 = (*values)[i]\n\t\t\t\ti++\n\t\t\t} else {\n\t\t\t\treturn false\n\t\t\t}\n\t\tcase lastValue2 > lastValue1:\n\t\t\t// Advance filter we're searching or return false if\n\t\t\t// we're at the end because nothing matched.\n\t\t\tvalue, err := f.readFullUint64(&b)\n\t\t\tif err != nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tlastValue1 += value\n\t\t}\n\t}\n\n\t// If we've made it this far, an element matched between filters so we\n\t// return true.\n\treturn true\n}", "func (p *Provider) NewExplicitHistogram(name string, _ xmetrics.DistributionFunc) metrics.Histogram {\n\treturn p.newHistogram(name)\n}", "func TestSha1SingleHash(t *testing.T) {\n\ttestCases := []struct {\n\t\tname string\n\t\talgorithm string\n\t\tfooHash string\n\t\tfooAndBarHash string\n\t}{\n\t\t{\n\t\t\tname: \"sha1 no combine\",\n\t\t\talgorithm: \"sha1\",\n\t\t\tfooHash: \"0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33\",\n\t\t\tfooAndBarHash: \"4030c3573bf908b75420818b8c0b041443a3f21e\",\n\t\t},\n\t\t{\n\t\t\tname: \"sha256\",\n\t\t\talgorithm: \"sha256\",\n\t\t\tfooHash: \"2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae\",\n\t\t\tfooAndBarHash: \"50d2e3c6f77d85d62907693deb75af0985012566e1fd37e0c2859b3716bccc85\",\n\t\t},\n\t\t{\n\t\t\tname: \"crc32\",\n\t\t\talgorithm: \"crc32\",\n\t\t\tfooHash: \"8c736521\",\n\t\t\tfooAndBarHash: \"045139db\",\n\t\t},\n\t\t{\n\t\t\tname: \"crc64\",\n\t\t\talgorithm: \"crc64\",\n\t\t\tfooHash: \"3c3c303000000000\",\n\t\t\tfooAndBarHash: \"1ff602f5b67b13f4\",\n\t\t},\n\t\t{\n\t\t\tname: \"blake3\",\n\t\t\talgorithm: \"blake3\",\n\t\t\tfooHash: \"04e0bb39f30b1a3feb89f536c93be15055482df748674b00d26e5a75777702e9\",\n\t\t\tfooAndBarHash: \"17d3b6ed7a554870abc95efae5e6255174a53efa40ef1844a21d0d29edac5d68\",\n\t\t},\n\t}\n\n\tfor _, test := range testCases {\n\t\tt.Run(test.name+\" foo\", func(t *testing.T) {\n\t\t\tstate, target := newStateWithHashFunc(\"//hash_test:hash_test\", test.algorithm)\n\n\t\t\ttarget.AddOutput(\"foo.txt\")\n\n\t\t\th, err := newTargetHasher(state).OutputHash(target)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, test.fooHash, hex.EncodeToString(h))\n\t\t})\n\t\tt.Run(test.name+\" foo and bar\", func(t *testing.T) {\n\t\t\tstate, target := newStateWithHashFunc(\"//hash_test:hash_test\", test.algorithm)\n\n\t\t\ttarget.AddOutput(\"foo.txt\")\n\t\t\ttarget.AddOutput(\"bar.txt\")\n\n\t\t\th, err := newTargetHasher(state).OutputHash(target)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, test.fooAndBarHash, hex.EncodeToString(h))\n\t\t})\n\t}\n}", "func Sum(input []float64) (sum float64) {\n\tfor _, v := range input {\n\t\tvar fuel float64\n\t\tfuel += calculator(v, 
fuel)\n\t\tsum += fuel\n\t}\n\treturn sum\n}", "func (a *Sum) Add(v float64) {\n\tb := math.Float64bits(v)\n\tif b == 0 {\n\t\treturn\n\t}\n\tsign := b >> 63\n\tb &= ^uint64(1 << 63)\n\texp := b >> mantissaBits\n\tmantissa := b & (1<<mantissaBits - 1)\n\tmantissa |= 1 << mantissaBits // implicit bit.\n\tprev := a.mantissaLo[exp]\n\tif exp != 0 && exp != 1<<exponentBits-1 {\n\t\tif sign == 0 {\n\t\t\tnew := prev + mantissa\n\t\t\ta.mantissaLo[exp] = new\n\t\t\tif new < prev {\n\t\t\t\ta.mantissaHi[exp]++\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tnew := prev - mantissa\n\t\ta.mantissaLo[exp] = new\n\t\tif a.mantissaLo[exp] > prev {\n\t\t\ta.mantissaHi[exp]--\n\t\t}\n\t\treturn\n\t}\n\t// Handle subnormals, signed zeros, infs and nans.\n\t// Subnormals: exp == 0 && mantissa != 0.\n\t// Signed zeroes: exp == 0 && mantissa == 0.\n\t// Infs: exp == 2047 == (1<<exponentBits - 1) && mantissa == 0.\n\t// NaNs: exp == 2047 == (1<<exponentBits - 1) && mantissa != 0.\n\tswitch exp {\n\tcase 0:\n\t\tmantissa ^= 1 << mantissaBits // Clear the implicit bit.\n\t\tif mantissa == 0 {\n\t\t\t// Signed zero does not change the sum.\n\t\t\treturn\n\t\t}\n\t\t// Subnormals are handleed below.\n\tcase 1<<exponentBits - 1:\n\t\tmantissa ^= 1 << mantissaBits\n\t\tif mantissa == 0 {\n\t\t\t// Infs.\n\t\t\tif sign == 0 {\n\t\t\t\ta.plusInfs++\n\t\t\t\treturn\n\t\t\t}\n\t\t\ta.minusInfs++\n\t\t\treturn\n\t\t}\n\t\t// NaNs.\n\t\ta.nans++\n\t\treturn\n\t}\n\t// Subnormals: add full mantissa.\n\t// It is slightly faster with code duplicated like this.\n\tif sign == 0 {\n\t\tnew := prev + mantissa\n\t\ta.mantissaLo[exp] = new\n\t\tif new < prev {\n\t\t\ta.mantissaHi[exp]++\n\t\t}\n\t\treturn\n\t}\n\tnew := prev - mantissa\n\ta.mantissaLo[exp] = new\n\tif a.mantissaLo[exp] > prev {\n\t\ta.mantissaHi[exp]--\n\t}\n}", "func (m *MetricDeclaration) Matches(metric *pdata.Metric) bool {\n\tfor _, regex := range m.metricRegexList {\n\t\tif regex.MatchString(metric.Name()) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func Sum(values ...float64) float64 {\n\tres := 0.0\n\n\tfor _, v := range values {\n\t\tres += v\n\t}\n\n\treturn res\n}", "func Sum(s string) string {\n\tbytes := []byte(s)\n\tsumBytes := md5.Sum(bytes)\n\treturn hex.EncodeToString(sumBytes[:])\n}" ]
[ "0.53356564", "0.52509165", "0.5046912", "0.5027105", "0.5016691", "0.48859367", "0.4768543", "0.4761051", "0.47506544", "0.47159982", "0.46346056", "0.46067107", "0.4600202", "0.46000293", "0.4588058", "0.45645773", "0.4544128", "0.45185417", "0.45172217", "0.44887275", "0.4470024", "0.44601712", "0.4459524", "0.44558024", "0.4438535", "0.44103298", "0.4342557", "0.4265059", "0.42630377", "0.42408323", "0.42373475", "0.423459", "0.42226887", "0.420787", "0.4204146", "0.41995132", "0.41894037", "0.4184798", "0.41825908", "0.41604766", "0.41449755", "0.41405794", "0.4136939", "0.41320312", "0.41113204", "0.40941587", "0.40935338", "0.40918276", "0.40841535", "0.4083573", "0.40758836", "0.40742856", "0.4048847", "0.40415156", "0.40368816", "0.40346473", "0.40321296", "0.40250292", "0.4020165", "0.4015383", "0.4012775", "0.40054414", "0.40014783", "0.39954093", "0.3989449", "0.39893335", "0.39875457", "0.398355", "0.39817256", "0.3968731", "0.39631933", "0.3961962", "0.3957092", "0.3948624", "0.39345968", "0.3931875", "0.39181033", "0.39175835", "0.3913977", "0.39130703", "0.39045808", "0.39015156", "0.38984516", "0.387013", "0.38666427", "0.38655692", "0.38609296", "0.38569126", "0.38557434", "0.3847769", "0.38471878", "0.38467008", "0.3843648", "0.38406563", "0.38387853", "0.38366848", "0.3830168", "0.3827323", "0.38259715", "0.38229257" ]
0.7263921
0
newBatchHistogram creates a new batch histogram value with the given Desc, buckets, and whether or not it has an exact sum available. buckets must always be from the runtime/metrics package, following the same conventions.
func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram { // We need to remove -Inf values. runtime/metrics keeps them around. // But -Inf bucket should not be allowed for prometheus histograms. if buckets[0] == math.Inf(-1) { buckets = buckets[1:] } h := &batchHistogram{ desc: desc, buckets: buckets, // Because buckets follows runtime/metrics conventions, there's // 1 more value in the buckets list than there are buckets represented, // because in runtime/metrics, the bucket values represent *boundaries*, // and non-Inf boundaries are inclusive lower bounds for that bucket. counts: make([]uint64, len(buckets)-1), hasSum: hasSum, } h.init(h) return h }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewHistogram(opts HistogramOptions) *Histogram {\n\tif opts.NumBuckets == 0 {\n\t\topts.NumBuckets = 32\n\t}\n\tif opts.BaseBucketSize == 0.0 {\n\t\topts.BaseBucketSize = 1.0\n\t}\n\th := Histogram{\n\t\topts: opts,\n\t\tbuckets: make([]bucketInternal, opts.NumBuckets),\n\t\tcount: newCounter(),\n\t\tsum: newCounter(),\n\t\tsumOfSquares: newCounter(),\n\t\ttracker: newTracker(),\n\n\t\tlogBaseBucketSize: math.Log(opts.BaseBucketSize),\n\t\toneOverLogOnePlusGrowthFactor: 1 / math.Log(1+opts.GrowthFactor),\n\t}\n\tm := 1.0 + opts.GrowthFactor\n\tdelta := opts.BaseBucketSize\n\th.buckets[0].lowBound = float64(opts.MinValue)\n\th.buckets[0].count = newCounter()\n\tfor i := 1; i < opts.NumBuckets; i++ {\n\t\th.buckets[i].lowBound = float64(opts.MinValue) + delta\n\t\th.buckets[i].count = newCounter()\n\t\tdelta = delta * m\n\t}\n\treturn &h\n}", "func NewHistogram(name, help string, cutoffs []int64) *Histogram {\n\tlabels := make([]string, len(cutoffs)+1)\n\tfor i, v := range cutoffs {\n\t\tlabels[i] = fmt.Sprintf(\"%d\", v)\n\t}\n\tlabels[len(labels)-1] = \"inf\"\n\treturn NewGenericHistogram(name, help, cutoffs, labels, \"Count\", \"Total\")\n}", "func New(cnt int, desc *metric.Descriptor, opts ...Option) []Aggregator {\n\tvar cfg config\n\n\tif desc.NumberKind() == number.Int64Kind {\n\t\tcfg.explicitBoundaries = defaultInt64ExplicitBoundaries\n\t} else {\n\t\tcfg.explicitBoundaries = defaultFloat64ExplicitBoundaries\n\t}\n\n\tfor _, opt := range opts {\n\t\topt.apply(&cfg)\n\t}\n\n\taggs := make([]Aggregator, cnt)\n\n\t// Boundaries MUST be ordered otherwise the histogram could not\n\t// be properly computed.\n\tsortedBoundaries := make([]float64, len(cfg.explicitBoundaries))\n\n\tcopy(sortedBoundaries, cfg.explicitBoundaries)\n\tsort.Float64s(sortedBoundaries)\n\n\tfor i := range aggs {\n\t\taggs[i] = Aggregator{\n\t\t\tkind: desc.NumberKind(),\n\t\t\tboundaries: sortedBoundaries,\n\t\t}\n\t\taggs[i].state = aggs[i].newState()\n\t}\n\treturn aggs\n}", "func NewHistogram(labels map[string]string) *Histogram {\n\treturn &Histogram{\n\t\tLabels: labels,\n\t\tBuckets: make(map[string]int),\n\t}\n}", "func NewHistogram(Offset float64, Divider float64) *Histogram {\n\th := new(Histogram)\n\th.Offset = Offset\n\tif Divider == 0 {\n\t\treturn nil\n\t}\n\th.Divider = Divider\n\th.Hdata = make([]int32, numBuckets)\n\treturn h\n}", "func NewGenericHistogram(name, help string, cutoffs []int64, labels []string, countLabel, totalLabel string) *Histogram {\n\tif len(cutoffs) != len(labels)-1 {\n\t\tpanic(\"mismatched cutoff and label lengths\")\n\t}\n\th := &Histogram{\n\t\tname: name,\n\t\thelp: help,\n\t\tcutoffs: cutoffs,\n\t\tlabels: labels,\n\t\tcountLabel: countLabel,\n\t\ttotalLabel: totalLabel,\n\t\tbuckets: make([]atomic.Int64, len(labels)),\n\t}\n\tif name != \"\" {\n\t\tpublish(name, h)\n\t}\n\treturn h\n}", "func (h *Histogram) filterHistogram(newBuckets []Bucket) *Histogram {\n\tcheckBucketsValid(newBuckets)\n\n\ttotal := int64(0)\n\tfor _, b := range newBuckets {\n\t\ttotal += b.NumEq + b.NumRange\n\t}\n\n\tif total == 0 {\n\t\treturn &Histogram{}\n\t}\n\n\tselectivity := float64(total) / float64(h.RowCount)\n\n\t// Estimate the new DistinctCount based on the selectivity of this filter.\n\t// todo(rytaft): this could be more precise if we take into account the\n\t// null count of the original histogram. 
This could also be more precise for\n\t// the operators =, !=, in, and not in, since we know how these operators\n\t// should affect the distinct count.\n\tdistinctCount := int64(float64(h.DistinctCount) * selectivity)\n\tif distinctCount == 0 {\n\t\t// There must be at least one distinct value since RowCount > 0.\n\t\tdistinctCount++\n\t}\n\n\treturn &Histogram{\n\t\tRowCount: total,\n\t\tDistinctCount: distinctCount,\n\n\t\t// All the returned rows will be non-null for this column.\n\t\tNullCount: 0,\n\t\tBuckets: newBuckets,\n\t}\n}", "func NewHistogram(w io.Writer, key string, interval time.Duration) metrics.Histogram {\n\th := make(chan string)\n\tgo fwd(w, key, interval, h)\n\treturn statsdHistogram(h)\n}", "func NewHistogram(subsystem, name, help string, labels []string, buckets []float64) *prometheus.HistogramVec {\n\treturn promauto.NewHistogramVec(\n\t\tprometheus.HistogramOpts{\n\t\t\tNamespace: namespace,\n\t\t\tSubsystem: subsystem,\n\t\t\tName: name,\n\t\t\tHelp: help,\n\t\t\tBuckets: buckets,\n\t\t},\n\t\tlabels,\n\t)\n}", "func NewHistogram(nanoseconds []float64, opts *HistogramOptions) *Histogram {\n\tif opts.BinCount <= 0 {\n\t\tpanic(\"binCount must be larger than 0\")\n\t}\n\n\thist := &Histogram{}\n\thist.Width = 40\n\thist.Bins = make([]HistogramBin, opts.BinCount)\n\tif len(nanoseconds) == 0 {\n\t\treturn hist\n\t}\n\n\tnanoseconds = append(nanoseconds[:0:0], nanoseconds...)\n\tsort.Float64s(nanoseconds)\n\n\thist.Minimum = nanoseconds[0]\n\thist.Maximum = nanoseconds[len(nanoseconds)-1]\n\n\thist.Average = float64(0)\n\tfor _, x := range nanoseconds {\n\t\thist.Average += x\n\t}\n\thist.Average /= float64(len(nanoseconds))\n\n\tp := func(p float64) float64 {\n\t\ti := int(math.Round(p * float64(len(nanoseconds))))\n\t\tif i < 0 {\n\t\t\ti = 0\n\t\t}\n\t\tif i >= len(nanoseconds) {\n\t\t\ti = len(nanoseconds) - 1\n\t\t}\n\t\treturn nanoseconds[i]\n\t}\n\n\thist.P50, hist.P90, hist.P99, hist.P999, hist.P9999 = p(0.50), p(0.90), p(0.99), p(0.999), p(0.9999)\n\n\tclampMaximum := hist.Maximum\n\tif opts.ClampPercentile > 0 {\n\t\tclampMaximum = p(opts.ClampPercentile)\n\t}\n\tif opts.ClampMaximum > 0 {\n\t\tclampMaximum = opts.ClampMaximum\n\t}\n\n\tvar minimum, spacing float64\n\n\tif opts.NiceRange {\n\t\tminimum, spacing = calculateNiceSteps(hist.Minimum, clampMaximum, opts.BinCount)\n\t} else {\n\t\tminimum, spacing = calculateSteps(hist.Minimum, clampMaximum, opts.BinCount)\n\t}\n\n\tfor i := range hist.Bins {\n\t\thist.Bins[i].Start = spacing*float64(i) + minimum\n\t}\n\thist.Bins[0].Start = hist.Minimum\n\n\tfor _, x := range nanoseconds {\n\t\tk := int(float64(x-minimum) / spacing)\n\t\tif k < 0 {\n\t\t\tk = 0\n\t\t}\n\t\tif k >= opts.BinCount {\n\t\t\tk = opts.BinCount - 1\n\t\t\thist.Bins[k].andAbove = true\n\t\t}\n\t\thist.Bins[k].Count++\n\t}\n\n\tmaxBin := 0\n\tfor _, bin := range hist.Bins {\n\t\tif bin.Count > maxBin {\n\t\t\tmaxBin = bin.Count\n\t\t}\n\t}\n\n\tfor k := range hist.Bins {\n\t\tbin := &hist.Bins[k]\n\t\tbin.Width = float64(bin.Count) / float64(maxBin)\n\t}\n\n\treturn hist\n}", "func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {\n\tq := vals[0].(Vector)[0].F\n\tinVec := vals[1].(Vector)\n\n\tif enh.signatureToMetricWithBuckets == nil {\n\t\tenh.signatureToMetricWithBuckets = map[string]*metricWithBuckets{}\n\t} else {\n\t\tfor _, v := range enh.signatureToMetricWithBuckets {\n\t\t\tv.buckets = v.buckets[:0]\n\t\t}\n\t}\n\n\tvar histogramSamples []Sample\n\n\tfor _, sample := range inVec 
{\n\t\t// We are only looking for conventional buckets here. Remember\n\t\t// the histograms for later treatment.\n\t\tif sample.H != nil {\n\t\t\thistogramSamples = append(histogramSamples, sample)\n\t\t\tcontinue\n\t\t}\n\n\t\tupperBound, err := strconv.ParseFloat(\n\t\t\tsample.Metric.Get(model.BucketLabel), 64,\n\t\t)\n\t\tif err != nil {\n\t\t\t// Oops, no bucket label or malformed label value. Skip.\n\t\t\t// TODO(beorn7): Issue a warning somehow.\n\t\t\tcontinue\n\t\t}\n\t\tenh.lblBuf = sample.Metric.BytesWithoutLabels(enh.lblBuf, labels.BucketLabel)\n\t\tmb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)]\n\t\tif !ok {\n\t\t\tsample.Metric = labels.NewBuilder(sample.Metric).\n\t\t\t\tDel(excludedLabels...).\n\t\t\t\tLabels()\n\n\t\t\tmb = &metricWithBuckets{sample.Metric, nil}\n\t\t\tenh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb\n\t\t}\n\t\tmb.buckets = append(mb.buckets, bucket{upperBound, sample.F})\n\n\t}\n\n\t// Now deal with the histograms.\n\tfor _, sample := range histogramSamples {\n\t\t// We have to reconstruct the exact same signature as above for\n\t\t// a conventional histogram, just ignoring any le label.\n\t\tenh.lblBuf = sample.Metric.Bytes(enh.lblBuf)\n\t\tif mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)]; ok && len(mb.buckets) > 0 {\n\t\t\t// At this data point, we have conventional histogram\n\t\t\t// buckets and a native histogram with the same name and\n\t\t\t// labels. Do not evaluate anything.\n\t\t\t// TODO(beorn7): Issue a warning somehow.\n\t\t\tdelete(enh.signatureToMetricWithBuckets, string(enh.lblBuf))\n\t\t\tcontinue\n\t\t}\n\n\t\tenh.Out = append(enh.Out, Sample{\n\t\t\tMetric: enh.DropMetricName(sample.Metric),\n\t\t\tF: histogramQuantile(q, sample.H),\n\t\t})\n\t}\n\n\tfor _, mb := range enh.signatureToMetricWithBuckets {\n\t\tif len(mb.buckets) > 0 {\n\t\t\tenh.Out = append(enh.Out, Sample{\n\t\t\t\tMetric: mb.metric,\n\t\t\t\tF: bucketQuantile(q, mb.buckets),\n\t\t\t})\n\t\t}\n\t}\n\n\treturn enh.Out\n}", "func NewHistogramBucket() HistogramBucket {\n\torig := (*otlpmetrics.HistogramDataPoint_Bucket)(nil)\n\treturn newHistogramBucket(&orig)\n}", "func (e *Exporter) NewHistogram(name, help string, cutoffs []int64) *stats.Histogram {\n\tif e.name == \"\" || name == \"\" {\n\t\tv := stats.NewHistogram(name, help, cutoffs)\n\t\taddUnnamedExport(name, v)\n\t\treturn v\n\t}\n\thist := stats.NewHistogram(\"\", help, cutoffs)\n\te.addToOtherVars(name, hist)\n\treturn hist\n}", "func NewHistogram() *Histogram {\n\treturn &Histogram{make([]int, HistogramSize), &sync.Mutex{}}\n}", "func NewBucket(tokens uint64) *Bucket {\n\treturn &Bucket{Added: float64(tokens)}\n}", "func (p *influxProvider) NewHistogram(name string, buckets int) metrics.Histogram {\n\treturn p.in.NewHistogram(name)\n}", "func NewHistogramBucketSlice() HistogramBucketSlice {\n\torig := []*otlpmetrics.HistogramDataPoint_Bucket(nil)\n\treturn HistogramBucketSlice{&orig}\n}", "func newBuckets(c Config, end time.Time) buckets {\n\tn := c.len()\n\tif n == 0 {\n\t\treturn nil\n\t}\n\tout := make(buckets, n+1)\n\n\tfor i := n; i >= 1; i-- {\n\t\tw := c.MinInterval()\n\n\t\t// c is a copy of the config,\n\t\t// so we can decrement values without\n\t\t// a problem.\n\t\tswitch w {\n\t\tcase Hour:\n\t\t\tc.Hourly--\n\t\tcase Day:\n\t\t\tc.Daily--\n\t\tcase Week:\n\t\t\tc.Weekly--\n\t\tcase Month:\n\t\t\tc.Monthly--\n\t\tcase Year:\n\t\t\tc.Yearly--\n\t\tcase -1:\n\t\t\tpanic(\"ran out of buckets\")\n\t\tdefault:\n\t\t\tpanic(\"unknown 
interval\")\n\t\t}\n\n\t\tout[i] = &bucket{\n\t\t\tWidth: w,\n\t\t\tEnd: end,\n\t\t}\n\t\tend = end.Add(-w)\n\t}\n\n\t// Catchall bucket.\n\tout[0] = &bucket{\n\t\tEnd: end,\n\t\tWidth: 0,\n\t}\n\n\treturn out\n}", "func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) {\n\tcounts, buckets := his.Counts, his.Buckets\n\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\n\t// Clear buckets.\n\tfor i := range h.counts {\n\t\th.counts[i] = 0\n\t}\n\t// Copy and reduce buckets.\n\tvar j int\n\tfor i, count := range counts {\n\t\th.counts[j] += count\n\t\tif buckets[i+1] == h.buckets[j+1] {\n\t\t\tj++\n\t\t}\n\t}\n\tif h.hasSum {\n\t\th.sum = sum\n\t}\n}", "func NewHistogram(config HistogramConfig) (objectspec.InstrumentorHistogram, error) {\n\tnewHistogram := &histogram{\n\t\tHistogramConfig: config,\n\t}\n\n\tif len(newHistogram.Buckets) == 0 {\n\t\treturn nil, maskAnyf(invalidConfigError, \"buckets must not be empty\")\n\t}\n\tif newHistogram.Help == \"\" {\n\t\treturn nil, maskAnyf(invalidConfigError, \"help must not be empty\")\n\t}\n\tif newHistogram.Name == \"\" {\n\t\treturn nil, maskAnyf(invalidConfigError, \"name must not be empty\")\n\t}\n\n\tnewHistogram.ClientHistogram = prometheusclient.NewHistogram(prometheusclient.HistogramOpts{\n\t\tBuckets: newHistogram.Buckets,\n\t\tHelp: newHistogram.Help,\n\t\tName: newHistogram.Name,\n\t})\n\n\treturn newHistogram, nil\n}", "func NewMultiHistogram(name string, histograms ...Histogram) Histogram {\n\treturn &multiHistogram{\n\t\tname: name,\n\t\ta: histograms,\n\t}\n}", "func newBuckets(n int) *buckets {\n\treturn &buckets{counts: make([]uint64, n)}\n}", "func (d TestSink) Histogram(c *telemetry.Context, stat string, value float64) {\n\td[stat] = TestMetric{\"Histogram\", value, c.Tags()}\n}", "func splitBuckets(h *Histogram, feedback *QueryFeedback) ([]bucket, []bool, int64) {\n\tbktID2FB, numTotalFBs := buildBucketFeedback(h, feedback)\n\tbuckets := make([]bucket, 0, h.Len())\n\tisNewBuckets := make([]bool, 0, h.Len())\n\tsplitCount := getSplitCount(numTotalFBs, defaultBucketCount-h.Len())\n\tfor i := 0; i < h.Len(); i++ {\n\t\tbktFB, ok := bktID2FB[i]\n\t\t// No feedback, just use the original one.\n\t\tif !ok {\n\t\t\tbuckets = append(buckets, bucket{h.GetLower(i), h.GetUpper(i), h.bucketCount(i), h.Buckets[i].Repeat})\n\t\t\tisNewBuckets = append(isNewBuckets, false)\n\t\t\tcontinue\n\t\t}\n\t\t// Distribute the total split count to bucket based on number of bucket feedback.\n\t\tnewBktNums := splitCount * len(bktFB.feedback) / numTotalFBs\n\t\tbkts := bktFB.splitBucket(newBktNums, h.TotalRowCount(), float64(h.bucketCount(i)))\n\t\tbuckets = append(buckets, bkts...)\n\t\tif len(bkts) == 1 {\n\t\t\tisNewBuckets = append(isNewBuckets, false)\n\t\t} else {\n\t\t\tfor i := 0; i < len(bkts); i++ {\n\t\t\t\tisNewBuckets = append(isNewBuckets, true)\n\t\t\t}\n\t\t}\n\t}\n\ttotCount := int64(0)\n\tfor _, bkt := range buckets {\n\t\ttotCount += bkt.Count\n\t}\n\treturn buckets, isNewBuckets, totCount\n}", "func (cm *customMetrics) AddHistogram(\n\tnamespace, subsystem, name, help, internalKey string,\n\tconstLabels prometheus.Labels, buckets []float64) {\n\n\tcm.histograms[internalKey] = promauto.NewHistogram(prometheus.HistogramOpts{\n\t\tNamespace: namespace,\n\t\tSubsystem: subsystem,\n\t\tName: name,\n\t\tHelp: help,\n\t\tConstLabels: constLabels,\n\t\tBuckets: buckets,\n\t})\n}", "func (r *JobsService) Histogram(gethistogramrequest *GetHistogramRequest) *JobsHistogramCall {\n\tc := &JobsHistogramCall{s: r.s, urlParams_: 
make(gensupport.URLParams)}\n\tc.gethistogramrequest = gethistogramrequest\n\treturn c\n}", "func NewBucket(desc metrics.Descriptor, dur time.Duration) (*Bucket, error) {\n\tvar (\n\t\tm metrics.Metric\n\t\terr error\n\t)\n\tif m, err = metrics.FromDescriptor(desc); err != nil {\n\t\treturn nil, err\n\t}\n\tshard := NewShard(m, dur)\n\treturn &Bucket{\n\t\tdescriptor: desc,\n\t\tshards: []*Shard{shard},\n\t\tshardDuration: dur,\n\t}, nil\n}", "func (c *Aggregator) Histogram() (aggregation.Buckets, error) {\n\treturn aggregation.Buckets{\n\t\tBoundaries: c.boundaries,\n\t\tCounts: c.state.bucketCounts,\n\t}, nil\n}", "func (m *Metrics) Histogram(name, help string, buckets []float64) prometheus.Histogram {\n\thistogram := prometheus.NewHistogram(prometheus.HistogramOpts{\n\t\tNamespace: m.config.Namespace,\n\t\tName: name,\n\t\tHelp: help,\n\t\tBuckets: buckets,\n\t})\n\n\tprometheus.MustRegister(histogram)\n\n\treturn histogram\n}", "func UpdateHistogram(h *Histogram, feedback *QueryFeedback) *Histogram {\n\tbuckets, isNewBuckets, totalCount := splitBuckets(h, feedback)\n\tbuckets = mergeBuckets(buckets, isNewBuckets, float64(totalCount))\n\thist := buildNewHistogram(h, buckets)\n\t// Update the NDV of primary key column.\n\tif feedback.Tp == PkType {\n\t\thist.NDV = int64(hist.TotalRowCount())\n\t}\n\treturn hist\n}", "func (bench *Stopwatch) Histogram(binCount int) *Histogram {\n\tbench.mustBeCompleted()\n\n\topts := defaultOptions\n\topts.BinCount = binCount\n\n\treturn NewDurationHistogram(bench.Durations(), &opts)\n}", "func (h *Histogram) Export() *HistogramData {\n\tvar res HistogramData\n\tres.Count = h.Counter.Count\n\tres.Min = h.Counter.Min\n\tres.Max = h.Counter.Max\n\tres.Sum = h.Counter.Sum\n\tres.Avg = h.Counter.Avg()\n\tres.StdDev = h.Counter.StdDev()\n\tmultiplier := h.Divider\n\toffset := h.Offset\n\t// calculate the last bucket index\n\tlastIdx := -1\n\tfor i := numBuckets - 1; i >= 0; i-- {\n\t\tif h.Hdata[i] > 0 {\n\t\t\tlastIdx = i\n\t\t\tbreak\n\t\t}\n\t}\n\tif lastIdx == -1 {\n\t\treturn &res\n\t}\n\n\t// previous bucket value:\n\tprev := histogramBucketValues[0]\n\tvar total int64\n\tctrTotal := float64(h.Count)\n\t// export the data of each bucket of the histogram\n\tfor i := 0; i <= lastIdx; i++ {\n\t\tif h.Hdata[i] == 0 {\n\t\t\t// empty bucket: skip it but update prev which is needed for next iter\n\t\t\tif i < numValues {\n\t\t\t\tprev = histogramBucketValues[i]\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tvar b Bucket\n\t\ttotal += int64(h.Hdata[i])\n\t\tif len(res.Data) == 0 {\n\t\t\t// First entry, start is min\n\t\t\tb.Start = h.Min\n\t\t} else {\n\t\t\tb.Start = multiplier*float64(prev) + offset\n\t\t}\n\t\tb.Percent = 100. 
* float64(total) / ctrTotal\n\t\tif i < numValues {\n\t\t\tcur := histogramBucketValues[i]\n\t\t\tb.End = multiplier*float64(cur) + offset\n\t\t\tprev = cur\n\t\t} else {\n\t\t\t// Last Entry\n\t\t\tb.Start = multiplier*float64(prev) + offset\n\t\t\tb.End = h.Max\n\t\t}\n\t\tb.Count = int64(h.Hdata[i])\n\t\tres.Data = append(res.Data, b)\n\t}\n\tres.Data[len(res.Data)-1].End = h.Max\n\treturn &res\n}", "func (bench *Stopwatch) HistogramClamp(binCount int, min, max time.Duration) *Histogram {\n\tbench.mustBeCompleted()\n\n\tdurations := make([]time.Duration, 0, len(bench.spans))\n\tfor _, span := range bench.spans {\n\t\tduration := span.Duration()\n\t\tif duration < min {\n\t\t\tdurations = append(durations, min)\n\t\t} else {\n\t\t\tdurations = append(durations, duration)\n\t\t}\n\t}\n\n\topts := defaultOptions\n\topts.BinCount = binCount\n\topts.ClampMaximum = float64(max.Nanoseconds())\n\topts.ClampPercentile = 0\n\n\treturn NewDurationHistogram(durations, &opts)\n}", "func (_m *Reporter) Histogram(name string, value float64, tags ...monitoring.Tag) {\n\t_va := make([]interface{}, len(tags))\n\tfor _i := range tags {\n\t\t_va[_i] = tags[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, name, value)\n\t_ca = append(_ca, _va...)\n\t_m.Called(_ca...)\n}", "func Histogram(name string, requestTime float64, tags []string, rate float64) {\n\tif ddog == nil {\n\t\tlog.Error(\"datadog client is not initialized\")\n\t\treturn\n\t}\n\n\terr := ddog.Client.Histogram(name, requestTime, tags, rate)\n\tif err != nil {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"error\": err,\n\t\t\t\"name\": name,\n\t\t}).Error(\"Failed to send histogram data to datadog\")\n\t}\n}", "func (m *MetricsManager) AddHistogram(name, help string, labelNames []string, buckets []float64) error {\n\tvar allLabels sort.StringSlice\n\tfor k := range m.commonLabels {\n\t\tallLabels = append(allLabels, k)\n\t}\n\tallLabels = append(allLabels, labelNames...)\n\tallLabels.Sort()\n\n\topts := prometheus.HistogramOpts{\n\t\tName: name,\n\t\tHelp: help,\n\t}\n\tif buckets != nil {\n\t\topts.Buckets = buckets\n\t}\n\n\tmetric := prometheus.NewHistogramVec(opts, allLabels)\n\tif err := prometheus.Register(metric); err != nil {\n\t\treturn err\n\t}\n\n\tpartialMetric, err := metric.CurryWith(m.commonLabels)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm.histograms[name] = &Histogram{\n\t\tcreationTime: time.Now(),\n\t\tmetric: partialMetric,\n\t}\n\treturn nil\n}", "func (datadog *Datadog) Histogram(name string, startTime time.Time, tags []string) error {\n\telapsedTime := time.Since(startTime).Seconds() * 1000\n\terr := datadog.client.Histogram(name, elapsedTime, tags, float64(1))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func NewBatcher(want, min, max int) *Batcher {\n\treturn &Batcher{\n\t\twant: want,\n\t\tmin: min,\n\t\tmax: max,\n\t\tratio: 1,\n\t}\n}", "func (c *Client) Histogram(stat string, value int, rate float64) error {\n\treturn c.send(stat, rate, \"%d|ms\", value)\n}", "func (c *StatsClient) Histogram(name string, value float64) {\n\tif err := c.client.Histogram(name, value, c.tags, Rate); err != nil {\n\t\tc.logger().Printf(\"datadog.StatsClient.Histogram error: %s\", err)\n\t}\n}", "func getHistogramSamples(name string,\n\tlabels []*prometheus_models.LabelPair,\n\ttimestampMs int64,\n\th *prometheus_models.Histogram,\n\tentity string,\n) []Sample {\n\tsamples := make([]Sample, len(h.GetBucket())*2+2)\n\tsamples[0] = Sample{\n\t\tname: name + \"_count\",\n\t\tlabels: labels,\n\t\ttimestampMs: 
timestampMs,\n\t\tvalue: strconv.FormatUint(h.GetSampleCount(), 10),\n\t\tentity: entity,\n\t}\n\tsamples[1] = Sample{\n\t\tname: name + \"_sum\",\n\t\tlabels: labels,\n\t\ttimestampMs: timestampMs,\n\t\tvalue: strconv.FormatFloat(h.GetSampleSum(), 'E', -1, 64),\n\t\tentity: entity,\n\t}\n\tfor i, b := range h.GetBucket() {\n\t\tsamples[i+2] = Sample{\n\t\t\tname: fmt.Sprintf(\"%s_bucket_%d_le\", name, i),\n\t\t\tlabels: labels,\n\t\t\ttimestampMs: timestampMs,\n\t\t\tvalue: strconv.FormatFloat(b.GetUpperBound(), 'E', -1, 64),\n\t\t\tentity: entity,\n\t\t}\n\t\tsamples[i+3] = Sample{\n\t\t\tname: fmt.Sprintf(\"%s_bucket_%d_count\", name, i),\n\t\t\tlabels: labels,\n\t\t\ttimestampMs: timestampMs,\n\t\t\tvalue: strconv.FormatUint(b.GetCumulativeCount(), 10),\n\t\t\tentity: entity,\n\t\t}\n\t}\n\treturn samples\n}", "func ParseHistogram(scale string, histo *dto.Histogram) (buckets HistogramBuckets, err error) {\n\tif histo == nil {\n\t\treturn nil, errors.New(\"nil Histogram\")\n\t}\n\n\ttotal := *histo.SampleCount\n\tn := len(histo.Bucket)\n\n\tbuckets = make(HistogramBuckets, n+1)\n\tbuckets[n] = HistogramBucket{\n\t\tScale: scale,\n\t\tUpperBound: math.MaxFloat64,\n\t\tCount: total,\n\t}\n\tfor idx, bv := range histo.Bucket {\n\t\tbuckets[idx] = HistogramBucket{\n\t\t\tScale: scale,\n\t\t\tUpperBound: *bv.UpperBound,\n\t\t\tCount: *bv.CumulativeCount,\n\t\t}\n\t}\n\tfor idx := n; idx > 0; idx-- { // start from last, end at second to last\n\t\t// convert cumulative count to per-bucket count\n\t\tbuckets[idx].Count = buckets[idx].Count - buckets[idx-1].Count\n\t\t// use previous bucket upper bound as lower bound\n\t\tbuckets[idx].LowerBound = buckets[idx-1].UpperBound\n\t}\n\n\tsort.Sort(HistogramBuckets(buckets))\n\treturn buckets, nil\n}", "func BuildColumn(ctx context.Context, numBuckets, id int64, ndv int64, count int64, samples []types.Datum) (*Histogram, error) {\n\tif count == 0 {\n\t\treturn &Histogram{ID: id}, nil\n\t}\n\tsc := ctx.GetSessionVars().StmtCtx\n\terr := types.SortDatums(sc, samples)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\thg := &Histogram{\n\t\tID: id,\n\t\tNDV: ndv,\n\t\tBuckets: make([]Bucket, 1, numBuckets),\n\t}\n\tvaluesPerBucket := float64(count)/float64(numBuckets) + 1\n\n\t// As we use samples to build the histogram, the bucket number and repeat should multiply a factor.\n\tsampleFactor := float64(count) / float64(len(samples))\n\tndvFactor := float64(count) / float64(ndv)\n\tif ndvFactor > sampleFactor {\n\t\tndvFactor = sampleFactor\n\t}\n\tbucketIdx := 0\n\tvar lastCount int64\n\tfor i := int64(0); i < int64(len(samples)); i++ {\n\t\tcmp, err := hg.Buckets[bucketIdx].Value.CompareDatum(sc, samples[i])\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\ttotalCount := float64(i+1) * sampleFactor\n\t\tif cmp == 0 {\n\t\t\t// The new item has the same value as current bucket value, to ensure that\n\t\t\t// a same value only stored in a single bucket, we do not increase bucketIdx even if it exceeds\n\t\t\t// valuesPerBucket.\n\t\t\thg.Buckets[bucketIdx].Count = int64(totalCount)\n\t\t\tif float64(hg.Buckets[bucketIdx].Repeats) == ndvFactor {\n\t\t\t\thg.Buckets[bucketIdx].Repeats = int64(2 * sampleFactor)\n\t\t\t} else {\n\t\t\t\thg.Buckets[bucketIdx].Repeats += int64(sampleFactor)\n\t\t\t}\n\t\t} else if totalCount-float64(lastCount) <= valuesPerBucket {\n\t\t\t// The bucket still have room to store a new item, update the bucket.\n\t\t\thg.Buckets[bucketIdx].Count = int64(totalCount)\n\t\t\thg.Buckets[bucketIdx].Value = 
samples[i]\n\t\t\thg.Buckets[bucketIdx].Repeats = int64(ndvFactor)\n\t\t} else {\n\t\t\tlastCount = hg.Buckets[bucketIdx].Count\n\t\t\t// The bucket is full, store the item in the next bucket.\n\t\t\tbucketIdx++\n\t\t\thg.Buckets = append(hg.Buckets, Bucket{\n\t\t\t\tCount: int64(totalCount),\n\t\t\t\tValue: samples[i],\n\t\t\t\tRepeats: int64(ndvFactor),\n\t\t\t})\n\t\t}\n\t}\n\treturn hg, nil\n}", "func getHistogram(src [][3]int, size float64, pixels *[HistSize][3]float64, hist *[HistSize]float64) {\n\tvar ind, r, g, b, i int\n\tvar inr, ing, inb int\n\n\tfor i = range src {\n\t\tr = src[i][0]\n\t\tg = src[i][1]\n\t\tb = src[i][2]\n\n\t\tinr = r >> Shift\n\t\ting = g >> Shift\n\t\tinb = b >> Shift\n\n\t\tind = (inr << (2 * HistBits)) + (ing << HistBits) + inb\n\t\tpixels[ind][0], pixels[ind][1], pixels[ind][2] = float64(r), float64(g), float64(b)\n\t\thist[ind]++\n\t}\n\n\t// normalize weight by the number of pixels in the image\n\tfor i = 0; i < HistSize; i++ {\n\t\thist[i] /= size\n\t}\n}", "func HistogramSummary(scope *Scope, tag tf.Output, values tf.Output) (summary tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"HistogramSummary\",\n\t\tInput: []tf.Input{\n\t\t\ttag, values,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}", "func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {\n\tinVec := vals[0].(Vector)\n\n\tfor _, sample := range inVec {\n\t\t// Skip non-histogram samples.\n\t\tif sample.H == nil {\n\t\t\tcontinue\n\t\t}\n\t\tenh.Out = append(enh.Out, Sample{\n\t\t\tMetric: enh.DropMetricName(sample.Metric),\n\t\t\tF: sample.H.Count,\n\t\t})\n\t}\n\treturn enh.Out\n}", "func NewHistogram(name string, options ...Option) Histogram {\n\treturn newHistogram(name, options...)\n}", "func newBucketGroup(mBuckets int64) bucketGroup {\n\treturn make(bucketGroup, mBuckets)\n}", "func NewDeltaHistogram[N int64 | float64](cfg aggregation.ExplicitBucketHistogram) Aggregator[N] {\n\treturn &deltaHistogram[N]{\n\t\thistValues: newHistValues[N](cfg.Boundaries),\n\t\tnoMinMax: cfg.NoMinMax,\n\t\tstart: now(),\n\t}\n}", "func makeBuckets(current, prev summaryMap, l *logrus.Entry) []*agentpb.MetricsBucket {\n\tres := make([]*agentpb.MetricsBucket, 0, len(current))\n\n\tfor digest, currentESS := range current {\n\t\tprevESS := prev[digest]\n\t\tif prevESS == nil {\n\t\t\tprevESS = &eventsStatementsSummaryByDigest{}\n\t\t}\n\n\t\tswitch {\n\t\tcase currentESS.CountStar == prevESS.CountStar:\n\t\t\t// Another way how this is possible is if events_statements_summary_by_digest was truncated,\n\t\t\t// and then the same number of queries were made.\n\t\t\t// Currently, we can't differentiate between those situations.\n\t\t\t// TODO We probably could by using first_seen/last_seen columns.\n\t\t\tl.Tracef(\"Skipped due to the same number of queries: %s.\", currentESS)\n\t\t\tcontinue\n\t\tcase currentESS.CountStar < prevESS.CountStar:\n\t\t\tl.Debugf(\"Truncate detected. 
Treating as a new query: %s.\", currentESS)\n\t\t\tprevESS = &eventsStatementsSummaryByDigest{}\n\t\tcase prevESS.CountStar == 0:\n\t\t\tl.Debugf(\"New query: %s.\", currentESS)\n\t\tdefault:\n\t\t\tl.Debugf(\"Normal query: %s.\", currentESS)\n\t\t}\n\n\t\tcount := inc(currentESS.CountStar, prevESS.CountStar)\n\t\tfingerprint, isTruncated := truncate.Query(*currentESS.DigestText)\n\t\tmb := &agentpb.MetricsBucket{\n\t\t\tCommon: &agentpb.MetricsBucket_Common{\n\t\t\t\tSchema: pointer.GetString(currentESS.SchemaName), // TODO can it be NULL?\n\t\t\t\tQueryid: *currentESS.Digest,\n\t\t\t\tFingerprint: fingerprint,\n\t\t\t\tIsTruncated: isTruncated,\n\t\t\t\tNumQueries: count,\n\t\t\t\tNumQueriesWithErrors: inc(currentESS.SumErrors, prevESS.SumErrors),\n\t\t\t\tNumQueriesWithWarnings: inc(currentESS.SumWarnings, prevESS.SumWarnings),\n\t\t\t\tAgentType: inventorypb.AgentType_QAN_MYSQL_PERFSCHEMA_AGENT,\n\t\t\t},\n\t\t\tMysql: &agentpb.MetricsBucket_MySQL{},\n\t\t}\n\n\t\tfor _, p := range []struct {\n\t\t\tvalue float32 // result value: currentESS.SumXXX-prevESS.SumXXX\n\t\t\tsum *float32 // MetricsBucket.XXXSum field to write value\n\t\t\tcnt *float32 // MetricsBucket.XXXCnt field to write count\n\t\t}{\n\t\t\t// in order of events_statements_summary_by_digest columns\n\n\t\t\t// convert picoseconds to seconds\n\t\t\t{inc(currentESS.SumTimerWait, prevESS.SumTimerWait) / 1000000000000, &mb.Common.MQueryTimeSum, &mb.Common.MQueryTimeCnt},\n\t\t\t{inc(currentESS.SumLockTime, prevESS.SumLockTime) / 1000000000000, &mb.Mysql.MLockTimeSum, &mb.Mysql.MLockTimeCnt},\n\n\t\t\t{inc(currentESS.SumRowsAffected, prevESS.SumRowsAffected), &mb.Mysql.MRowsAffectedSum, &mb.Mysql.MRowsAffectedCnt},\n\t\t\t{inc(currentESS.SumRowsSent, prevESS.SumRowsSent), &mb.Mysql.MRowsSentSum, &mb.Mysql.MRowsSentCnt},\n\t\t\t{inc(currentESS.SumRowsExamined, prevESS.SumRowsExamined), &mb.Mysql.MRowsExaminedSum, &mb.Mysql.MRowsExaminedCnt},\n\n\t\t\t{inc(currentESS.SumCreatedTmpDiskTables, prevESS.SumCreatedTmpDiskTables), &mb.Mysql.MTmpDiskTablesSum, &mb.Mysql.MTmpDiskTablesCnt},\n\t\t\t{inc(currentESS.SumCreatedTmpTables, prevESS.SumCreatedTmpTables), &mb.Mysql.MTmpTablesSum, &mb.Mysql.MTmpTablesCnt},\n\t\t\t{inc(currentESS.SumSelectFullJoin, prevESS.SumSelectFullJoin), &mb.Mysql.MFullJoinSum, &mb.Mysql.MFullJoinCnt},\n\t\t\t{inc(currentESS.SumSelectFullRangeJoin, prevESS.SumSelectFullRangeJoin), &mb.Mysql.MSelectFullRangeJoinSum, &mb.Mysql.MSelectFullRangeJoinCnt},\n\t\t\t{inc(currentESS.SumSelectRange, prevESS.SumSelectRange), &mb.Mysql.MSelectRangeSum, &mb.Mysql.MSelectRangeCnt},\n\t\t\t{inc(currentESS.SumSelectRangeCheck, prevESS.SumSelectRangeCheck), &mb.Mysql.MSelectRangeCheckSum, &mb.Mysql.MSelectRangeCheckCnt},\n\t\t\t{inc(currentESS.SumSelectScan, prevESS.SumSelectScan), &mb.Mysql.MFullScanSum, &mb.Mysql.MFullScanCnt},\n\n\t\t\t{inc(currentESS.SumSortMergePasses, prevESS.SumSortMergePasses), &mb.Mysql.MMergePassesSum, &mb.Mysql.MMergePassesCnt},\n\t\t\t{inc(currentESS.SumSortRange, prevESS.SumSortRange), &mb.Mysql.MSortRangeSum, &mb.Mysql.MSortRangeCnt},\n\t\t\t{inc(currentESS.SumSortRows, prevESS.SumSortRows), &mb.Mysql.MSortRowsSum, &mb.Mysql.MSortRowsCnt},\n\t\t\t{inc(currentESS.SumSortScan, prevESS.SumSortScan), &mb.Mysql.MSortScanSum, &mb.Mysql.MSortScanCnt},\n\n\t\t\t{inc(currentESS.SumNoIndexUsed, prevESS.SumNoIndexUsed), &mb.Mysql.MNoIndexUsedSum, &mb.Mysql.MNoIndexUsedCnt},\n\t\t\t{inc(currentESS.SumNoGoodIndexUsed, prevESS.SumNoGoodIndexUsed), &mb.Mysql.MNoGoodIndexUsedSum, 
&mb.Mysql.MNoGoodIndexUsedCnt},\n\t\t} {\n\t\t\tif p.value != 0 {\n\t\t\t\t*p.sum = p.value\n\t\t\t\t*p.cnt = count\n\t\t\t}\n\t\t}\n\n\t\tres = append(res, mb)\n\t}\n\n\treturn res\n}", "func NewSimpleHistogram(subsystem, name, help string, buckets []float64) SimpleHistogram {\n\treturn NewSimpleHistogramWithOpts(subsystem, name, help, buckets, DefaultOptions)\n}", "func (d *pebbleDB) NewBatchBucket(name string) (store.BatchBucket, error) {\n\tbn := []byte(name)\n\treturn &pebbleBucket{\n\t\tdb: d.DB,\n\t\tname: bn,\n\t\tprefixIterOpts: getPrefixIterOptions(bn),\n\t\twriteOpts: pebble.NoSync,\n\t}, nil\n}", "func MakeBulkMetrics(histogramWindow time.Duration) Metrics {\n\treturn Metrics{\n\t\tMaxBytesHist: metric.NewHistogram(metaMemMaxBytes, histogramWindow, log10int64times1000, 3),\n\t\tCurBytesCount: metric.NewGauge(metaMemCurBytes),\n\t}\n}", "func Buckets(count int) func(any) error {\n\treturn func(f any) error {\n\t\td := f.(*dto.Histogram)\n\t\tif len(d.Bucket) != count {\n\t\t\treturn fmt.Errorf(\"want %v buckets, got %v\", count, len(d.Bucket))\n\t\t}\n\t\treturn nil\n\t}\n}", "func (te *TelemetryEmitter) emitHistogram(metric Metric, timestamp time.Time) error {\n\thist, ok := metric.value.(*dto.Histogram)\n\tif !ok {\n\t\treturn fmt.Errorf(\"unknown histogram metric type for %q: %T\", metric.name, metric.value)\n\t}\n\n\tif m, ok := te.deltaCalculator.CountMetric(metric.name+\".sum\", metric.attributes, hist.GetSampleSum(), timestamp); ok {\n\t\tte.harvester.RecordMetric(m)\n\t}\n\n\tmetricName := metric.name + \".buckets\"\n\tbuckets := make(histogram.Buckets, 0, len(hist.Bucket))\n\tfor _, b := range hist.GetBucket() {\n\t\tupperBound := b.GetUpperBound()\n\t\tcount := float64(b.GetCumulativeCount())\n\t\tif !math.IsInf(upperBound, 1) {\n\t\t\tbucketAttrs := copyAttrs(metric.attributes)\n\t\t\tbucketAttrs[\"histogram.bucket.upperBound\"] = upperBound\n\t\t\tif m, ok := te.deltaCalculator.CountMetric(metricName, bucketAttrs, count, timestamp); ok {\n\t\t\t\tte.harvester.RecordMetric(m)\n\t\t\t}\n\t\t}\n\t\tbuckets = append(\n\t\t\tbuckets,\n\t\t\thistogram.Bucket{\n\t\t\t\tUpperBound: upperBound,\n\t\t\t\tCount: count,\n\t\t\t},\n\t\t)\n\t}\n\n\tvar results error\n\tmetricName = metric.name + \".percentiles\"\n\tfor _, p := range te.percentiles {\n\t\tv, err := histogram.Percentile(p, buckets)\n\t\tif err != nil {\n\t\t\tif results == nil {\n\t\t\t\tresults = err\n\t\t\t} else {\n\t\t\t\tresults = fmt.Errorf(\"%v: %w\", err, results)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tpercentileAttrs := copyAttrs(metric.attributes)\n\t\tpercentileAttrs[\"percentile\"] = p\n\t\tte.harvester.RecordMetric(telemetry.Gauge{\n\t\t\tName: metricName,\n\t\t\tAttributes: percentileAttrs,\n\t\t\tValue: v,\n\t\t\tTimestamp: timestamp,\n\t\t})\n\t}\n\n\treturn results\n}", "func (c *LoggerClient) Histogram(name string, value float64) {\n\tc.print(\"Histogram\", name, value, value)\n}", "func init() {\n\tval2Bucket = make([]int, maxArrayValue)\n\tmaxArrayValueIndex = -1\n\tfor i, value := range histogramBucketValues {\n\t\tif value == maxArrayValue {\n\t\t\tmaxArrayValueIndex = i\n\t\t\tbreak\n\t\t}\n\t}\n\tif maxArrayValueIndex == -1 {\n\t\tlog.Fatalf(\"Bug boundary maxArrayValue=%d not found in bucket list %v\", maxArrayValue, histogramBucketValues)\n\t}\n\tidx := 0\n\tfor i := int32(0); i < maxArrayValue; i++ {\n\t\tif i >= histogramBucketValues[idx] {\n\t\t\tidx++\n\t\t}\n\t\tval2Bucket[i] = idx\n\t}\n\t// coding bug detection (aka impossible if it works once) until 1000\n\tif idx != 
maxArrayValueIndex {\n\t\tlog.Fatalf(\"Bug in creating histogram index idx %d vs index %d up to %d\", idx, int(maxArrayValue), maxArrayValue)\n\t}\n}", "func (c *Client) Histogram(name string, value int) error {\n\treturn c.DataDog.Histogram(name, float64(value), nil, 1)\n}", "func newRuneHistogram(tracking, init []rune) *runeHistogram {\n\th := &runeHistogram{set: make(map[rune]int)}\n\tfor _, ch := range tracking {\n\t\th.set[ch] = 0\n\t}\n\tif len(init) > 0 {\n\t\tfor _, ch := range init {\n\t\t\tif _, ok := h.set[ch]; ok {\n\t\t\t\th.set[ch]++\n\t\t\t}\n\t\t}\n\t\th.refreshValidity()\n\t}\n\treturn h\n}", "func (h *Histogram) Add(value int64) {\n\tfor i := range h.labels {\n\t\tif i == len(h.labels)-1 || value <= h.cutoffs[i] {\n\t\t\th.buckets[i].Add(1)\n\t\t\th.total.Add(value)\n\t\t\tbreak\n\t\t}\n\t}\n\tif h.hook != nil {\n\t\th.hook(value)\n\t}\n\tif defaultStatsdHook.histogramHook != nil && h.name != \"\" {\n\t\tdefaultStatsdHook.histogramHook(h.name, value)\n\t}\n}", "func newBatch(t *testing.T, flgs []bool, ts []types.Type, proc *process.Process, rows int64) *batch.Batch {\n\treturn testutil.NewBatch(ts, false, int(rows), proc.Mp())\n}", "func (ac *Accumulator) AddHistogram(measurement string, fields map[string]interface{},\n\ttags map[string]string, t ...time.Time) {\n\t// as of right now metric always returns a nil error\n\tm, _ := metric.New(measurement, tags, fields, getTime(t), telegraf.Histogram)\n\tac.AddMetric(m)\n}", "func (p *Provider) NewHistogram(name string, _ int) metrics.Histogram {\n\treturn p.newHistogram(name)\n}", "func (p *Provider) Histogram(name string, value float64, tags map[string]string) error {\n\treturn p.client.Histogram(name, value, p.formatTags(tags), p.rate)\n}", "func (r *Recorder) Histogram(ctx context.Context, tconn *chrome.TestConn) ([]*Histogram, error) {\n\tnames := r.names()\n\n\ts, err := GetHistograms(ctx, tconn, names)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to get snapshot\")\n\t}\n\n\treturn DiffHistograms(r.snapshot, s)\n}", "func NewHistgram(filename string) (obj Histgram) {\n\tobj = Histgram{\n\t\tName: filename,\n\t\tR: make([]int, 16),\n\t\tG: make([]int, 16),\n\t\tB: make([]int, 16),\n\t}\n\treturn\n}", "func NewTable(desiredBucketCount int) *hashtable {\n\th := &hashtable{\n\t\tbucketcount: desiredBucketCount,\n\t\tbuckets: make([]*CacheItem, desiredBucketCount),\n\t}\n\treturn h\n}", "func (client *Client) DescribeHistogramWithChan(request *DescribeHistogramRequest) (<-chan *DescribeHistogramResponse, <-chan error) {\n\tresponseChan := make(chan *DescribeHistogramResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.DescribeHistogram(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {\n\tinVec := vals[0].(Vector)\n\n\tfor _, sample := range inVec {\n\t\t// Skip non-histogram samples.\n\t\tif sample.H == nil {\n\t\t\tcontinue\n\t\t}\n\t\tenh.Out = append(enh.Out, Sample{\n\t\t\tMetric: enh.DropMetricName(sample.Metric),\n\t\t\tF: sample.H.Sum,\n\t\t})\n\t}\n\treturn enh.Out\n}", "func timeHistogramMetricsBuckets() []float64 {\n\tb := make([]float64, timeHistTotalBuckets+1)\n\t// Underflow bucket.\n\tb[0] = 
float64NegInf()\n\n\tfor j := 0; j < timeHistNumSubBuckets; j++ {\n\t\t// No bucket bit for the first few buckets. Just sub-bucket bits after the\n\t\t// min bucket bit.\n\t\tbucketNanos := uint64(j) << (timeHistMinBucketBits - 1 - timeHistSubBucketBits)\n\t\t// Convert nanoseconds to seconds via a division.\n\t\t// These values will all be exactly representable by a float64.\n\t\tb[j+1] = float64(bucketNanos) / 1e9\n\t}\n\t// Generate the rest of the buckets. It's easier to reason\n\t// about if we cut out the 0'th bucket.\n\tfor i := timeHistMinBucketBits; i < timeHistMaxBucketBits; i++ {\n\t\tfor j := 0; j < timeHistNumSubBuckets; j++ {\n\t\t\t// Set the bucket bit.\n\t\t\tbucketNanos := uint64(1) << (i - 1)\n\t\t\t// Set the sub-bucket bits.\n\t\t\tbucketNanos |= uint64(j) << (i - 1 - timeHistSubBucketBits)\n\t\t\t// The index for this bucket is going to be the (i+1)'th bucket\n\t\t\t// (note that we're starting from zero, but handled the first bucket\n\t\t\t// earlier, so we need to compensate), and the j'th sub bucket.\n\t\t\t// Add 1 because we left space for -Inf.\n\t\t\tbucketIndex := (i-timeHistMinBucketBits+1)*timeHistNumSubBuckets + j + 1\n\t\t\t// Convert nanoseconds to seconds via a division.\n\t\t\t// These values will all be exactly representable by a float64.\n\t\t\tb[bucketIndex] = float64(bucketNanos) / 1e9\n\t\t}\n\t}\n\t// Overflow bucket.\n\tb[len(b)-2] = float64(uint64(1)<<(timeHistMaxBucketBits-1)) / 1e9\n\tb[len(b)-1] = float64Inf()\n\treturn b\n}", "func createHistogram(data []float64, n int) {\n\tp, err := plot.New()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tp.Add(plotter.NewGrid())\n\thistdata := valuer{data}\n\tp.Add(plotter.NewHist(histdata, n))\n\tp.X.Label.Text = \"time / ps\"\n\tp.Y.Label.Text = \"frequency\"\n\tp.Title.Text = fmt.Sprintf(\"Frequency of lifetime data from lifetime.txt. 
%v bins.\", n)\n\n\tif err := p.Save(5, 5, fmt.Sprintf(\"out/Histogram with %v bins.png\", n)); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (b *BucketFeedback) splitBucket(newNumBkts int, totalCount float64, originBucketCount float64) []bucket {\n\t// Split the bucket.\n\tbounds := b.getBoundaries(newNumBkts + 1)\n\tbkts := make([]bucket, 0, len(bounds)-1)\n\tsc := &stmtctx.StatementContext{TimeZone: time.UTC}\n\tfor i := 1; i < len(bounds); i++ {\n\t\tnewBkt := bucket{&bounds[i-1], bounds[i].Copy(), 0, 0}\n\t\t// get bucket count\n\t\t_, ratio := getOverlapFraction(Feedback{b.lower, b.upper, int64(originBucketCount), 0}, newBkt)\n\t\tcountInNewBkt := originBucketCount * ratio\n\t\tcountInNewBkt = b.refineBucketCount(sc, newBkt, countInNewBkt)\n\t\t// do not split if the count of result bucket is too small.\n\t\tif countInNewBkt < minBucketFraction*totalCount {\n\t\t\tbounds[i] = bounds[i-1]\n\t\t\tcontinue\n\t\t}\n\t\tnewBkt.Count = int64(countInNewBkt)\n\t\tbkts = append(bkts, newBkt)\n\t\t// To guarantee that each bucket's range will not overlap.\n\t\tsetNextValue(&bounds[i])\n\t}\n\treturn bkts\n}", "func (c *Context) Histogram(stat string, value float64) {\n\tfor _, sink := range c.sinks {\n\t\tsink.Histogram(c, stat, value)\n\t}\n}", "func (c *Prometheus) getHistogram(name string, labels ...map[string]string) *prometheus.HistogramVec {\n\tvar keys []string\n\tvar values []string\n\n\tfor key, value := range labels[0] {\n\t\tkeys = append(keys, key)\n\t\tvalues = append(values, value)\n\t}\n\n\tc.Lock()\n\tdefer c.Unlock()\n\n\tif _, ok := c.histograms[name]; !ok {\n\t\tc.histograms[name] = prometheus.NewHistogramVec(prometheus.HistogramOpts{\n\t\t\tName: name + \"_seconds\",\n\t\t\tHelp: \" \",\n\t\t}, keys)\n\t\tprometheus.Register(c.histograms[name])\n\t}\n\treturn c.histograms[name]\n}", "func histHelper(expFormat string, labels ...interface{}) *metrics.Histogram {\n\treturn metrics.GetOrCreateHistogram(fmt.Sprintf(expFormat, labels...))\n}", "func WrapWithMetrics(b Bucket, reg prometheus.Registerer, name string) *metricBucket {\n\tbkt := &metricBucket{\n\t\tbkt: b,\n\t\tisOpFailureExpected: func(err error) bool { return false },\n\t\tops: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{\n\t\t\tName: \"objstore_bucket_operations_total\",\n\t\t\tHelp: \"Total number of all attempted operations against a bucket.\",\n\t\t\tConstLabels: prometheus.Labels{\"bucket\": name},\n\t\t}, []string{\"operation\"}),\n\n\t\topsFailures: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{\n\t\t\tName: \"objstore_bucket_operation_failures_total\",\n\t\t\tHelp: \"Total number of operations against a bucket that failed, but were not expected to fail in certain way from caller perspective. Those errors have to be investigated.\",\n\t\t\tConstLabels: prometheus.Labels{\"bucket\": name},\n\t\t}, []string{\"operation\"}),\n\n\t\topsFetchedBytes: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{\n\t\t\tName: \"objstore_bucket_operation_fetched_bytes_total\",\n\t\t\tHelp: \"Total number of bytes fetched from bucket, per operation.\",\n\t\t\tConstLabels: prometheus.Labels{\"bucket\": name},\n\t\t}, []string{\"operation\"}),\n\n\t\topsTransferredBytes: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{\n\t\t\tName: \"objstore_bucket_operation_transferred_bytes\",\n\t\t\tHelp: \"Number of bytes transferred from/to bucket per operation.\",\n\t\t\tConstLabels: prometheus.Labels{\"bucket\": name},\n\t\t\tBuckets: prometheus.ExponentialBuckets(2<<14, 2, 16), // 32KiB, 64KiB, ... 
1GiB\n\t\t}, []string{\"operation\"}),\n\n\t\topsDuration: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{\n\t\t\tName: \"objstore_bucket_operation_duration_seconds\",\n\t\t\tHelp: \"Duration of successful operations against the bucket\",\n\t\t\tConstLabels: prometheus.Labels{\"bucket\": name},\n\t\t\tBuckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120},\n\t\t}, []string{\"operation\"}),\n\n\t\tlastSuccessfulUploadTime: promauto.With(reg).NewGauge(prometheus.GaugeOpts{\n\t\t\tName: \"objstore_bucket_last_successful_upload_time\",\n\t\t\tHelp: \"Second timestamp of the last successful upload to the bucket.\",\n\t\t\tConstLabels: prometheus.Labels{\"bucket\": name},\n\t\t}),\n\t}\n\tfor _, op := range []string{\n\t\tOpIter,\n\t\tOpGet,\n\t\tOpGetRange,\n\t\tOpExists,\n\t\tOpUpload,\n\t\tOpDelete,\n\t\tOpAttributes,\n\t} {\n\t\tbkt.ops.WithLabelValues(op)\n\t\tbkt.opsFailures.WithLabelValues(op)\n\t\tbkt.opsDuration.WithLabelValues(op)\n\t\tbkt.opsFetchedBytes.WithLabelValues(op)\n\t}\n\t// fetched bytes only relevant for get and getrange\n\tfor _, op := range []string{\n\t\tOpGet,\n\t\tOpGetRange,\n\t\t// TODO: Add uploads\n\t} {\n\t\tbkt.opsTransferredBytes.WithLabelValues(op)\n\t}\n\treturn bkt\n}", "func NewHistoCounter(sink *Counter) *HistoCounter {\n\treturn &HistoCounter{\n\t\tsink: sink,\n\t\tDatapointBucket: sfxclient.NewRollingBucket(\"datapoint_batch_size\", map[string]string{}),\n\t\tEventBucket: sfxclient.NewRollingBucket(\"event_batch_size\", map[string]string{}),\n\t\tSpanBucket: sfxclient.NewRollingBucket(\"span_batch_size\", map[string]string{}),\n\t}\n}", "func newBatch(evts []*evtsapi.Event, offset *events.Offset) (events.Batch, error) {\n\tif offset == nil {\n\t\treturn nil, fmt.Errorf(\"cannot create a batch with nil offset\")\n\t}\n\treturn &batch{evts: evts, offset: offset}, nil\n}", "func mockStatsHistogram(id int64, values []types.Datum, repeat int64, tp *types.FieldType) *statistics.Histogram {\n\tndv := len(values)\n\thistogram := statistics.NewHistogram(id, int64(ndv), 0, 0, tp, ndv, 0)\n\tfor i := 0; i < ndv; i++ {\n\t\thistogram.AppendBucket(&values[i], &values[i], repeat*int64(i+1), repeat)\n\t}\n\treturn histogram\n}", "func (client *Client) DescribeHistogramWithCallback(request *DescribeHistogramRequest, callback func(response *DescribeHistogramResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *DescribeHistogramResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.DescribeHistogram(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func NewPrometheusWithBuckets(buckets []float64, namespace string, registry prometheus.Registerer) *Prometheus {\n\tp := &Prometheus{\n\t\tqueuedEvents: prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\t\tNamespace: namespace,\n\t\t\tSubsystem: promControllerSubsystem,\n\t\t\tName: \"queued_events_total\",\n\t\t\tHelp: \"Total number of events queued.\",\n\t\t}, []string{\"handler\", \"type\"}),\n\n\t\tprocessedSuc: prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\t\tNamespace: namespace,\n\t\t\tSubsystem: promControllerSubsystem,\n\t\t\tName: \"processed_events_total\",\n\t\t\tHelp: \"Total number of successfuly processed events.\",\n\t\t}, []string{\"handler\", \"type\"}),\n\n\t\tprocessedError: prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\t\tNamespace: 
namespace,\n\t\t\tSubsystem: promControllerSubsystem,\n\t\t\tName: \"processed_events_errors_total\",\n\t\t\tHelp: \"Total number of errors processing events.\",\n\t\t}, []string{\"handler\", \"type\"}),\n\n\t\tprocessedSucDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{\n\t\t\tNamespace: namespace,\n\t\t\tSubsystem: promControllerSubsystem,\n\t\t\tName: \"processed_events_duration_seconds\",\n\t\t\tHelp: \"The duration for a successful event to be processed.\",\n\t\t\tBuckets: buckets,\n\t\t}, []string{\"handler\", \"type\"}),\n\n\t\tprocessedErrDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{\n\t\t\tNamespace: namespace,\n\t\t\tSubsystem: promControllerSubsystem,\n\t\t\tName: \"processed_events_error_duration_seconds\",\n\t\t\tHelp: \"The duration for a event finished in error to be processed.\",\n\t\t\tBuckets: buckets,\n\t\t}, []string{\"handler\", \"type\"}),\n\n\t\treg: registry,\n\t}\n\n\tp.registerMetrics()\n\treturn p\n}", "func (e errChunkIterator) AtHistogram() (int64, *histogram.Histogram) { panic(\"not implemented\") }", "func getHistogram(ctx context.Context, name string, labels map[string]string) Histogram {\n\tif !On(ctx) {\n\t\treturn &nopHistogram{}\n\t}\n\tif opts, ok := Histograms[name]; !ok {\n\t\tmsg := fmt.Sprintf(\"attempted to get undeclared histogram %s\", name)\n\t\tpanic(msg)\n\t} else {\n\t\tif !mustCompleteLabels(opts.Labels, labels) {\n\t\t\terr := fmt.Sprintf(\"attempted to set histogram %s with invalid labels, expected %v but got %v\",\n\t\t\t\tname, opts.Labels, labels)\n\t\t\tpanic(err)\n\t\t}\n\t}\n\treturn metricsClient(ctx).GetHistogram(name, labels)\n}", "func NewHistograms(name string, config *HistogramConfig) (*Histograms, error) {\n\treturn &Histograms{\n\t\tmetricVec: newMetricVec(func(labels map[string]string) prometheus.Metric {\n\t\t\treturn &expiringHistogram{prometheus.NewHistogram(prometheus.HistogramOpts{\n\t\t\t\tHelp: config.Description,\n\t\t\t\tName: name,\n\t\t\t\tConstLabels: labels,\n\t\t\t\tBuckets: config.Buckets,\n\t\t\t}),\n\t\t\t\t0,\n\t\t\t}\n\t\t}, int64(config.MaxIdle.Seconds())),\n\t\tCfg: config,\n\t}, nil\n}", "func NewBatch() Batch {\n\treturn Batch{}\n}", "func (m *HistogramStatByBreakout) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateData(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateKey(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (h *Histograms) With(labels model.LabelSet) prometheus.Histogram {\n\treturn h.metricVec.With(labels).(prometheus.Histogram)\n}", "func NewAggregator(\n\tmetrics chan Metric,\n\tinterval float64,\n\thostname string,\n\tformatter Formatter,\n\thistogramAggregates []string,\n\thistogramPercentiles []float64,\n\trecentPointThreshold int64,\n\texpiry ...int64,\n) Aggregator {\n\tif recentPointThreshold == 0 {\n\t\trecentPointThreshold = DefaultRecentPointThreshold\n\t}\n\n\tvar expirySeconds int64\n\tif len(expiry) > 0 {\n\t\texpirySeconds = expiry[0]\n\t} else {\n\t\texpirySeconds = DefaultExpirySeconds\n\t}\n\n\treturn &aggregator{\n\t\tmetrics: metrics,\n\t\tcontext: make(map[Context]Generator),\n\t\tinterval: interval,\n\t\thostname: hostname,\n\t\tformatter: formatter,\n\t\thistogramAggregates: histogramAggregates,\n\t\thistogramPercentiles: histogramPercentiles,\n\t\trecentPointThreshold: recentPointThreshold,\n\t\texpirySeconds: expirySeconds,\n\t}\n}", "func 
GetHistogram(ctx context.Context, tconn *chrome.TestConn, name string) (*Histogram, error) {\n\th := Histogram{Name: name}\n\tif err := tconn.Call(ctx, &h, `tast.promisify(chrome.metricsPrivate.getHistogram)`, name); err != nil {\n\t\tif strings.Contains(err.Error(), fmt.Sprintf(\"Histogram %s not found\", name)) {\n\t\t\treturn &Histogram{Name: name}, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\tif err := h.validate(); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"bad histogram %v\", h)\n\t}\n\treturn &h, nil\n}", "func (m *Metrics) HistogramVec(name, help string, buckets []float64, labels ...string) *prometheus.HistogramVec {\n\thistogram := prometheus.NewHistogramVec(prometheus.HistogramOpts{\n\t\tNamespace: m.config.Namespace,\n\t\tName: name,\n\t\tHelp: help,\n\t\tBuckets: buckets,\n\t}, labels)\n\n\tprometheus.MustRegister(histogram)\n\n\treturn histogram\n}", "func (m *Metrics) Histogram(key string) *metrics.Histogram {\n\tm.Lock()\n\tdefer m.Unlock()\n\thist, ok := m.histograms[key]\n\tif !ok {\n\t\thist = metrics.NewHistogram(key, 0, 1e8, 5)\n\t\tm.histograms[key] = hist\n\t}\n\treturn hist\n}", "func HistogramFixedWidth(scope *Scope, values tf.Output, value_range tf.Output, nbins tf.Output, optional ...HistogramFixedWidthAttr) (out tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\tattrs := map[string]interface{}{}\n\tfor _, a := range optional {\n\t\ta(attrs)\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"HistogramFixedWidth\",\n\t\tInput: []tf.Input{\n\t\t\tvalues, value_range, nbins,\n\t\t},\n\t\tAttrs: attrs,\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}", "func NewBatchDeleter(ctx context.Context, wg *sync.WaitGroup, s *SQSService, every, drainTimeout time.Duration) chan<- *sqs.Message {\n\tdq := &deleteQueue{\n\t\tsvc: s,\n\t\taccumulationTimeout: every,\n\t\tdrainTimeout: drainTimeout,\n\t\tqueue: make(chan *sqs.Message),\n\t}\n\twg.Add(1)\n\tgo dq.start(ctx, wg)\n\treturn dq.queue\n}", "func New(conn *pgx.Conn, size int64, tableName string, columnNames []string) *Batcher {\n\treturn &Batcher{\n\t\tconn: conn,\n\t\tsize: size,\n\t\ttableName: tableName,\n\t\tcolumnNames: columnNames,\n\t}\n}", "func buildBucketFeedback(h *Histogram, feedback *QueryFeedback) (map[int]*BucketFeedback, int) {\n\tbktID2FB := make(map[int]*BucketFeedback)\n\tif len(feedback.Feedback) == 0 {\n\t\treturn bktID2FB, 0\n\t}\n\ttotal := 0\n\tsc := &stmtctx.StatementContext{TimeZone: time.UTC}\n\tmin, max := GetMinValue(h.Tp), GetMaxValue(h.Tp)\n\tfor _, fb := range feedback.Feedback {\n\t\tskip, err := fb.adjustFeedbackBoundaries(sc, &min, &max)\n\t\tif err != nil {\n\t\t\tlogutil.BgLogger().Debug(\"adjust feedback boundaries failed\", zap.Error(err))\n\t\t\tcontinue\n\t\t}\n\t\tif skip {\n\t\t\tcontinue\n\t\t}\n\t\tidx := h.Bounds.UpperBound(0, fb.Lower)\n\t\tbktIdx := 0\n\t\t// The last bucket also stores the feedback that falls outside the upper bound.\n\t\tif idx >= h.Bounds.NumRows()-1 {\n\t\t\tbktIdx = h.Len() - 1\n\t\t} else if h.Len() == 1 {\n\t\t\tbktIdx = 0\n\t\t} else {\n\t\t\tif idx == 0 {\n\t\t\t\tbktIdx = 0\n\t\t\t} else {\n\t\t\t\tbktIdx = (idx - 1) / 2\n\t\t\t}\n\t\t\t// Make sure that this feedback lies within the bucket.\n\t\t\tif chunk.Compare(h.Bounds.GetRow(2*(bktIdx+1)), 0, fb.Upper) < 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\ttotal++\n\t\tbkt := bktID2FB[bktIdx]\n\t\tif bkt == nil {\n\t\t\tbkt = &BucketFeedback{lower: h.GetLower(bktIdx), upper: h.GetUpper(bktIdx)}\n\t\t\tbktID2FB[bktIdx] = bkt\n\t\t}\n\t\tbkt.feedback = append(bkt.feedback, fb)\n\t\t// 
Update the bound if necessary.\n\t\tres, err := bkt.lower.CompareDatum(nil, fb.Lower)\n\t\tif err != nil {\n\t\t\tlogutil.BgLogger().Debug(\"compare datum failed\", zap.Any(\"value1\", bkt.lower), zap.Any(\"value2\", fb.Lower), zap.Error(err))\n\t\t\tcontinue\n\t\t}\n\t\tif res > 0 {\n\t\t\tbkt.lower = fb.Lower\n\t\t}\n\t\tres, err = bkt.upper.CompareDatum(nil, fb.Upper)\n\t\tif err != nil {\n\t\t\tlogutil.BgLogger().Debug(\"compare datum failed\", zap.Any(\"value1\", bkt.upper), zap.Any(\"value2\", fb.Upper), zap.Error(err))\n\t\t\tcontinue\n\t\t}\n\t\tif res < 0 {\n\t\t\tbkt.upper = fb.Upper\n\t\t}\n\t}\n\treturn bktID2FB, total\n}", "func makeBuckets(m *dto.Metric) map[string]interface{} {\n\tfields := make(map[string]interface{})\n\tfor _, b := range m.GetHistogram().Bucket {\n\t\tfields[fmt.Sprint(b.GetUpperBound())] = float64(b.GetCumulativeCount())\n\t}\n\treturn fields\n}", "func (ht *HashTable) growBucket(bucket int) {\n\tht.EnsureSize(BUCKET_SIZE)\n\tlastBucketAddr := ht.lastBucket(bucket) * BUCKET_SIZE\n\tbinary.PutVarint(ht.Buf[lastBucketAddr:lastBucketAddr+10], int64(ht.numBuckets))\n\tht.Used += BUCKET_SIZE\n\tht.numBuckets++\n}", "func NewPercentiles(label string, values []float64) *Percentiles {\n\tvalues = append(values[:0:0], values...)\n\tsort.Float64s(values)\n\n\tpoints := make([]Point, 0, len(values))\n\n\tmultiplier := 1 / float64(len(values)+1)\n\tfor i, v := range values {\n\t\tvar p Point\n\t\tp.X = float64(i+1) * multiplier\n\t\tp.Y = v\n\t\tpoints = append(points, p)\n\t}\n\n\treturn &Percentiles{\n\t\tLabel: label,\n\t\tData: points,\n\t}\n}", "func NewBatch(docs []doc.Metadata, opts ...BatchOption) Batch {\n\tb := Batch{Docs: docs}\n\n\tfor _, opt := range opts {\n\t\tb = opt.apply(b)\n\t}\n\n\treturn b\n}", "func (h *Histogram) Diff(old *Histogram) (*Histogram, error) {\n\tif h.Name != old.Name {\n\t\treturn nil, errors.Errorf(\"unmatched histogram, %s vs %s\", h.Name, old.Name)\n\t}\n\tif len(old.Buckets) > len(h.Buckets) {\n\t\treturn nil, errors.Errorf(\"old histogram has %d bucket(s), new only has %d\", len(old.Buckets), len(h.Buckets))\n\t}\n\n\tdiff := &Histogram{Name: h.Name, Sum: h.Sum - old.Sum}\n\toi := 0\n\tfor _, hb := range h.Buckets {\n\t\t// If we've already looked at all of the old buckets, copy the new bucket over.\n\t\tif oi >= len(old.Buckets) {\n\t\t\tdiff.Buckets = append(diff.Buckets, hb)\n\t\t\tcontinue\n\t\t}\n\n\t\tob := old.Buckets[oi]\n\n\t\tswitch {\n\t\tcase ob.Min < hb.Min:\n\t\t\t// The old histogram shouldn't contain any buckets that aren't in the new one.\n\t\t\treturn nil, errors.Errorf(\"bucket [%d,%d) is present in old histogram but not new one\", ob.Min, ob.Max)\n\t\tcase ob.Min > hb.Min:\n\t\t\t// If this bucket isn't present in the old histogram, just copy it over.\n\t\t\tif ob.Min < hb.Max {\n\t\t\t\treturn nil, errors.Errorf(\"old bucket [%d,%d) overlaps new bucket [%d,%d)\", ob.Min, ob.Max, hb.Min, hb.Max)\n\t\t\t}\n\t\t\tdiff.Buckets = append(diff.Buckets, hb)\n\t\tcase ob.Min == hb.Min:\n\t\t\t// If we're looking at the same bucket in both histograms, save the difference (if any) and move to the next old bucket.\n\t\t\tif ob.Max != hb.Max {\n\t\t\t\treturn nil, errors.Errorf(\"old bucket [%d,%d) doesn't match new bucket [%d,%d)\", ob.Min, ob.Max, hb.Min, hb.Max)\n\t\t\t}\n\t\t\tif hb.Count < ob.Count {\n\t\t\t\treturn nil, errors.Errorf(\"old bucket [%d,%d) has count %d, new only has %d\", ob.Min, ob.Max, ob.Count, hb.Count)\n\t\t\t} else if hb.Count > ob.Count {\n\t\t\t\tdiff.Buckets = append(diff.Buckets, 
HistogramBucket{hb.Min, hb.Max, hb.Count - ob.Count})\n\t\t\t}\n\t\t\toi++\n\t\t}\n\t}\n\treturn diff, nil\n}" ]
[ "0.65275115", "0.6225844", "0.60444945", "0.59526646", "0.5879388", "0.58536047", "0.5797989", "0.5789569", "0.57089955", "0.5602748", "0.55806327", "0.5529546", "0.5499361", "0.54291517", "0.54144174", "0.54133976", "0.540959", "0.5363193", "0.5343349", "0.5327263", "0.52712864", "0.52507234", "0.5237935", "0.5220097", "0.52135086", "0.5168766", "0.5162947", "0.5133181", "0.5127125", "0.51220345", "0.5119906", "0.51163363", "0.5057929", "0.5044926", "0.5032113", "0.50148445", "0.5007246", "0.49882755", "0.49854013", "0.4977828", "0.49485964", "0.49354413", "0.49322984", "0.49267304", "0.49213156", "0.49194723", "0.49158326", "0.49110112", "0.4899984", "0.48619017", "0.48514864", "0.48414063", "0.48378316", "0.48304302", "0.481513", "0.47893676", "0.4789133", "0.4788589", "0.47839186", "0.47686872", "0.47667137", "0.4748323", "0.47410506", "0.47156027", "0.4714094", "0.4698242", "0.4686744", "0.4679445", "0.46649683", "0.46614653", "0.46110988", "0.4605755", "0.46040115", "0.46017238", "0.459566", "0.4589535", "0.45753455", "0.45636624", "0.453906", "0.45327356", "0.4522215", "0.4512406", "0.4498793", "0.44852823", "0.44837585", "0.44826224", "0.4478236", "0.44775787", "0.44772053", "0.44725418", "0.4463491", "0.44563317", "0.4448306", "0.44384918", "0.4434421", "0.44285125", "0.44229195", "0.44145632", "0.43988198", "0.43845645" ]
0.8757579
0
update updates the batchHistogram from a runtime/metrics histogram. sum must be provided if the batchHistogram was created to have an exact sum. h.buckets must be a strict subset of his.Buckets.
func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) {
	counts, buckets := his.Counts, his.Buckets

	h.mu.Lock()
	defer h.mu.Unlock()

	// Clear buckets.
	for i := range h.counts {
		h.counts[i] = 0
	}
	// Copy and reduce buckets.
	var j int
	for i, count := range counts {
		h.counts[j] += count
		if buckets[i+1] == h.buckets[j+1] {
			j++
		}
	}
	if h.hasSum {
		h.sum = sum
	}
}
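For context on the copy-and-reduce loop above: because h.buckets is a strict subset of his.Buckets sharing the same first and last boundary, each coarse bucket absorbs a contiguous run of fine-grained counts, and j advances exactly when a fine upper boundary coincides with the next coarse boundary. Below is a minimal, self-contained sketch of that reduction; the reduce helper and the sample boundaries and counts are illustrative assumptions, not code taken from this record.

package main

import "fmt"

// reduce collapses fine-grained bucket counts onto a coarser boundary set,
// mirroring the inner loop of update. Precondition (as stated in the query
// above): coarse is a strict subset of fine and shares its first and last
// boundary. This is a hypothetical helper written for illustration only.
func reduce(fineCounts []uint64, fine, coarse []float64) []uint64 {
	out := make([]uint64, len(coarse)-1)
	var j int
	for i, count := range fineCounts {
		out[j] += count
		// Move to the next coarse bucket once its upper boundary is reached.
		if fine[i+1] == coarse[j+1] {
			j++
		}
	}
	return out
}

func main() {
	fine := []float64{0, 1, 2, 4, 8, 16} // boundaries for 5 fine buckets
	counts := []uint64{3, 1, 4, 1, 5}    // one count per fine bucket
	coarse := []float64{0, 2, 16}        // strict subset: 2 coarse buckets
	fmt.Println(reduce(counts, fine, coarse)) // prints [4 10]
}

The equality test on shared boundaries is what makes the strict-subset precondition essential: a coarse boundary that never appeared among the fine boundaries would leave j stuck, pooling all remaining counts into a single bucket.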
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func UpdateHistogram(h *Histogram, feedback *QueryFeedback) *Histogram {\n\tbuckets, isNewBuckets, totalCount := splitBuckets(h, feedback)\n\tbuckets = mergeBuckets(buckets, isNewBuckets, float64(totalCount))\n\thist := buildNewHistogram(h, buckets)\n\t// Update the NDV of primary key column.\n\tif feedback.Tp == PkType {\n\t\thist.NDV = int64(hist.TotalRowCount())\n\t}\n\treturn hist\n}", "func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram {\n\t// We need to remove -Inf values. runtime/metrics keeps them around.\n\t// But -Inf bucket should not be allowed for prometheus histograms.\n\tif buckets[0] == math.Inf(-1) {\n\t\tbuckets = buckets[1:]\n\t}\n\th := &batchHistogram{\n\t\tdesc: desc,\n\t\tbuckets: buckets,\n\t\t// Because buckets follows runtime/metrics conventions, there's\n\t\t// 1 more value in the buckets list than there are buckets represented,\n\t\t// because in runtime/metrics, the bucket values represent *boundaries*,\n\t\t// and non-Inf boundaries are inclusive lower bounds for that bucket.\n\t\tcounts: make([]uint64, len(buckets)-1),\n\t\thasSum: hasSum,\n\t}\n\th.init(h)\n\treturn h\n}", "func (c *Aggregator) Update(_ context.Context, number number.Number, desc *metric.Descriptor) error {\n\tkind := desc.NumberKind()\n\tasFloat := number.CoerceToFloat64(kind)\n\n\tbucketID := len(c.boundaries)\n\tfor i, boundary := range c.boundaries {\n\t\tif asFloat < boundary {\n\t\t\tbucketID = i\n\t\t\tbreak\n\t\t}\n\t}\n\t// Note: Binary-search was compared using the benchmarks. The following\n\t// code is equivalent to the linear search above:\n\t//\n\t// bucketID := sort.Search(len(c.boundaries), func(i int) bool {\n\t// return asFloat < c.boundaries[i]\n\t// })\n\t//\n\t// The binary search wins for very large boundary sets, but\n\t// the linear search performs better up through arrays between\n\t// 256 and 512 elements, which is a relatively large histogram, so we\n\t// continue to prefer linear search.\n\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\n\tc.state.count++\n\tc.state.sum.AddNumber(kind, number)\n\tc.state.bucketCounts[bucketID]++\n\n\treturn nil\n}", "func (v *verifiableMetric) update(data *Data, fieldValues string, packer *numberPacker) {\n\tswitch v.wantMetric.Type {\n\tcase TypeCounter:\n\t\tv.lastCounterValue[v.verifier.internMap.Intern(fieldValues)] = packer.pack(data.Number)\n\tcase TypeHistogram:\n\t\tlastDistributionSnapshot := v.lastDistributionSnapshot[v.verifier.internMap.Intern(fieldValues)]\n\t\tlastBucketSamples := lastDistributionSnapshot.numSamples\n\t\tvar count uint64\n\t\tfor i, b := range data.HistogramValue.Buckets {\n\t\t\tlastBucketSamples[i] = packer.packInt(int64(b.Samples))\n\t\t\tcount += b.Samples\n\t\t}\n\t\tlastDistributionSnapshot.sum = packer.pack(&data.HistogramValue.Total)\n\t\tlastDistributionSnapshot.count = packer.packInt(int64(count))\n\t\tlastDistributionSnapshot.min = packer.pack(&data.HistogramValue.Min)\n\t\tlastDistributionSnapshot.max = packer.pack(&data.HistogramValue.Max)\n\t\tlastDistributionSnapshot.ssd = packer.pack(&data.HistogramValue.SumOfSquaredDeviations)\n\t}\n}", "func (ms HistogramDataPoint) SetSum(v float64) {\n\t(*ms.orig).Sum = v\n}", "func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {\n\tinVec := vals[0].(Vector)\n\n\tfor _, sample := range inVec {\n\t\t// Skip non-histogram samples.\n\t\tif sample.H == nil {\n\t\t\tcontinue\n\t\t}\n\t\tenh.Out = append(enh.Out, Sample{\n\t\t\tMetric: enh.DropMetricName(sample.Metric),\n\t\t\tF: 
sample.H.Sum,\n\t\t})\n\t}\n\treturn enh.Out\n}", "func (h *sizeHistogram) add(size int64) {\n\t// Fetch the histogram interval corresponding\n\t// to the passed object size.\n\tfor i, interval := range ObjectsHistogramIntervals {\n\t\tif size >= interval.start && size <= interval.end {\n\t\t\th[i]++\n\t\t\tbreak\n\t\t}\n\t}\n}", "func (h *Histogram) Add(value int64) {\n\tfor i := range h.labels {\n\t\tif i == len(h.labels)-1 || value <= h.cutoffs[i] {\n\t\t\th.buckets[i].Add(1)\n\t\t\th.total.Add(value)\n\t\t\tbreak\n\t\t}\n\t}\n\tif h.hook != nil {\n\t\th.hook(value)\n\t}\n\tif defaultStatsdHook.histogramHook != nil && h.name != \"\" {\n\t\tdefaultStatsdHook.histogramHook(h.name, value)\n\t}\n}", "func (h *Histogram) filterHistogram(newBuckets []Bucket) *Histogram {\n\tcheckBucketsValid(newBuckets)\n\n\ttotal := int64(0)\n\tfor _, b := range newBuckets {\n\t\ttotal += b.NumEq + b.NumRange\n\t}\n\n\tif total == 0 {\n\t\treturn &Histogram{}\n\t}\n\n\tselectivity := float64(total) / float64(h.RowCount)\n\n\t// Estimate the new DistinctCount based on the selectivity of this filter.\n\t// todo(rytaft): this could be more precise if we take into account the\n\t// null count of the original histogram. This could also be more precise for\n\t// the operators =, !=, in, and not in, since we know how these operators\n\t// should affect the distinct count.\n\tdistinctCount := int64(float64(h.DistinctCount) * selectivity)\n\tif distinctCount == 0 {\n\t\t// There must be at least one distinct value since RowCount > 0.\n\t\tdistinctCount++\n\t}\n\n\treturn &Histogram{\n\t\tRowCount: total,\n\t\tDistinctCount: distinctCount,\n\n\t\t// All the returned rows will be non-null for this column.\n\t\tNullCount: 0,\n\t\tBuckets: newBuckets,\n\t}\n}", "func (c *Aggregator) Update(_ context.Context, num number.Number, desc *metric.Descriptor) error {\n\tc.value.AddNumberAtomic(desc.NumberKind(), num)\n\treturn nil\n}", "func (h *Histogram) Add(value int64) error {\n\tbucket, err := h.findBucket(value)\n\tif err != nil {\n\t\treturn err\n\t}\n\th.buckets[bucket].count.Incr(1)\n\th.count.Incr(1)\n\th.sum.Incr(value)\n\th.sumOfSquares.Incr(value * value)\n\th.tracker.Push(value)\n\treturn nil\n}", "func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }", "func (nsc *NilConsumerStatsCollector) UpdateBatchSize(int) {}", "func (ds *Dataset) update(value float64) {\n\tds.total += value\n\tds.product *= value\n\tds.recipsum += 1 / value\n\tds.min = math.Min(ds.min, value)\n\tds.max = math.Max(ds.max, value)\n}", "func (r *JobsService) Histogram(gethistogramrequest *GetHistogramRequest) *JobsHistogramCall {\n\tc := &JobsHistogramCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.gethistogramrequest = gethistogramrequest\n\treturn c\n}", "func (ls *logStats) add(logData logData) {\n\t//needed for timeSpan calculation\n\tif ls.firstEntryUnixTime == -1 {\n\t\tls.firstEntryUnixTime = logData.date\n\t}\n\t//cheaper than sort / heap / bst for tracking only the max\n\tif ls.sectionWithMostHits == \"\" {\n\t\tls.sectionWithMostHits = logData.section\n\t}\n\t//update httpRes\n\tif _, ok := ls.httpRes[logData.resCode]; ok {\n\t\tls.httpRes[logData.resCode]++\n\t} else {\n\t\tls.httpRes[logData.resCode] = 1\n\t}\n\t//update hostHits\n\tif _, ok := ls.hostHits[logData.host]; ok {\n\t\tls.hostHits[logData.host].add(logData)\n\t} else {\n\t\tls.hostHits[logData.host] = newHttpHits()\n\t\tls.hostHits[logData.host].add(logData)\n\t}\n\t//update sectionHits\n\tif _, ok := ls.sectionHits[logData.section]; ok 
{\n\t\tls.sectionHits[logData.section].add(logData)\n\t} else {\n\t\tls.sectionHits[logData.section] = newHttpHits()\n\t\tls.sectionHits[logData.section].add(logData)\n\t\tif ls.sectionHits[logData.section].total > ls.sectionHits[ls.sectionWithMostHits].total {\n\t\t\tls.sectionWithMostHits = logData.section\n\t\t}\n\t}\n\t//update throughput\n\tls.throughput.add(logData)\n\n\tls.timeSpanSec = logData.date - ls.firstEntryUnixTime\n\tls.previousEntryUnixTime = logData.date\n}", "func (d TestSink) Histogram(c *telemetry.Context, stat string, value float64) {\n\td[stat] = TestMetric{\"Histogram\", value, c.Tags()}\n}", "func(c *HermesClient) ObserveSummary(metricName string, labels map[string]string, observation float64) {\n log.Debug(fmt.Sprintf(\"setting observation on histogram %s\", metricName))\n packet := HermesSummaryPacket{\n MetricName: metricName,\n Payload: HermesSummaryPayload{\n SummaryLabels: labels,\n SummaryObservation: observation,\n },\n }\n c.SendUDPPacket(packet)\n}", "func HistogramSummary(scope *Scope, tag tf.Output, values tf.Output) (summary tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"HistogramSummary\",\n\t\tInput: []tf.Input{\n\t\t\ttag, values,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}", "func (w *WeightsBatch) Update(id identity.ID, diff int64) {\n\tif w.diffs[id] += diff; w.diffs[id] == 0 {\n\t\tdelete(w.diffs, id)\n\t}\n\n\tw.totalDiff += diff\n}", "func (c *Cache) recordUpdate(p *partition, bytesAdded, bytesGuessed, entriesAdded int32) {\n\t// This method is always called while p.mu is held.\n\t// The below code takes care to ensure that all bytes in c due to p are\n\t// updated appropriately.\n\n\t// NB: The loop and atomics are used because p.size can be modified\n\t// concurrently to calls to recordUpdate. In all cases where p.size is updated\n\t// outside of this function occur while c.mu is held inside of c.Add. These\n\t// occur when either:\n\t//\n\t// 1) a new write adds its guessed write size to p\n\t// 2) p is evicted to make room for a write\n\t//\n\t// Thus p.size is either increasing or becomes evicted while we attempt to\n\t// record the update to p. 
Once p is evicted it stays evicted forever.\n\t// These facts combine to ensure that p.size never becomes negative from the\n\t// below call to add.\n\n\tdelta := bytesAdded - bytesGuessed\n\tfor {\n\t\tcurSize := p.loadSize()\n\t\tif curSize == evicted {\n\t\t\treturn\n\t\t}\n\t\tnewSize := curSize.add(delta, entriesAdded)\n\t\tif updated := p.setSize(curSize, newSize); updated {\n\t\t\tc.updateGauges(c.addBytes(delta), c.addEntries(entriesAdded))\n\t\t\treturn\n\t\t}\n\t}\n}", "func (e *exemplarSampler) updateAggregations(val float64) {\n\te.count++\n\tdelta := val - e.mean\n\te.mean += delta / float64(e.count)\n\tdelta2 := val - e.mean\n\te.m2 += delta * delta2\n}", "func HashUpdate(tls *libc.TLS, aData uintptr, nData uint32) { /* speedtest1.c:146:13: */\n\tvar t uint8\n\tvar i uint8 = g.hash.i\n\tvar j uint8 = g.hash.j\n\tvar k uint32\n\tif g.hashFile != 0 {\n\t\tlibc.Xfwrite(tls, aData, uint32(1), nData, g.hashFile)\n\t}\n\tfor k = uint32(0); k < nData; k++ {\n\t\tj = uint8(int32(j) + (int32(*(*uint8)(unsafe.Pointer((uintptr(unsafe.Pointer(&g)) + 3104 /* &.hash */ + 3 /* &.s */) + uintptr(i)))) + int32(*(*uint8)(unsafe.Pointer(aData + uintptr(k))))))\n\t\tt = *(*uint8)(unsafe.Pointer((uintptr(unsafe.Pointer(&g)) + 3104 /* &.hash */ + 3 /* &.s */) + uintptr(j)))\n\t\t*(*uint8)(unsafe.Pointer((uintptr(unsafe.Pointer(&g)) + 3104 /* &.hash */ + 3 /* &.s */) + uintptr(j))) = *(*uint8)(unsafe.Pointer((uintptr(unsafe.Pointer(&g)) + 3104 /* &.hash */ + 3 /* &.s */) + uintptr(i)))\n\t\t*(*uint8)(unsafe.Pointer((uintptr(unsafe.Pointer(&g)) + 3104 /* &.hash */ + 3 /* &.s */) + uintptr(i))) = t\n\t\ti++\n\t}\n\tg.hash.i = i\n\tg.hash.j = j\n}", "func (b *Uniform) Update(arm, reward int) {\n\t// Update the frequency\n\tb.counts[arm]++\n\tn := float64(b.counts[arm])\n\n\tvalue := b.values[arm]\n\tb.values[arm] = ((n-1)/n)*value + (1/n)*float64(reward)\n}", "func (pq *PriorityQueue) update(step *Step, function StepFunc, priority int) {\n\tstep.function = function\n\tstep.priority = priority\n\theap.Fix(pq, step.index)\n}", "func (sc statsCache) update(tables []*statistics.Table, deletedIDs []int64, newVersion uint64, opts ...TableStatsOpt) statsCache {\n\toption := &tableStatsOption{}\n\tfor _, opt := range opts {\n\t\topt(option)\n\t}\n\tnewCache := sc.copy()\n\tif newVersion == newCache.version {\n\t\tnewCache.minorVersion += uint64(1)\n\t} else {\n\t\tnewCache.version = newVersion\n\t\tnewCache.minorVersion = uint64(0)\n\t}\n\tfor _, tbl := range tables {\n\t\tid := tbl.PhysicalID\n\t\tif option.byQuery {\n\t\t\tnewCache.PutByQuery(id, tbl)\n\t\t} else {\n\t\t\tnewCache.Put(id, tbl)\n\t\t}\n\t}\n\tfor _, id := range deletedIDs {\n\t\tnewCache.Del(id)\n\t}\n\treturn newCache\n}", "func (c *StatsClient) Histogram(name string, value float64) {\n\tif err := c.client.Histogram(name, value, c.tags, Rate); err != nil {\n\t\tc.logger().Printf(\"datadog.StatsClient.Histogram error: %s\", err)\n\t}\n}", "func (pq *bvhPriorityQueue) update(item *Item, value *boundingVolumeHierarchyNode, priority float64) {\n\titem.value = value\n\titem.t = priority\n\theap.Fix(pq, item.index)\n}", "func (h *Heap) Update(obj interface{}) error {\n\treturn h.Add(obj)\n}", "func (ms *metricsStore) update(obj runtime.Object, rms []*resourceMetrics) error {\n\tms.Lock()\n\tdefer ms.Unlock()\n\n\tkey, err := utils.GetUIDForObject(obj)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\torigMds := make([]agentmetricspb.ExportMetricsServiceRequest, len(rms))\n\tmds := make([]*agentmetricspb.ExportMetricsServiceRequest, len(rms))\n\tfor i, 
rm := range rms {\n\t\tmds[i] = &origMds[i]\n\t\tmds[i].Resource = rm.resource\n\t\tmds[i].Metrics = rm.metrics\n\t}\n\n\tms.metricsCache[key] = mds\n\treturn nil\n}", "func (c *Client) Histogram(stat string, value int, rate float64) error {\n\treturn c.send(stat, rate, \"%d|ms\", value)\n}", "func (tb *Table) UpdateMetrics(m *TableMetrics) {\n\tm.ActiveInmemoryMerges += atomic.LoadUint64(&tb.activeInmemoryMerges)\n\tm.ActiveFileMerges += atomic.LoadUint64(&tb.activeFileMerges)\n\n\tm.InmemoryMergesCount += atomic.LoadUint64(&tb.inmemoryMergesCount)\n\tm.FileMergesCount += atomic.LoadUint64(&tb.fileMergesCount)\n\n\tm.InmemoryItemsMerged += atomic.LoadUint64(&tb.inmemoryItemsMerged)\n\tm.FileItemsMerged += atomic.LoadUint64(&tb.fileItemsMerged)\n\n\tm.InmemoryAssistedMerges += atomic.LoadUint64(&tb.inmemoryAssistedMerges)\n\tm.FileAssistedMerges += atomic.LoadUint64(&tb.fileAssistedMerges)\n\n\tm.ItemsAdded += atomic.LoadUint64(&tb.itemsAdded)\n\tm.ItemsAddedSizeBytes += atomic.LoadUint64(&tb.itemsAddedSizeBytes)\n\n\tm.PendingItems += uint64(tb.rawItems.Len())\n\n\ttb.partsLock.Lock()\n\n\tm.InmemoryPartsCount += uint64(len(tb.inmemoryParts))\n\tfor _, pw := range tb.inmemoryParts {\n\t\tp := pw.p\n\t\tm.InmemoryBlocksCount += p.ph.blocksCount\n\t\tm.InmemoryItemsCount += p.ph.itemsCount\n\t\tm.InmemorySizeBytes += p.size\n\t\tm.PartsRefCount += uint64(atomic.LoadUint32(&pw.refCount))\n\t}\n\n\tm.FilePartsCount += uint64(len(tb.fileParts))\n\tfor _, pw := range tb.fileParts {\n\t\tp := pw.p\n\t\tm.FileBlocksCount += p.ph.blocksCount\n\t\tm.FileItemsCount += p.ph.itemsCount\n\t\tm.FileSizeBytes += p.size\n\t\tm.PartsRefCount += uint64(atomic.LoadUint32(&pw.refCount))\n\t}\n\ttb.partsLock.Unlock()\n\n\tm.DataBlocksCacheSize = uint64(ibCache.Len())\n\tm.DataBlocksCacheSizeBytes = uint64(ibCache.SizeBytes())\n\tm.DataBlocksCacheSizeMaxBytes = uint64(ibCache.SizeMaxBytes())\n\tm.DataBlocksCacheRequests = ibCache.Requests()\n\tm.DataBlocksCacheMisses = ibCache.Misses()\n\n\tm.IndexBlocksCacheSize = uint64(idxbCache.Len())\n\tm.IndexBlocksCacheSizeBytes = uint64(idxbCache.SizeBytes())\n\tm.IndexBlocksCacheSizeMaxBytes = uint64(idxbCache.SizeMaxBytes())\n\tm.IndexBlocksCacheRequests = idxbCache.Requests()\n\tm.IndexBlocksCacheMisses = idxbCache.Misses()\n}", "func (c *Collector) LoadFromRuntimeMetrics(h *metrics.Float64Histogram) {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\tc.Buckets = make([]Bucket, len(h.Buckets)-1)\n\tc.BucketsLimit = len(h.Buckets)\n\tc.Bucket = Bucket{\n\t\tMin: h.Buckets[0],\n\t\tMax: h.Buckets[0],\n\t}\n\n\tfor i, b := range h.Buckets[1:] {\n\t\tbb := Bucket{\n\t\t\tMin: c.Bucket.Max,\n\t\t\tMax: b,\n\t\t\tCount: int(h.Counts[i]),\n\t\t}\n\n\t\tif bb.Count != 0 && !math.IsInf(b, 0) {\n\t\t\tbb.Sum = float64(bb.Count) * b\n\t\t\tc.Bucket.Sum += bb.Sum\n\t\t}\n\n\t\tc.Bucket.Count += bb.Count\n\t\tc.Bucket.Max = b\n\n\t\tc.Buckets[i] = bb\n\t}\n}", "func (m *Metric) Update(v float64) *Metric {\n\tm.Lock()\n\tm.LastValue = v\n\tm.Sum += v\n\tif v < m.Min {\n\t\tm.Min = v\n\t}\n\tif v > m.Max {\n\t\tm.Max = v\n\t}\n\tm.Samples++\n\tm.Unlock()\n\treturn m\n}", "func (cb *cachedBatch) hash(namespace string, key []byte) hash.CacheHash {\n\tstream := hash.Hash160b([]byte(namespace))\n\tstream = append(stream, key...)\n\treturn byteutil.BytesToCacheHash(hash.Hash160b(stream))\n}", "func (s *CheckpointManager) UpdateTotal(taskID int, added int, last bool) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tcp := s.checkpoints[taskID]\n\tcp.totalKeys += added\n\tcp.lastBatchSent = last\n}", "func 
(s *Stats) add(d *Stats) {\n\ts.ItemCount += d.ItemCount\n\ts.EffectiveSize += d.EffectiveSize\n\ts.UsedSize += d.UsedSize\n\ts.Readers += d.Readers\n\ts.MarkedDeleted += d.MarkedDeleted\n\ts.Writers += d.Writers\n\ts.WritersBlocked += d.WritersBlocked\n}", "func (s *UniformSample) Update(v int64) {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\ts.count++\n\tif len(s.values) < s.reservoirSize {\n\t\ts.values = append(s.values, v)\n\t} else {\n\t\t// Use circle buffer to eliminate the oldest value\n\t\tidx := s.count % int64(s.reservoirSize)\n\t\ts.values[idx] = v\n\t}\n}", "func (_m *Reporter) Histogram(name string, value float64, tags ...monitoring.Tag) {\n\t_va := make([]interface{}, len(tags))\n\tfor _i := range tags {\n\t\t_va[_i] = tags[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, name, value)\n\t_ca = append(_ca, _va...)\n\t_m.Called(_ca...)\n}", "func Update(crc uint64, tab *Table, p []byte) uint64 {}", "func (c *Counter) Update(timestamp time.Time, value int64, annotation []byte) {\n\tif c.lastAt.IsZero() || timestamp.After(c.lastAt) {\n\t\t// NB(r): Only set the last value if this value arrives\n\t\t// after the wall clock timestamp of previous values, not\n\t\t// the arrival time (i.e. order received).\n\t\tc.lastAt = timestamp\n\t} else {\n\t\tc.Options.Metrics.Counter.IncValuesOutOfOrder()\n\t}\n\n\tc.sum += value\n\n\tc.count++\n\tif c.max < value {\n\t\tc.max = value\n\t}\n\tif c.min > value {\n\t\tc.min = value\n\t}\n\n\tif c.HasExpensiveAggregations {\n\t\tc.sumSq += value * value\n\t}\n\n\tc.annotation = MaybeReplaceAnnotation(c.annotation, annotation)\n}", "func (h *Histogram) Add(v float64) {\n\th.mutex.Lock()\n\tdefer h.mutex.Unlock()\n\tmax := 1.0\n\tfor i := 0; i < HistogramSize-1; i++ {\n\t\tif v <= max {\n\t\t\th.values[i]++\n\t\t\treturn\n\t\t}\n\t\tmax *= 2\n\t}\n\th.values[HistogramSize-1]++\n}", "func (r *DeviceHealthScriptRunSummaryRequest) Update(ctx context.Context, reqObj *DeviceHealthScriptRunSummary) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func (self *IoStatsBuilder) Update(current map[int]*IoAmount, now time.Time) {\n\tseconds := now.Sub(self.lastMeasurement).Seconds()\n\tvar total IoAmount\n\tfor pid, amt := range current {\n\t\ttotal.Increment(amt)\n\t\tdelete(self.lastPids, pid)\n\t}\n\tfor _, amt := range self.lastPids {\n\t\tself.deadUsage.Increment(amt)\n\t}\n\ttotal.Increment(&self.deadUsage)\n\tself.lastPids = current\n\tself.lastMeasurement = now\n\tdiff := self.Total.update(&total)\n\tif seconds > 0 {\n\t\trate := diff.rate(seconds)\n\t\tself.RateMax.TakeMax(rate)\n\t\trate.weightSquared(seconds)\n\t\tself.weightedSumSquared.Increment(rate)\n\t\tif t := now.Sub(self.start).Seconds(); t > 0 {\n\t\t\tself.RateDev = self.weightedSumSquared.computeStdDev(\n\t\t\t\t&self.Total, t)\n\t\t}\n\t}\n}", "func (bwt *BWTable) refresh(stats protoStatSlice) {\n\trows := [][]string{bwtHeader}\n\n\tsort.Sort(stats)\n\tfor _, stat := range stats {\n\t\trow := []string{stat.Protocol,\n\t\t\tpaddedHumanBytes(uint64(stat.Stats.TotalIn)),\n\t\t\tpaddedHumanBytes(uint64(stat.Stats.TotalOut)),\n\t\t\tpaddedHumanBytes(uint64(stat.Stats.RateIn)),\n\t\t\tpaddedHumanBytes(uint64(stat.Stats.RateOut))}\n\t\trows = append(rows, row)\n\t}\n\n\tbwt.Table.Rows = rows\n}", "func (job *AnalyzeJob) Update(rowCount int64) {\n\tnow := time.Now()\n\tjob.Mutex.Lock()\n\tjob.RowCount += rowCount\n\tjob.updateTime = now\n\tjob.Mutex.Unlock()\n}", "func (h *Histogram) record(v float64, count int) {\n\t// Scaled value to bucketize - we subtract 
epsilon because the interval\n\t// is open to the left ] start, end ] so when exactly on start it has\n\t// to fall on the previous bucket. TODO add boundary tests\n\tscaledVal := (v-h.Offset)/h.Divider - 0.0001\n\tvar idx int\n\tif scaledVal <= firstValue {\n\t\tidx = 0\n\t} else if scaledVal > lastValue {\n\t\tidx = numBuckets - 1 // last bucket is for > last value\n\t} else {\n\t\t// else we look it up\n\t\tidx = lookUpIdx(int(scaledVal))\n\t}\n\th.Hdata[idx] += int32(count)\n}", "func (c *Aggregator) Update(_ context.Context, number number.Number, desc *sdkapi.Descriptor) error {\n\tnow := time.Now()\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\tc.samples = append(c.samples, Point{\n\t\tNumber: number,\n\t\tTime: now,\n\t})\n\n\treturn nil\n}", "func (c *Channel) bundleUpdate(b *channelconfig.Bundle) {\n\tc.lock.Lock()\n\tc.resources = b\n\tc.lock.Unlock()\n}", "func Histogram(name string, requestTime float64, tags []string, rate float64) {\n\tif ddog == nil {\n\t\tlog.Error(\"datadog client is not initialized\")\n\t\treturn\n\t}\n\n\terr := ddog.Client.Histogram(name, requestTime, tags, rate)\n\tif err != nil {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"error\": err,\n\t\t\t\"name\": name,\n\t\t}).Error(\"Failed to send histogram data to datadog\")\n\t}\n}", "func doSum(h hash.Hash, b []byte, data ...[]byte) ([]byte, error) {\n\th.Reset()\n\tfor _, v := range data {\n\t\tvar err error\n\t\t_, err = h.Write(v)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn h.Sum(b), nil\n}", "func (pq *PQ_ARRAY) update(item *InternalItem, value interface{}, priority int) {\n\titem.Value = value\n\titem.Priority = priority\n // call heap method\n\theap.Fix(pq, item.index)\n}", "func (fb *FlatBatch) Put(key []byte, value []byte) error {\n\tfb.lock.Lock()\n\tdefer fb.lock.Unlock()\n\n\tfb.keys = append(fb.keys, key)\n\tfb.vals = append(fb.vals, value)\n\tfb.keysize += len(key)\n\tfb.valsize += len(value)\n\treturn nil\n}", "func updateAggregate(c context.Context, tf *model.TestFile, aggr, incr *model.AggregateResult) error {\n\tif !model.IsAggregateTestFile(tf.Name) {\n\t\treturn errors.New(\"frontend: tf should be an aggregate test file\")\n\t}\n\n\tsize := model.ResultsSize\n\tif tf.Name == \"results-small.json\" {\n\t\tsize = model.ResultsSmallSize\n\t}\n\n\tif aggr == nil {\n\t\taggr = incr\n\t} else {\n\t\tif err := aggr.Merge(incr); err != nil {\n\t\t\tmsg := logging.WithError(err)\n\t\t\tswitch err {\n\t\t\tcase model.ErrBuilderNameConflict:\n\t\t\t\tmsg.Warningf(c, \"updateAggregate: merge for master: %q, builder: %q, file: %q\", tf.Master, tf.Builder, tf.Name)\n\t\t\t\treturn statusError{err, http.StatusBadRequest}\n\t\t\tcase model.ErrBuildNumberConflict:\n\t\t\t\tmsg.Warningf(c, \"updateAggregate: merge for master: %q, builder: %q, file: %q\", tf.Master, tf.Builder, tf.Name)\n\t\t\t\treturn statusError{err, http.StatusConflict}\n\t\t\tdefault:\n\t\t\t\tmsg.Errorf(c, \"updateAggregate: merge for master: %q, builder: %q, file: %q\", tf.Master, tf.Builder, tf.Name)\n\t\t\t\treturn statusError{err, http.StatusInternalServerError}\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := aggr.Trim(size); err != nil {\n\t\tlogging.WithError(err).Errorf(c, \"updateAggregate: trim\")\n\t\treturn statusError{err, http.StatusInternalServerError}\n\t}\n\n\terr := tf.PutData(c, func(w io.Writer) error {\n\t\tif err := json.NewEncoder(w).Encode(&aggr); err != nil {\n\t\t\tlogging.WithError(err).Errorf(c, \"updateAggregate: marshal JSON\")\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil 
{\n\t\tlogging.WithError(err).Errorf(c, \"updateAggregate: PutData\")\n\t\treturn statusError{err, http.StatusInternalServerError}\n\t}\n\n\tif err := datastore.Put(c, tf); err != nil {\n\t\tlogging.WithError(err).Errorf(c, \"updateAggregate: datastore.Put\")\n\t\treturn statusError{err, http.StatusInternalServerError}\n\t}\n\tif err := deleteKeys(c, tf.OldDataKeys); err != nil {\n\t\tlogging.Fields{\n\t\t\tlogging.ErrorKey: err,\n\t\t\t\"keys\": tf.OldDataKeys,\n\t\t}.Errorf(c, \"upload: failed to delete keys\")\n\t}\n\treturn nil\n}", "func (eng *Engine) updateHash(flags hashFlags, depth, score int32, move Move, static int32) {\n\t// if search is stopped then score cannot be trusted\n\tif eng.stopped {\n\t\treturn\n\t}\n\t// update principal variation table in exact nodes\n\tif flags&exact != 0 {\n\t\teng.pvTable.Put(eng.Position, move)\n\t}\n\tif eng.ply() == 0 && (len(eng.ignoreRootMoves) != 0 || len(eng.onlyRootMoves) != 0) {\n\t\t// at root if there are moves to ignore (e.g. because of multipv)\n\t\t// then this is an incomplete search, so don't update the hash\n\t\treturn\n\t}\n\n\t// save the mate score relative to the current position\n\t// when retrieving from hash the score will be adjusted relative to root\n\tif score < KnownLossScore {\n\t\tscore -= eng.ply()\n\t} else if score > KnownWinScore {\n\t\tscore += eng.ply()\n\t}\n\n\tGlobalHashTable.put(eng.Position, hashEntry{\n\t\tkind: flags,\n\t\tscore: int16(score),\n\t\tdepth: int8(depth),\n\t\tmove: move,\n\t\tstatic: int16(static),\n\t})\n}", "func (te *TelemetryEmitter) emitHistogram(metric Metric, timestamp time.Time) error {\n\thist, ok := metric.value.(*dto.Histogram)\n\tif !ok {\n\t\treturn fmt.Errorf(\"unknown histogram metric type for %q: %T\", metric.name, metric.value)\n\t}\n\n\tif m, ok := te.deltaCalculator.CountMetric(metric.name+\".sum\", metric.attributes, hist.GetSampleSum(), timestamp); ok {\n\t\tte.harvester.RecordMetric(m)\n\t}\n\n\tmetricName := metric.name + \".buckets\"\n\tbuckets := make(histogram.Buckets, 0, len(hist.Bucket))\n\tfor _, b := range hist.GetBucket() {\n\t\tupperBound := b.GetUpperBound()\n\t\tcount := float64(b.GetCumulativeCount())\n\t\tif !math.IsInf(upperBound, 1) {\n\t\t\tbucketAttrs := copyAttrs(metric.attributes)\n\t\t\tbucketAttrs[\"histogram.bucket.upperBound\"] = upperBound\n\t\t\tif m, ok := te.deltaCalculator.CountMetric(metricName, bucketAttrs, count, timestamp); ok {\n\t\t\t\tte.harvester.RecordMetric(m)\n\t\t\t}\n\t\t}\n\t\tbuckets = append(\n\t\t\tbuckets,\n\t\t\thistogram.Bucket{\n\t\t\t\tUpperBound: upperBound,\n\t\t\t\tCount: count,\n\t\t\t},\n\t\t)\n\t}\n\n\tvar results error\n\tmetricName = metric.name + \".percentiles\"\n\tfor _, p := range te.percentiles {\n\t\tv, err := histogram.Percentile(p, buckets)\n\t\tif err != nil {\n\t\t\tif results == nil {\n\t\t\t\tresults = err\n\t\t\t} else {\n\t\t\t\tresults = fmt.Errorf(\"%v: %w\", err, results)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tpercentileAttrs := copyAttrs(metric.attributes)\n\t\tpercentileAttrs[\"percentile\"] = p\n\t\tte.harvester.RecordMetric(telemetry.Gauge{\n\t\t\tName: metricName,\n\t\t\tAttributes: percentileAttrs,\n\t\t\tValue: v,\n\t\t\tTimestamp: timestamp,\n\t\t})\n\t}\n\n\treturn results\n}", "func (pq *PriorityQueue) update(item *Item, value []float64, priority float64) {\n\titem.value = value\n\titem.priority = priority\n\theap.Fix(pq, item.index)\n}", "func getHistogram(src [][3]int, size float64, pixels *[HistSize][3]float64, hist *[HistSize]float64) {\n\tvar ind, r, g, b, i int\n\tvar inr, ing, 
inb int\n\n\tfor i = range src {\n\t\tr = src[i][0]\n\t\tg = src[i][1]\n\t\tb = src[i][2]\n\n\t\tinr = r >> Shift\n\t\ting = g >> Shift\n\t\tinb = b >> Shift\n\n\t\tind = (inr << (2 * HistBits)) + (ing << HistBits) + inb\n\t\tpixels[ind][0], pixels[ind][1], pixels[ind][2] = float64(r), float64(g), float64(b)\n\t\thist[ind]++\n\t}\n\n\t// normalize weight by the number of pixels in the image\n\tfor i = 0; i < HistSize; i++ {\n\t\thist[i] /= size\n\t}\n}", "func (c *Context) Histogram(stat string, value float64) {\n\tfor _, sink := range c.sinks {\n\t\tsink.Histogram(c, stat, value)\n\t}\n}", "func (ms SummaryDataPoint) SetSum(v float64) {\n\t(*ms.orig).Sum = v\n}", "func (ms HistogramDataPoint) Sum() float64 {\n\treturn (*ms.orig).Sum\n}", "func (tk *timekeeper) updateHWT(cmd Message) Timestamp {\n\n\tstreamId := cmd.(*MsgStream).GetStreamId()\n\tmeta := cmd.(*MsgStream).GetMutationMeta()\n\n\tbucketHWTMap := tk.streamBucketHWTMap[streamId]\n\tbucketNewTsReqd := tk.streamBucketNewTsReqdMap[streamId]\n\tbucketFlushInProgressMap := tk.streamBucketFlushInProgressMap[streamId]\n\tbucketTsListMap := tk.streamBucketTsListMap[streamId]\n\tbucketFlushEnabledMap := tk.streamBucketFlushEnabledMap[streamId]\n\n\t//update HWT for this bucket\n\tvar ts Timestamp\n\tvar ok bool\n\tif ts, ok = (*bucketHWTMap)[meta.bucket]; ok {\n\t\t//if seqno has incremented, update it\n\t\tif meta.seqno > ts[meta.vbucket] {\n\t\t\t(*bucketNewTsReqd)[meta.bucket] = true\n\t\t\tts[meta.vbucket] = meta.seqno\n\t\t}\n\t} else {\n\t\t//allocate a new timestamp for this bucket\n\t\t(*bucketHWTMap)[meta.bucket] = NewTimestamp()\n\t\t(*bucketNewTsReqd)[meta.bucket] = false\n\t\t(*bucketTsListMap)[meta.bucket] = list.New()\n\t\t(*bucketFlushInProgressMap)[meta.bucket] = false\n\t\t(*bucketFlushEnabledMap)[meta.bucket] = true\n\t}\n\n\treturn ts\n}", "func ReadHistogramSum(histogram prometheus.Histogram) float64 {\n\tvar metric dto.Metric\n\tif err := histogram.Write(&metric); err != nil {\n\t\treturn math.NaN()\n\t}\n\treturn metric.Histogram.GetSampleSum()\n}", "func (c *Cuckoo) addTable(growFactor float64) {\n\t//fmt.Printf(\"table: %d\\n\", c.Ntables)\n\tc.Ntables++\n\tbuckets := int(float64(c.Nbuckets) * growFactor)\n\tslots := c.Nslots\n\tc.Size += buckets * slots\n\tc.MaxElements = int(float64(c.Size) * c.MaxLoadFactor)\n\tt := new(Table)\n\tt.buckets = make([]Slots, buckets, buckets)\n\t// we should do this lazily\n\tfor b, _ := range t.buckets {\n\t\tif len(t.buckets[b]) == 0 {\n\t\t\tt.buckets[b] = makeSlots(t.buckets[b], slots)\n\t\t\tfor s, _ := range t.buckets[b] {\n\t\t\t\tt.buckets[b][s].val = c.emptyValue // ???\n\t\t\t}\n\t\t}\n\t}\n\tt.seed = uint64(len(c.tables) + 1)\n\tt.hfs = c.getHash(c.HashName, t.seed)\n\tt.Nbuckets = c.Nbuckets\n\tt.Nslots = c.Nslots\n\tt.Size = t.Nbuckets * t.Nslots\n\tt.MaxElements = int(float64(t.Size) * c.MaxLoadFactor)\n\tt.c = c\n\tc.tables = append(c.tables, t)\n\n\t// perhaps reset the stats ???\n}", "func (s *Summary) merge(o *Summary) {\n\tif o.TimeElapsed > s.TimeElapsed {\n\t\ts.TimeElapsed = o.TimeElapsed\n\t}\n\ts.SuccessHistogram.Merge(o.SuccessHistogram)\n\ts.UncorrectedSuccessHistogram.Merge(o.UncorrectedSuccessHistogram)\n\ts.ErrorHistogram.Merge(o.ErrorHistogram)\n\ts.UncorrectedErrorHistogram.Merge(o.UncorrectedErrorHistogram)\n\ts.SuccessTotal += o.SuccessTotal\n\ts.ErrorTotal += o.ErrorTotal\n\ts.Throughput += o.Throughput\n\ts.RequestRate += o.RequestRate\n}", "func update(rt *Runtime, r goengage.Fundraise, key string) {\n\tg := Stat{}\n\trt.DB.Where(\"id = ?\", 
key).First(&g)\n\tif g.CreatedDate == nil {\n\t\tg.ID = key\n\t\tt := time.Now()\n\t\tg.CreatedDate = &t\n\t\trt.DB.Create(&g)\n\t}\n\tfor _, t := range r.Transactions {\n\t\tg.AllCount++\n\t\tg.AllAmount = g.AllAmount + t.Amount\n\t\tif r.WasImported {\n\t\t\tg.OfflineCount++\n\t\t\tg.OfflineAmount += t.Amount\n\t\t} else {\n\t\t\tswitch r.DonationType {\n\t\t\tcase goengage.OneTime:\n\t\t\t\tg.OneTimeCount++\n\t\t\t\tg.OneTimeAmount += t.Amount\n\t\t\tcase goengage.Recurring:\n\t\t\t\tg.RecurringCount++\n\t\t\t\tg.RecurringAmount += t.Amount\n\t\t\t}\n\t\t\tswitch t.Type {\n\t\t\tcase goengage.Refund:\n\t\t\t\tg.RefundsCount++\n\t\t\t\tg.RefundsAmount += t.Amount\n\t\t\t}\n\t\t}\n\t\tg.Largest = math.Max(g.Largest, t.Amount)\n\t\tif t.Amount > 0.0 {\n\t\t\tif g.Smallest < 1.0 {\n\t\t\t\tg.Smallest = t.Amount\n\t\t\t} else {\n\t\t\t\tg.Smallest = math.Min(g.Smallest, t.Amount)\n\t\t\t}\n\t\t}\n\t\trt.DB.Model(&g).Updates(&g)\n\t}\n}", "func (b *Batcher) Update(matched, total int) {\n\tb.ratio = float64(matched) / float64(total)\n}", "func UpdateCount(key Type, val int) {\n\tmutex.Lock()\n\tdefer mutex.Unlock()\n\tCountStats[key] = CountStats[key] + val\n}", "func (datadog *Datadog) Histogram(name string, startTime time.Time, tags []string) error {\n\telapsedTime := time.Since(startTime).Seconds() * 1000\n\terr := datadog.client.Histogram(name, elapsedTime, tags, float64(1))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (h *ihash) Sum(b []byte) []byte {\n\tn, err := h.s.Read(h.buf)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"alg: failed to read out finalized hash: %v\", err))\n\t}\n\n\treturn append(b, h.buf[:n]...)\n}", "func (mb *MutableBag) update(dictionary dictionary, attrs *mixerpb.Attributes) error {\n\t// check preconditions up front and bail if there are any\n\t// errors without mutating the bag.\n\tif err := checkPreconditions(dictionary, attrs); err != nil {\n\t\treturn err\n\t}\n\n\tvar log *bytes.Buffer\n\tif glog.V(2) {\n\t\tlog = pool.GetBuffer()\n\t}\n\n\tif attrs.ResetContext {\n\t\tif log != nil {\n\t\t\tlog.WriteString(\" resetting bag to empty state\\n\")\n\t\t}\n\t\tmb.Reset()\n\t}\n\n\t// delete requested attributes\n\tfor _, d := range attrs.DeletedAttributes {\n\t\tif name, present := dictionary[d]; present {\n\t\t\tif log != nil {\n\t\t\t\tlog.WriteString(fmt.Sprintf(\" attempting to delete attribute %s\\n\", name))\n\t\t\t}\n\n\t\t\tdelete(mb.values, name)\n\t\t}\n\t}\n\n\t// apply all attributes\n\tfor k, v := range attrs.StringAttributes {\n\t\tif log != nil {\n\t\t\tlog.WriteString(fmt.Sprintf(\" updating string attribute %s from '%v' to '%v'\\n\", dictionary[k], mb.values[dictionary[k]], v))\n\t\t}\n\t\tmb.values[dictionary[k]] = v\n\t}\n\n\tfor k, v := range attrs.Int64Attributes {\n\t\tif log != nil {\n\t\t\tlog.WriteString(fmt.Sprintf(\" updating int64 attribute %s from '%v' to '%v'\\n\", dictionary[k], mb.values[dictionary[k]], v))\n\t\t}\n\t\tmb.values[dictionary[k]] = v\n\t}\n\n\tfor k, v := range attrs.DoubleAttributes {\n\t\tif log != nil {\n\t\t\tlog.WriteString(fmt.Sprintf(\" updating double attribute %s from '%v' to '%v'\\n\", dictionary[k], mb.values[dictionary[k]], v))\n\t\t}\n\t\tmb.values[dictionary[k]] = v\n\t}\n\n\tfor k, v := range attrs.BoolAttributes {\n\t\tif log != nil {\n\t\t\tlog.WriteString(fmt.Sprintf(\" updating bool attribute %s from '%v' to '%v'\\n\", dictionary[k], mb.values[dictionary[k]], v))\n\t\t}\n\t\tmb.values[dictionary[k]] = v\n\t}\n\n\tfor k, v := range attrs.TimestampAttributes {\n\t\tif log != nil 
{\n\t\t\tlog.WriteString(fmt.Sprintf(\" updating time attribute %s from '%v' to '%v'\\n\", dictionary[k], mb.values[dictionary[k]], v))\n\t\t}\n\t\tmb.values[dictionary[k]] = v\n\t}\n\n\tfor k, v := range attrs.DurationAttributes {\n\t\tif log != nil {\n\t\t\tlog.WriteString(fmt.Sprintf(\" updating duration attribute %s from '%v' to '%v'\\n\", dictionary[k], mb.values[dictionary[k]], v))\n\t\t}\n\t\tmb.values[dictionary[k]] = v\n\t}\n\n\tfor k, v := range attrs.BytesAttributes {\n\t\tif log != nil {\n\t\t\tlog.WriteString(fmt.Sprintf(\" updating bytes attribute %s from '%v' to '%v'\\n\", dictionary[k], mb.values[dictionary[k]], v))\n\t\t}\n\t\tmb.values[dictionary[k]] = v\n\t}\n\n\tfor k, v := range attrs.StringMapAttributes {\n\t\tm, ok := mb.values[dictionary[k]].(map[string]string)\n\t\tif !ok {\n\t\t\tm = make(map[string]string)\n\t\t\tmb.values[dictionary[k]] = m\n\t\t}\n\n\t\tif log != nil {\n\t\t\tlog.WriteString(fmt.Sprintf(\" updating stringmap attribute %s from\\n\", dictionary[k]))\n\n\t\t\tif len(m) > 0 {\n\t\t\t\tfor k2, v2 := range m {\n\t\t\t\t\tlog.WriteString(fmt.Sprintf(\" %s:%s\\n\", k2, v2))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.WriteString(\" <empty>\\n\")\n\t\t\t}\n\n\t\t\tlog.WriteString(\" to\\n\")\n\t\t}\n\n\t\tfor k2, v2 := range v.Map {\n\t\t\tm[dictionary[k2]] = v2\n\t\t}\n\n\t\tif log != nil {\n\t\t\tif len(m) > 0 {\n\t\t\t\tfor k2, v2 := range m {\n\t\t\t\t\tlog.WriteString(fmt.Sprintf(\" %s:%s\\n\", k2, v2))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.WriteString(\" <empty>\\n\")\n\t\t\t}\n\t\t}\n\t}\n\n\tif log != nil {\n\t\tif log.Len() > 0 {\n\t\t\tglog.Infof(\"Updating attribute bag %d:\\n%s\", mb.id, log.String())\n\t\t}\n\t}\n\n\treturn nil\n}", "func (pq *PriorityQueue) update(item *QItem, value string, priority int) {\n\theap.Remove(pq, item.index)\n\titem.value = value\n\titem.priority = priority\n\theap.Push(pq, item)\n}", "func update(weight, mean, sumSq, sampleWeight, sample, sampleSumSq float64) (newWeight, newMean, newSumSq float64) {\n\tnewWeight = weight + sampleWeight\n\tnewMean = (weight*mean + sample*sampleWeight) / newWeight\n\tnewSumSq = sumSq + sampleSumSq + (sample-mean)*(sample-mean)*weight*sampleWeight/newWeight\n\treturn newWeight, newMean, newSumSq\n}", "func (o MempoolBinSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {\n\tln := int64(len(o))\n\tif ln == 0 {\n\t\treturn 0, nil\n\t}\n\n\tif len(cols) == 0 {\n\t\treturn 0, errors.New(\"models: update all requires at least one column argument\")\n\t}\n\n\tcolNames := make([]string, len(cols))\n\targs := make([]interface{}, len(cols))\n\n\ti := 0\n\tfor name, value := range cols {\n\t\tcolNames[i] = name\n\t\targs[i] = value\n\t\ti++\n\t}\n\n\t// Append all of the primary key values for each column\n\tfor _, obj := range o {\n\t\tpkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), mempoolBinPrimaryKeyMapping)\n\t\targs = append(args, pkeyArgs...)\n\t}\n\n\tsql := fmt.Sprintf(\"UPDATE \\\"mempool_bin\\\" SET %s WHERE %s\",\n\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, colNames),\n\t\tstrmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), len(colNames)+1, mempoolBinPrimaryKeyColumns, len(o)))\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, sql)\n\t\tfmt.Fprintln(writer, args...)\n\t}\n\tresult, err := exec.ExecContext(ctx, sql, args...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to update all in mempoolBin slice\")\n\t}\n\n\trowsAff, 
err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to retrieve rows affected all in update all mempoolBin\")\n\t}\n\treturn rowsAff, nil\n}", "func PublishHistogram(ctx context.Context, key string, value float64) {\n\n\t// Spans are not processed by the collector until the span ends, so to prevent any delay\n\t// in processing the stats when the current span is long-lived we create a new span for every call.\n\t// suffix the span name with SpannameSuffixDummy to denote that it is used only to hold a metric and isn't itself of any interest\n\tspan, ctx := opentracing.StartSpanFromContext(ctx, \"histogram_metrics\"+SpannameSuffixDummy)\n\tdefer span.Finish()\n\n\t// The field name we use is the metric name prepended with FieldnamePrefixHistogram to designate that it is a Prometheus histogram metric\n\t// The collector will replace that prefix with \"fn_\" and use the result as the Prometheus metric name.\n\tfieldname := FieldnamePrefixHistogram + key\n\tspan.LogFields(log.Float64(fieldname, value))\n}", "func (s *histValues[N]) Aggregate(value N, attr attribute.Set) {\n\t// Accept all types to satisfy the Aggregator interface. However, since\n\t// the Aggregation produced by this Aggregator is only float64, convert\n\t// here to only use this type.\n\tv := float64(value)\n\n\t// This search will return an index in the range [0, len(s.bounds)], where\n\t// it will return len(s.bounds) if value is greater than the last element\n\t// of s.bounds. This aligns with the buckets in that the length of buckets\n\t// is len(s.bounds)+1, with the last bucket representing:\n\t// (s.bounds[len(s.bounds)-1], +∞).\n\tidx := sort.SearchFloat64s(s.bounds, v)\n\n\ts.valuesMu.Lock()\n\tdefer s.valuesMu.Unlock()\n\n\tb, ok := s.values[attr]\n\tif !ok {\n\t\t// N+1 buckets. 
For example:\n\t\t//\n\t\t// bounds = [0, 5, 10]\n\t\t//\n\t\t// Then,\n\t\t//\n\t\t// buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞)\n\t\tb = newBuckets(len(s.bounds) + 1)\n\t\t// Ensure min and max are recorded values (not zero), for new buckets.\n\t\tb.min, b.max = v, v\n\t\ts.values[attr] = b\n\t}\n\tb.bin(idx, v)\n}", "func (c *LoggerClient) Histogram(name string, value float64) {\n\tc.print(\"Histogram\", name, value, value)\n}", "func (bh *BuzHash) Sum(b []byte) []byte {\n\tvar buf bytes.Buffer\n\tbinary.Write(&buf, binary.LittleEndian, bh.state)\n\thash := buf.Bytes()\n\tfor _, hb := range hash {\n\t\tb = append(b, hb)\n\t}\n\n\treturn b\n}", "func BuildColumn(ctx context.Context, numBuckets, id int64, ndv int64, count int64, samples []types.Datum) (*Histogram, error) {\n\tif count == 0 {\n\t\treturn &Histogram{ID: id}, nil\n\t}\n\tsc := ctx.GetSessionVars().StmtCtx\n\terr := types.SortDatums(sc, samples)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\thg := &Histogram{\n\t\tID: id,\n\t\tNDV: ndv,\n\t\tBuckets: make([]Bucket, 1, numBuckets),\n\t}\n\tvaluesPerBucket := float64(count)/float64(numBuckets) + 1\n\n\t// As we use samples to build the histogram, the bucket number and repeat should multiply a factor.\n\tsampleFactor := float64(count) / float64(len(samples))\n\tndvFactor := float64(count) / float64(ndv)\n\tif ndvFactor > sampleFactor {\n\t\tndvFactor = sampleFactor\n\t}\n\tbucketIdx := 0\n\tvar lastCount int64\n\tfor i := int64(0); i < int64(len(samples)); i++ {\n\t\tcmp, err := hg.Buckets[bucketIdx].Value.CompareDatum(sc, samples[i])\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\ttotalCount := float64(i+1) * sampleFactor\n\t\tif cmp == 0 {\n\t\t\t// The new item has the same value as current bucket value, to ensure that\n\t\t\t// a same value only stored in a single bucket, we do not increase bucketIdx even if it exceeds\n\t\t\t// valuesPerBucket.\n\t\t\thg.Buckets[bucketIdx].Count = int64(totalCount)\n\t\t\tif float64(hg.Buckets[bucketIdx].Repeats) == ndvFactor {\n\t\t\t\thg.Buckets[bucketIdx].Repeats = int64(2 * sampleFactor)\n\t\t\t} else {\n\t\t\t\thg.Buckets[bucketIdx].Repeats += int64(sampleFactor)\n\t\t\t}\n\t\t} else if totalCount-float64(lastCount) <= valuesPerBucket {\n\t\t\t// The bucket still have room to store a new item, update the bucket.\n\t\t\thg.Buckets[bucketIdx].Count = int64(totalCount)\n\t\t\thg.Buckets[bucketIdx].Value = samples[i]\n\t\t\thg.Buckets[bucketIdx].Repeats = int64(ndvFactor)\n\t\t} else {\n\t\t\tlastCount = hg.Buckets[bucketIdx].Count\n\t\t\t// The bucket is full, store the item in the next bucket.\n\t\t\tbucketIdx++\n\t\t\thg.Buckets = append(hg.Buckets, Bucket{\n\t\t\t\tCount: int64(totalCount),\n\t\t\t\tValue: samples[i],\n\t\t\t\tRepeats: int64(ndvFactor),\n\t\t\t})\n\t\t}\n\t}\n\treturn hg, nil\n}", "func (t *WindowedThroughput) updateMaps() {\n\tcurrentIndex := t.indexGenerator.GetCurrentIndex()\n\tlookbackIndexes := t.indexGenerator.DurationToIndexes(t.LookbackFrequencyDuration)\n\taggregateCounts := t.countList.AggregateCounts(currentIndex, lookbackIndexes)\n\n\t// Apply the same aggregation algorithm as total throughput\n\t// Short circuit if no traffic\n\tt.numKeys = len(aggregateCounts)\n\tif t.numKeys == 0 {\n\t\t// no traffic during the last period.\n\t\tt.lock.Lock()\n\t\tdefer t.lock.Unlock()\n\t\tt.savedSampleRates = make(map[string]int)\n\t\treturn\n\t}\n\t// figure out our target throughput per key over the lookback window.\n\ttotalGoalThroughput := t.GoalThroughputPerSec * 
t.LookbackFrequencyDuration.Seconds()\n\t// floor the throughput but min should be 1 event per bucket per time period\n\tthroughputPerKey := math.Max(1, float64(totalGoalThroughput)/float64(t.numKeys))\n\t// for each key, calculate sample rate by dividing counted events by the\n\t// desired number of events\n\tnewSavedSampleRates := make(map[string]int)\n\tfor k, v := range aggregateCounts {\n\t\trate := int(math.Max(1, (float64(v) / float64(throughputPerKey))))\n\t\tnewSavedSampleRates[k] = rate\n\t}\n\t// save newly calculated sample rates\n\tt.lock.Lock()\n\tdefer t.lock.Unlock()\n\tt.savedSampleRates = newSavedSampleRates\n}", "func (batch *ProvUpdateBatch) Put(key string, value []byte, txnID string, blkHeight uint64, deps []string, depSnapshot uint64) {\n\tbatch.KVs[key] = value\n\tbatch.TxnIDs[key] = txnID\n\tbatch.BlkHeight[key] = blkHeight\n\tbatch.KeyDeps[key] = deps\n\tbatch.DepSnapshots[key] = depSnapshot\n}", "func (b *BaseNode) updateHash(n Node) {\n\tif n.Type() == HashT || n.Type() == EmptyT {\n\t\tpanic(\"can't update hash for empty or hash node\")\n\t}\n\tb.hash = hash.DoubleSha256(b.getBytes(n))\n\tb.hashValid = true\n}", "func (dsc *DefaultConsumerStatsCollector) UpdateBatchSize(count int) {\n\tdsc.BatchSize.Update(int64(count))\n}", "func (c *Client) Histogram(name string, value int) error {\n\treturn c.DataDog.Histogram(name, float64(value), nil, 1)\n}", "func (h *Handle) updateGlobalStats(tblInfo *model.TableInfo) error {\n\t// We need to merge the partition-level stats to global-stats when we drop table partition in dynamic mode.\n\ttableID := tblInfo.ID\n\tse, err := h.pool.Get()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsctx := se.(sessionctx.Context)\n\tis := sessiontxn.GetTxnManager(sctx).GetTxnInfoSchema()\n\th.pool.Put(se)\n\tglobalStats, err := h.TableStatsFromStorage(tblInfo, tableID, true, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// If we do not currently have global-stats, no new global-stats will be generated.\n\tif globalStats == nil {\n\t\treturn nil\n\t}\n\topts := make(map[ast.AnalyzeOptionType]uint64, len(analyzeOptionDefault))\n\tfor key, val := range analyzeOptionDefault {\n\t\topts[key] = val\n\t}\n\t// Use current global-stats related information to construct the opts for `MergePartitionStats2GlobalStats` function.\n\tglobalColStatsTopNNum, globalColStatsBucketNum := 0, 0\n\tfor colID := range globalStats.Columns {\n\t\tglobalColStatsTopN := globalStats.Columns[colID].TopN\n\t\tif globalColStatsTopN != nil && len(globalColStatsTopN.TopN) > globalColStatsTopNNum {\n\t\t\tglobalColStatsTopNNum = len(globalColStatsTopN.TopN)\n\t\t}\n\t\tglobalColStats := globalStats.Columns[colID]\n\t\tif globalColStats != nil && len(globalColStats.Buckets) > globalColStatsBucketNum {\n\t\t\tglobalColStatsBucketNum = len(globalColStats.Buckets)\n\t\t}\n\t}\n\tif globalColStatsTopNNum != 0 {\n\t\topts[ast.AnalyzeOptNumTopN] = uint64(globalColStatsTopNNum)\n\t}\n\tif globalColStatsBucketNum != 0 {\n\t\topts[ast.AnalyzeOptNumBuckets] = uint64(globalColStatsBucketNum)\n\t}\n\t// Generate the new column global-stats\n\tnewColGlobalStats, err := h.mergePartitionStats2GlobalStats(h.mu.ctx, opts, is, tblInfo, 0, nil, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(newColGlobalStats.MissingPartitionStats) > 0 {\n\t\tlogutil.BgLogger().Warn(\"missing partition stats when merging global stats\", zap.String(\"table\", tblInfo.Name.L),\n\t\t\tzap.String(\"item\", \"columns\"), zap.Strings(\"missing\", newColGlobalStats.MissingPartitionStats))\n\t}\n\tfor i := 0; i < 
newColGlobalStats.Num; i++ {\n\t\thg, cms, topN := newColGlobalStats.Hg[i], newColGlobalStats.Cms[i], newColGlobalStats.TopN[i]\n\t\tif hg == nil {\n\t\t\t// All partitions have no stats so global stats are not created.\n\t\t\tcontinue\n\t\t}\n\t\t// fms for global stats doesn't need to dump to kv.\n\t\terr = h.SaveStatsToStorage(tableID, newColGlobalStats.Count, newColGlobalStats.ModifyCount,\n\t\t\t0, hg, cms, topN, 2, 1, false, StatsMetaHistorySourceSchemaChange)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Generate the new index global-stats\n\tglobalIdxStatsTopNNum, globalIdxStatsBucketNum := 0, 0\n\tfor _, idx := range tblInfo.Indices {\n\t\tglobalIdxStatsTopN := globalStats.Indices[idx.ID].TopN\n\t\tif globalIdxStatsTopN != nil && len(globalIdxStatsTopN.TopN) > globalIdxStatsTopNNum {\n\t\t\tglobalIdxStatsTopNNum = len(globalIdxStatsTopN.TopN)\n\t\t}\n\t\tglobalIdxStats := globalStats.Indices[idx.ID]\n\t\tif globalIdxStats != nil && len(globalIdxStats.Buckets) > globalIdxStatsBucketNum {\n\t\t\tglobalIdxStatsBucketNum = len(globalIdxStats.Buckets)\n\t\t}\n\t\tif globalIdxStatsTopNNum != 0 {\n\t\t\topts[ast.AnalyzeOptNumTopN] = uint64(globalIdxStatsTopNNum)\n\t\t}\n\t\tif globalIdxStatsBucketNum != 0 {\n\t\t\topts[ast.AnalyzeOptNumBuckets] = uint64(globalIdxStatsBucketNum)\n\t\t}\n\t\tnewIndexGlobalStats, err := h.mergePartitionStats2GlobalStats(h.mu.ctx, opts, is, tblInfo, 1, []int64{idx.ID}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(newIndexGlobalStats.MissingPartitionStats) > 0 {\n\t\t\tlogutil.BgLogger().Warn(\"missing partition stats when merging global stats\", zap.String(\"table\", tblInfo.Name.L),\n\t\t\t\tzap.String(\"item\", \"index \"+idx.Name.L), zap.Strings(\"missing\", newIndexGlobalStats.MissingPartitionStats))\n\t\t}\n\t\tfor i := 0; i < newIndexGlobalStats.Num; i++ {\n\t\t\thg, cms, topN := newIndexGlobalStats.Hg[i], newIndexGlobalStats.Cms[i], newIndexGlobalStats.TopN[i]\n\t\t\tif hg == nil {\n\t\t\t\t// All partitions have no stats so global stats are not created.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// fms for global stats doesn't need to dump to kv.\n\t\t\terr = h.SaveStatsToStorage(tableID, newIndexGlobalStats.Count, newIndexGlobalStats.ModifyCount, 1, hg, cms, topN, 2, 1, false, StatsMetaHistorySourceSchemaChange)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (tc *TaskCount) Update(c context.Context, queue string, exec, tot int) error {\n\t// Queue names are globally unique, so we can use them as IDs.\n\ttc.ID = queue\n\ttc.Computed = clock.Now(c).UTC()\n\ttc.Queue = queue\n\ttc.Executing = exec\n\ttc.Total = tot\n\tif err := datastore.Put(c, tc); err != nil {\n\t\treturn errors.Annotate(err, \"failed to store count\").Err()\n\t}\n\treturn nil\n}", "func (pq *PriorityQueue) update(item *item, dist float64) {\n\titem.dist = dist\n\theap.Fix(pq, item.index)\n}", "func (ob *OrderBook) BatchUpdate() {\n\n}", "func (o *MempoolBin) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {\n\tvar err error\n\tkey := makeCacheKey(columns, nil)\n\tmempoolBinUpdateCacheMut.RLock()\n\tcache, cached := mempoolBinUpdateCache[key]\n\tmempoolBinUpdateCacheMut.RUnlock()\n\n\tif !cached {\n\t\twl := columns.UpdateColumnSet(\n\t\t\tmempoolBinAllColumns,\n\t\t\tmempoolBinPrimaryKeyColumns,\n\t\t)\n\n\t\tif len(wl) == 0 {\n\t\t\treturn 0, errors.New(\"models: unable to update mempool_bin, could not build whitelist\")\n\t\t}\n\n\t\tcache.query = fmt.Sprintf(\"UPDATE 
\\\"mempool_bin\\\" SET %s WHERE %s\",\n\t\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, wl),\n\t\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", len(wl)+1, mempoolBinPrimaryKeyColumns),\n\t\t)\n\t\tcache.valueMapping, err = queries.BindMapping(mempoolBinType, mempoolBinMapping, append(wl, mempoolBinPrimaryKeyColumns...))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tvalues := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, cache.query)\n\t\tfmt.Fprintln(writer, values)\n\t}\n\tvar result sql.Result\n\tresult, err = exec.ExecContext(ctx, cache.query, values...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to update mempool_bin row\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to get rows affected by update for mempool_bin\")\n\t}\n\n\tif !cached {\n\t\tmempoolBinUpdateCacheMut.Lock()\n\t\tmempoolBinUpdateCache[key] = cache\n\t\tmempoolBinUpdateCacheMut.Unlock()\n\t}\n\n\treturn rowsAff, nil\n}", "func (h *Histogram) copyHDataFrom(src *Histogram) {\n\tif h.Divider == src.Divider && h.Offset == src.Offset {\n\t\tfor i := 0; i < len(h.Hdata); i++ {\n\t\t\th.Hdata[i] += src.Hdata[i]\n\t\t}\n\t\treturn\n\t}\n\n\thData := src.Export()\n\tfor _, data := range hData.Data {\n\t\th.record((data.Start+data.End)/2, int(data.Count))\n\t}\n}", "func (p *ProgressUpdateBatcher) Add(ctx context.Context, delta float32) error {\n\tp.Lock()\n\tp.completed += delta\n\tcompleted := p.completed\n\tshouldReport := p.completed-p.reported > progressFractionThreshold\n\tshouldReport = shouldReport && p.lastReported.Add(progressTimeThreshold).Before(timeutil.Now())\n\n\tif shouldReport {\n\t\tp.reported = p.completed\n\t\tp.lastReported = timeutil.Now()\n\t}\n\tp.Unlock()\n\n\tif shouldReport {\n\t\treturn p.Report(ctx, completed)\n\t}\n\treturn nil\n}", "func (c *tabletStatsCache) StatsUpdate(stats *discovery.TabletStats) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tkeyspace := stats.Target.Keyspace\n\tshard := stats.Target.Shard\n\tcell := stats.Tablet.Alias.Cell\n\ttabletType := stats.Target.TabletType\n\n\taliasKey := tabletToMapKey(stats)\n\tts, ok := c.statusesByAlias[aliasKey]\n\tif !stats.Up {\n\t\tif !ok {\n\t\t\t// Tablet doesn't exist and was recently deleted or changed its type. Panic as this is unexpected behavior.\n\t\t\tpanic(fmt.Sprintf(\"BUG: tablet (%v) doesn't exist\", aliasKey))\n\t\t}\n\t\t// The tablet still exists in our cache but was recently deleted or changed its type. 
Delete it now.\n\t\tc.statuses[keyspace][shard][cell][tabletType] = remove(c.statuses[keyspace][shard][cell][tabletType], stats.Tablet.Alias)\n\t\tdelete(c.statusesByAlias, aliasKey)\n\t\tc.tabletCountsByCell[cell]--\n\t\tif c.tabletCountsByCell[cell] == 0 {\n\t\t\tdelete(c.tabletCountsByCell, cell)\n\t\t}\n\t\treturn\n\t}\n\n\tif !ok {\n\t\t// Tablet isn't tracked yet so just add it.\n\t\tshards, ok := c.statuses[keyspace]\n\t\tif !ok {\n\t\t\tshards = make(map[string]map[string]map[topodatapb.TabletType][]*discovery.TabletStats)\n\t\t\tc.statuses[keyspace] = shards\n\t\t}\n\n\t\tcells, ok := c.statuses[keyspace][shard]\n\t\tif !ok {\n\t\t\tcells = make(map[string]map[topodatapb.TabletType][]*discovery.TabletStats)\n\t\t\tc.statuses[keyspace][shard] = cells\n\t\t}\n\n\t\ttypes, ok := c.statuses[keyspace][shard][cell]\n\t\tif !ok {\n\t\t\ttypes = make(map[topodatapb.TabletType][]*discovery.TabletStats)\n\t\t\tc.statuses[keyspace][shard][cell] = types\n\t\t}\n\n\t\ttablets, ok := c.statuses[keyspace][shard][cell][tabletType]\n\t\tif !ok {\n\t\t\ttablets = make([]*discovery.TabletStats, 0)\n\t\t\tc.statuses[keyspace][shard][cell][tabletType] = tablets\n\t\t}\n\n\t\tc.statuses[keyspace][shard][cell][tabletType] = append(c.statuses[keyspace][shard][cell][tabletType], stats)\n\t\tsort.Sort(byTabletUID(c.statuses[keyspace][shard][cell][tabletType]))\n\t\tc.statusesByAlias[aliasKey] = stats\n\t\tc.tabletCountsByCell[cell]++\n\t\treturn\n\t}\n\n\t// Tablet already exists so just update it in the cache.\n\t*ts = *stats\n}", "func NewHistogram(opts HistogramOptions) *Histogram {\n\tif opts.NumBuckets == 0 {\n\t\topts.NumBuckets = 32\n\t}\n\tif opts.BaseBucketSize == 0.0 {\n\t\topts.BaseBucketSize = 1.0\n\t}\n\th := Histogram{\n\t\topts: opts,\n\t\tbuckets: make([]bucketInternal, opts.NumBuckets),\n\t\tcount: newCounter(),\n\t\tsum: newCounter(),\n\t\tsumOfSquares: newCounter(),\n\t\ttracker: newTracker(),\n\n\t\tlogBaseBucketSize: math.Log(opts.BaseBucketSize),\n\t\toneOverLogOnePlusGrowthFactor: 1 / math.Log(1+opts.GrowthFactor),\n\t}\n\tm := 1.0 + opts.GrowthFactor\n\tdelta := opts.BaseBucketSize\n\th.buckets[0].lowBound = float64(opts.MinValue)\n\th.buckets[0].count = newCounter()\n\tfor i := 1; i < opts.NumBuckets; i++ {\n\t\th.buckets[i].lowBound = float64(opts.MinValue) + delta\n\t\th.buckets[i].count = newCounter()\n\t\tdelta = delta * m\n\t}\n\treturn &h\n}", "func (ac *Accumulator) AddHistogram(measurement string, fields map[string]interface{},\n\ttags map[string]string, t ...time.Time) {\n\t// as of right now metric always returns a nil error\n\tm, _ := metric.New(measurement, tags, fields, getTime(t), telegraf.Histogram)\n\tac.AddMetric(m)\n}", "func (c *LBCache) update(existing []LB, toDelete []string) {\n\tc.Lock()\n\tdefer c.Unlock()\n\tfor _, uuid := range toDelete {\n\t\tdelete(c.existing, uuid)\n\t}\n\n\tfor _, lb := range existing {\n\t\tif lb.UUID == \"\" {\n\t\t\tpanic(fmt.Sprintf(\"coding error: cache add LB %s with no UUID\", lb.Name))\n\t\t}\n\t\tc.existing[lb.UUID] = &CachedLB{\n\t\t\tName: lb.Name,\n\t\t\tUUID: lb.UUID,\n\t\t\tProtocol: strings.ToLower(lb.Protocol),\n\t\t\tExternalIDs: lb.ExternalIDs,\n\t\t\tVIPs: getVips(&lb),\n\n\t\t\tSwitches: sets.NewString(lb.Switches...),\n\t\t\tRouters: sets.NewString(lb.Routers...),\n\t\t\tGroups: sets.NewString(lb.Groups...),\n\t\t}\n\t}\n}", "func (phStats *passwordHasherStats) accumulateStats() {\n\tphStats.logger.Print(\"Collecting stats...\")\n\tok := true\n\tfor ok {\n\t\tvar ms microseconds\n\t\tif ms, ok = <-phStats.queue; ok 
{\n\t\t\tphStats.logger.Printf(\"Elapsed time: %dms\", ms)\n\n\t\t\t// block reads while appending/resizing/reallocating\n\t\t\tphStats.lock.Lock()\n\t\t\tphStats.times = append(phStats.times, ms)\n\t\t\tphStats.lock.Unlock()\n\t\t}\n\t}\n\tphStats.logger.Print(\"Done collecting stats\")\n}", "func mockStatsHistogram(id int64, values []types.Datum, repeat int64, tp *types.FieldType) *statistics.Histogram {\n\tndv := len(values)\n\thistogram := statistics.NewHistogram(id, int64(ndv), 0, 0, tp, ndv, 0)\n\tfor i := 0; i < ndv; i++ {\n\t\thistogram.AppendBucket(&values[i], &values[i], repeat*int64(i+1), repeat)\n\t}\n\treturn histogram\n}", "func (s *CountMinSketch) Update(key []byte, count uint64) {\n\tfor r, c := range s.locations(key) {\n\t\ts.count[r][c] += count\n\t}\n}", "func (cm *customMetrics) AddHistogram(\n\tnamespace, subsystem, name, help, internalKey string,\n\tconstLabels prometheus.Labels, buckets []float64) {\n\n\tcm.histograms[internalKey] = promauto.NewHistogram(prometheus.HistogramOpts{\n\t\tNamespace: namespace,\n\t\tSubsystem: subsystem,\n\t\tName: name,\n\t\tHelp: help,\n\t\tConstLabels: constLabels,\n\t\tBuckets: buckets,\n\t})\n}", "func (c *Container) UpdateTotal() error {\n\tret, err := hrpc.Invoke(ownerAddr, contractAddr, ownerPriKey, contractABI, methodName[0], true)\n\tcolorLog.Info(\"Invoke to %q, Return Type: %T, Return Value: %v\", methodName[0], ret, ret)\n\tif err != nil {\n\t\tcolorLog.Error(\"Invoke to %q failed, error is %q\", methodName[0], err)\n\t\treturn err\n\t}\n\tcolorLog.Success(\"Invoke to %q success\", methodName[0])\n\tc.Total = ret.(*big.Int).Uint64()\n\n\treturn nil\n}" ]
[ "0.6296699", "0.615325", "0.55599385", "0.55494124", "0.54620075", "0.5192552", "0.50973797", "0.5033839", "0.50225997", "0.4982572", "0.49692327", "0.49395838", "0.49386215", "0.49116746", "0.48813924", "0.4831629", "0.48179474", "0.48177814", "0.48161152", "0.47989178", "0.47881764", "0.47584262", "0.47091028", "0.47070226", "0.47036844", "0.46911794", "0.46807763", "0.46593693", "0.46252754", "0.46201497", "0.4619772", "0.46015766", "0.45933887", "0.45905632", "0.4582751", "0.45813662", "0.45709312", "0.45699343", "0.4568262", "0.45573413", "0.45226997", "0.45223448", "0.45201433", "0.45152944", "0.45139214", "0.45102897", "0.44858658", "0.44787738", "0.44645372", "0.4449436", "0.44357464", "0.44296256", "0.44281465", "0.44249105", "0.44236872", "0.4409331", "0.44002873", "0.4391148", "0.43908718", "0.4389533", "0.4389436", "0.43887228", "0.43811452", "0.4378247", "0.43691456", "0.43627045", "0.43614596", "0.43604562", "0.43571022", "0.43566224", "0.43516415", "0.43455115", "0.4338577", "0.43302542", "0.4311047", "0.42984974", "0.42920357", "0.42838994", "0.4281857", "0.4269339", "0.4252093", "0.4234417", "0.4229134", "0.4228814", "0.4228703", "0.4225814", "0.42240065", "0.4222635", "0.4221847", "0.42172113", "0.42127663", "0.42117614", "0.42055047", "0.420007", "0.4187378", "0.41771832", "0.41746056", "0.41716182", "0.41709492", "0.41683164" ]
0.82468635
0
Stats just calls f with the given cb
func (f MonitorFunc) Stats(cb func(name string, val float64)) { f(cb) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (v *ValueMonitor) Stats(cb func(name string, val float64)) {\n\tv.mtx.Lock()\n\tcount := v.count\n\tsum := v.sum\n\tsum_squared := v.sum_squared\n\trecent := v.recent\n\tmax := v.max\n\tmin := v.min\n\tv.mtx.Unlock()\n\n\tif count > 0 {\n\t\tcb(\"avg\", sum/float64(count))\n\t}\n\tcb(\"count\", float64(count))\n\tcb(\"max\", max)\n\tcb(\"min\", min)\n\tcb(\"recent\", recent)\n\tcb(\"sum\", sum)\n\tcb(\"sum_squared\", sum_squared)\n}", "func (v *IntValueMonitor) Stats(cb func(name string, val float64)) {\n\tv.mtx.Lock()\n\tcount := v.count\n\tsum := v.sum\n\tsum_squared := v.sum_squared\n\trecent := v.recent\n\tmax := v.max\n\tmin := v.min\n\tv.mtx.Unlock()\n\n\tif count > 0 {\n\t\tcb(\"avg\", float64(sum/count))\n\t}\n\tcb(\"count\", float64(count))\n\tcb(\"max\", float64(max))\n\tcb(\"min\", float64(min))\n\tcb(\"recent\", float64(recent))\n\tcb(\"sum\", float64(sum))\n\tcb(\"sum_squared\", float64(sum_squared))\n}", "func (g *goMetrics) AddCallback(f func(stats *runtime.MemStats)) {\n\tg.mu.Lock()\n\tg.cb = append(g.cb, f)\n\tg.mu.Unlock()\n}", "func Statsf(category string, format string, params ...interface{}) {\n\tDefaultLogger.Statsf(category, format, params...)\n}", "func (c *Stats) FlushCallback(f func(metricSeries []*client.DDMetric)) {\n\tc.flushCallback = f\n}", "func Benchmark(f func(b *B)) BenchmarkResult {}", "func (this *List) Do(f func(Counter) error) []error {\n this.lock.RLock()\n defer this.lock.RUnlock()\n\n errors := make([]error, 0)\n\n for k, _ := range this.counters {\n err := f(this.counters[k])\n if err != nil {\n errors = append(errors, err)\n }\n }\n\n return errors\n}", "func Benchmark(f func(b *B)) BenchmarkResult", "func TestFinishInitStatsCb(t *testing.T) {\n\tr := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))\n\ts, err := NewSession(\"localhost\", DefaultSettings())\n\tassert.NoError(t, err)\n\tassert.NotNil(t, s)\n\n\tfor i := 0; i < 400; i++ {\n\t\ts.Stats.EchoRequested()\n\t}\n\n\tsum := uint64(0)\n\tsqsum := uint64(0)\n\tmx := uint64(0)\n\tmn := uint64(math.MaxUint32)\n\tfor i := 0; i < 100; i++ {\n\t\trtt := uint64(r.Uint32())\n\t\tsum += rtt\n\t\tsqsum += rtt * rtt\n\t\tmx = max(mx, rtt)\n\t\tmn = min(mn, rtt)\n\t\ts.Stats.EchoReplied(rtt)\n\t}\n\n\tloss := 1 - float64(100)/float64(400)\n\n\tavg := sum / uint64(100)\n\tsqrd := float64((sqsum / uint64(100)) - avg*avg)\n\tmdev := uint64(math.Sqrt(sqrd))\n\tnow := time.Now()\n\n\tfinishStatsCb(s)\n\n\tend, started := s.Stats.GetEndTime()\n\tassert.True(t, started)\n\tassert.True(t, end.After(now))\n\tassert.Equal(t, mn, s.Stats.GetRTTMin())\n\tassert.Equal(t, mx, s.Stats.GetRTTMax())\n\tassert.Equal(t, avg, s.Stats.GetRTTAvg())\n\tassert.Equal(t, mdev, s.Stats.GetRTTMDev())\n\tassert.Equal(t, loss, s.Stats.GetPktLoss())\n}", "func Do(threshold time.Duration, sf SlowFunc, cb CallbackFunc) error {\n\t// Call the slow function\n\terrCh := make(chan error, 1)\n\tgo func() {\n\t\terrCh <- sf()\n\t}()\n\n\t// Wait for it to complete or the threshold to pass\n\tselect {\n\tcase err := <-errCh:\n\t\treturn err\n\tcase <-time.After(threshold):\n\t\t// Threshold reached, call the callback\n\t\tcb()\n\t}\n\n\t// Wait an indefinite amount of time for it to finally complete\n\treturn <-errCh\n}", "func (th *Throttler) stats() (int64, int64) {\n\tnow := timeNowFunc()\n\n\tth.mu.Lock()\n\ta, t := th.accepts.sum(now), th.throttles.sum(now)\n\tth.mu.Unlock()\n\treturn a, t\n}", "func RunBenchmark(b *testing.B, version int, f func(*Anwork, int)) {\n\ta, err := MakeAnwork(version)\n\tif err != nil 
{\n\t\tb.Fatal(err)\n\t}\n\tdefer a.Close()\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tf(a, i)\n\t}\n}", "func AllocsPerRun(runs int, f func()) (avg float64) {}", "func fprintStats(w io.Writer, q *QueryBenchmarker) {\n\tmaxKeyLength := 0\n\tkeys := make([]string, 0, len(q.statMapping))\n\tfor k := range q.statMapping {\n\t\tif len(k) > maxKeyLength {\n\t\t\tmaxKeyLength = len(k)\n\t\t}\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\tfor _, k := range keys {\n\t\tv := q.statMapping[k]\n\t\tminRate := 1e3 / v.Min\n\t\tmeanRate := 1e3 / v.Mean\n\t\tmaxRate := 1e3 / v.Max\n\t\tpaddedKey := fmt.Sprintf(\"%s\", k)\n\t\tfor len(paddedKey) < maxKeyLength {\n\t\t\tpaddedKey += \" \"\n\t\t}\n\t\tkStats := make(map[string]interface{})\n\t\tkStats[\"min\"] = v.Min\n\t\tkStats[\"minRate\"] = minRate\n\t\tkStats[\"mean\"] = v.Mean\n\t\tkStats[\"meanRate\"] = meanRate\n\t\tkStats[\"max\"] = v.Max\n\t\tkStats[\"maxRate\"] = maxRate\n\t\tkStats[\"count\"] = v.Count\n\t\tkStats[\"sum\"] = v.Sum / 1e3\n\t\tq.json[k] = kStats\n\t\tif !q.doJson {\n\t\t\t_, err := fmt.Fprintf(w, \"%s : min: %8.2fms (%7.2f/sec), mean: %8.2fms (%7.2f/sec), max: %7.2fms (%6.2f/sec), count: %8d, sum: %5.1fsec \\n\", paddedKey, v.Min, minRate, v.Mean, meanRate, v.Max, maxRate, v.Count, v.Sum/1e3)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n\tq.json[\"totalQueries\"] = q.totalQueries\n\tq.json[\"wallClockTime\"] = q.wallTook.Seconds()\n\tq.json[\"queryRate\"] = float64(q.totalQueries) / float64(q.wallTook.Seconds())\n\tq.json[\"workers\"] = q.workers\n\tq.json[\"batchSize\"] = q.batchSize\n\tif q.doJson {\n\t\tfor k, v := range q.json {\n\t\t\tif _, err := json.Marshal(v); err != nil {\n\t\t\t\tq.json[k] = \"\"\n\t\t\t}\n\t\t}\n\t\tb, err := json.Marshal(q.json)\n\t\tif err != nil {\n\t\t\tlog.Println(\"error:\", err)\n\t\t}\n\t\tos.Stdout.Write(b)\n\t}\n}", "func RunBenchmark(name string, b *testing.B, f Func, n int) {\n\tb.Run(name, func(b *testing.B) {\n\t\tb.Logf(\"f(%d), loop (b.N) = %d\\n\", n, b.N)\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tf(n)\n\t\t}\n\t})\n}", "func (e *executor) function(opts opts, fn func() error, format string, args ...interface{}) error {\n\te.increaseIndent()\n\tdefer e.decreaseIndent()\n\te.printf(e.verboseStdout(opts), format, args...)\n\terr := fn()\n\te.printf(e.verboseStdout(opts), okOrFailed(err))\n\treturn err\n}", "func AllocsPerRun(runs int, f func()) (avg float64)", "func TestInitStatsCb(t *testing.T) {\n\ts, err := NewSession(\"localhost\", DefaultSettings())\n\tassert.NoError(t, err)\n\tassert.NotNil(t, s)\n\n\tmsg := s.buildEchoRequest(0)\n\n\tnow := time.Now()\n\tinitStatsCb(s, msg)\n\tst, started := s.Stats.GetStartTime()\n\tassert.True(t, started)\n\tassert.True(t, st.After(now))\n}", "func printTime(fn func(int) ([]int)) (func(int) ([]int)) {\n return func(arg int) ([]int){\n start := time.Now()\n res := fn(arg)\n end := time.Now()\n elapsed := end.Sub(start)\n fmt.Println(\"elapsed time = \", elapsed)\n return res\n }\n}", "func perfTest(arg perfArg, f func()) (res perfResult) {\n\t// Pipeline: request generator -> workers -> sampler\n\tendUtil := startUtil()\n\n\t// Generate requests until arg.dur elapses\n\tstop := time.NewTimer(arg.dur)\n\tdefer stop.Stop()\n\tvar send *time.Ticker\n\tif arg.interval > 0 {\n\t\tsend = time.NewTicker(arg.interval)\n\t\tdefer send.Stop()\n\t}\n\trequests := make(chan time.Time, arg.maxq)\n\tgo func() {\n\t\tdefer close(requests)\n\t\tfor {\n\t\t\tif send == nil {\n\t\t\t\t// No request interval: send 
whenever the queue has space.\n\t\t\t\tselect {\n\t\t\t\tcase <-stop.C:\n\t\t\t\t\treturn\n\t\t\t\tcase requests <- time.Now():\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Attempt to send a request periodically, drop if queue is full.\n\t\t\t\tselect {\n\t\t\t\tcase <-stop.C:\n\t\t\t\t\treturn\n\t\t\t\tcase <-send.C:\n\t\t\t\t}\n\t\t\t\tselect {\n\t\t\t\tcase requests <- time.Now():\n\t\t\t\tdefault:\n\t\t\t\t\tres.drops++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\t// Workers run f until requests closed.\n\tdurations := make(chan time.Duration)\n\tvar wg sync.WaitGroup\n\twg.Add(arg.par)\n\tfor i := 0; i < arg.par; i++ {\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tfor start := range requests {\n\t\t\t\tqueueTime := time.Since(start)\n\t\t\t\t_ = queueTime // not currently used\n\t\t\t\tstart = time.Now()\n\t\t\t\tf()\n\t\t\t\tdurations <- time.Since(start)\n\t\t\t}\n\t\t}()\n\t}\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(durations)\n\t}()\n\n\t// Sampler populates result with samples.\n\tres.par = arg.par\n\tres.sampler = newSampler()\n\tdefer res.sampler.close()\n\tfor elapsed := range durations {\n\t\tres.sampler.add(elapsed)\n\t}\n\tres.walltime, res.exectime = endUtil()\n\treturn\n}", "func BpfStats(fd int) (*BpfStat, error) {\n\tvar s BpfStat\n\terr := ioctlPtr(fd, BIOCGSTATS, unsafe.Pointer(&s))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &s, nil\n}", "func (bg *Backgrounder) RunProfile(f Handler, name string) {\n\tbg.count++\n\tgo func() {\n\t\tstartTime := time.Now()\n\t\tbg.pipe <- process{\n\t\t\tName: name,\n\t\t\tError: f(),\n\t\t\tProcessTime: time.Since(startTime),\n\t\t}\n\t}()\n}", "func countResponseTime(owner, traceSn, funcName string, appid uint16, countFunc func(uint64) uint64) func() {\n\tstart := time.Now()\n\treturn func() {\n\t\telapsed := int64(time.Since(start))\n\t\tif elapsed > netConf().ResponseSlowThreshold {\n\t\t\tLogger.Warn(owner, appid, traceSn, funcName, \"Slow hanlding!\",\n\t\t\t\tfmt.Sprintf(\"used %.3f ms for reuqtest\", float64(elapsed)/float64(time.Millisecond)))\n\t\t}\n\t\tcountFunc(uint64(elapsed))\n\t}\n}", "func (c *Fanout) Do(ctx context.Context, f func(c context.Context)) (err error) {\n\tif f == nil || c.ctx.Err() != nil {\n\t\treturn c.ctx.Err()\n\t}\n\tnakeCtx := metadata.WithContext(ctx)\n\t//去掉trace,感觉没啥用。。\n\t//if span := opentracing.SpanFromContext(ctx); span != nil { //如果里面有trace,就接着加一段,没有就不加\n\t//\tspan := span.Tracer().StartSpan(\"Fanout:Do\")\n\t//\tsetTags(span)\n\t//\tnakeCtx = opentracing.ContextWithSpan(nakeCtx, span)\n\t//}\n\tselect {\n\tcase c.ch <- item{f: f, ctx: nakeCtx}:\n\tdefault:\n\t\terr = ErrFull\n\t}\n\t//todo... 
addMetric\n\t//_metricChanSize.Set(float64(len(c.ch)), c.name)\n\treturn\n}", "func (g *Glimit) Run(f func()) {\n\tg.c <- struct{}{}\n\tgo func() {\n\t\tf()\n\t\t<-g.c\n\t}()\n}", "func (c *Counter) Stat(name string, value int64) {\n\tif !c.stopped {\n\t\tc.recordChannel <- countRecord{name, value}\n\t}\n}", "func (f Function) GetStats() []Statement {\n\treturn f.stats\n}", "func (m *Meter) MaybeStatsCall(cb MeterStatsCallback) bool {\n\ts := m.MaybeStats()\n\tif s != nil {\n\t\tcb(*s)\n\t\treturn true\n\t}\n\treturn false\n}", "func (s *Stats) Process(next echo.HandlerFunc) echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\t\tc.Set(\"user\", \"hello?\")\n\n\t\tif err := next(c); err != nil {\n\t\t\tc.Error(err)\n\t\t}\n\t\ts.mutex.Lock()\n\t\tdefer s.mutex.Unlock()\n\t\ts.RequestCount++\n\t\tstatus := strconv.Itoa(c.Response().Status)\n\t\ts.Statuses[status]++\n\t\treturn nil\n\t}\n}", "func Run(ctx context.Context, tconn *chrome.TestConn, f func(ctx context.Context) error, names ...string) ([]*Histogram, error) {\n\tr, err := StartRecorder(ctx, tconn, names...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := f(ctx); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn r.Histogram(ctx, tconn)\n}", "func (tm *Manager) Stats() (Stats, Result) {\n\tstats := Stats{}\n\tresult := Result{}\n\n\tif tm.done {\n\t\tresult.Message = shutdownMsg\n\t\tresult.Code = 500\n\t\treturn stats, result\n\t}\n\n\ttm.mutex.Lock()\n\tdefer tm.mutex.Unlock()\n\n\tstats.Total = tm.completedTasks\n\tavg := float64(tm.taskRuntime*time.Nanosecond) / float64(tm.completedTasks)\n\tstats.Average = uint64(avg)\n\tresult.Code = 200\n\n\treturn stats, result\n}", "func (c *Fanout) Do(ctx context.Context, f func(ctx context.Context)) (err error) {\n\tif f == nil || c.ctx.Err() != nil {\n\t\treturn c.ctx.Err()\n\t}\n\tnakeCtx := metadata.WithContext(ctx)\n\tif tr, ok := trace.FromContext(ctx); ok {\n\t\ttr = tr.Fork(\"\", \"Fanout:Do\").SetTag(traceTags...)\n\t\tnakeCtx = trace.NewContext(nakeCtx, tr)\n\t}\n\tselect {\n\tcase c.ch <- item{f: f, ctx: nakeCtx}:\n\tdefault:\n\t\terr = ErrFull\n\t}\n\t_metricChanSize.Set(float64(len(c.ch)), c.name)\n\treturn\n}", "func TimeRun(f func([]int) []int, c chan uint64, v []int) {\n\tdefer wg.Done()\n\tstart := time.Now()\n\tf(v)\n\tduration := uint64(time.Now().Sub(start).Nanoseconds())\n\tfmt.Printf(\"%v \", duration)\t\n\tc <- duration\n}", "func MakeStats(handler http.Handler, stats *Stats) http.Handler {\r\n return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\r\n \t//Save time everytime we get a request\r\n \tstart := time.Now()\r\n\r\n //Log connection\r\n log.Printf(\"%s %s\", r.RemoteAddr, r.URL)\r\n\r\n //Print all info for request\r\n //log.Printf(\"Request: %v\", r)\r\n\r\n \t//Route and fulfil request\r\n handler.ServeHTTP(w, r)\r\n\r\n //Only count hash requests in stats (even if request is broken) from client side\r\n //This does not count the background hash-write operations as user is not aware\r\n //of these as per requirement. 
If requirement changes, this will need to be changed\r\n if strings.Contains(r.URL.Path, \"hash\"){\r\n //Calculate request time\r\n end := time.Now()\r\n requestTime := end.Sub(start)\r\n\r\n //Update server stats - thread-safe\r\n stats.timeLock.Lock()\r\n stats.totalTime = stats.totalTime + requestTime\r\n stats.Requests++\r\n stats.AverageTime = float32(stats.totalTime / time.Millisecond) / float32(stats.Requests)\r\n stats.timeLock.Unlock()\r\n log.Printf(\"Request Complete: %v Average: %v\",requestTime,stats.AverageTime)\r\n } \r\n })\r\n}", "func Timing(f func()) int64 {\n\tnow := time.Now().UnixNano()\n\tf()\n\n\treturn time.Now().UnixNano() - now\n}", "func (f *FakeProcTableImpl) Stats() ProcTableStats {\n\tf.StatsCount++\n\treturn f.realTable.Stats()\n}", "func (d *DBClient) StatsHandler(w http.ResponseWriter, req *http.Request, next http.HandlerFunc) {\n\tstats := d.Counter.Flush()\n\n\tcount, err := d.Cache.RecordsCount()\n\n\tif err != nil {\n\t\tlog.Error(err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n\n\tvar sr statsResponse\n\tsr.Stats = stats\n\tsr.RecordsCount = count\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\tb, err := json.Marshal(sr)\n\n\tif err != nil {\n\t\tlog.Error(err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t} else {\n\t\tw.Write(b)\n\t\treturn\n\t}\n\n}", "func HandlerStats(stats stats.Stats) server.HandlerWrapper {\n\t// return a handler wrapper\n\treturn func(h server.HandlerFunc) server.HandlerFunc {\n\t\t// return a function that returns a function\n\t\treturn func(ctx context.Context, req server.Request, rsp interface{}) error {\n\t\t\t// execute the handler\n\t\t\terr := h(ctx, req, rsp)\n\t\t\t// record the stats\n\t\t\tstats.Record(err)\n\t\t\t// return the error\n\t\t\treturn err\n\t\t}\n\t}\n}", "func (s *BatchProvidingSystem) Stat(ctx context.Context) (BatchedProviderStats, error) {\n\t// TODO: Does it matter that there is no locking around the total+average values?\n\treturn BatchedProviderStats{\n\t\tTotalProvides: s.totalProvides,\n\t\tLastReprovideBatchSize: s.lastReprovideBatchSize,\n\t\tAvgProvideDuration: s.avgProvideDuration,\n\t\tLastReprovideDuration: s.lastReprovideDuration,\n\t}, nil\n}", "func (s *scanCoordinator) handleStats(cmd Message) {\n\ts.supvCmdch <- &MsgSuccess{}\n\n\treq := cmd.(*MsgStatsRequest)\n\treplych := req.GetReplyChannel()\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\n\tstats := s.stats.Get()\n\tst := s.serv.Statistics()\n\tstats.numConnections.Set(st.Connections)\n\n\t// Compute counts asynchronously and reply to stats request\n\tgo func() {\n\t\tfor id, idxStats := range stats.indexes {\n\t\t\terr := s.updateItemsCount(id, idxStats)\n\t\t\tif err != nil {\n\t\t\t\tlogging.Errorf(\"%v: Unable to compute index items_count for %v/%v/%v state %v (%v)\", s.logPrefix,\n\t\t\t\t\tidxStats.bucket, idxStats.name, id, idxStats.indexState.Value(), err)\n\t\t\t}\n\n\t\t\t// compute scan rate\n\t\t\tnow := time.Now().UnixNano()\n\t\t\telapsed := float64(now-idxStats.lastScanGatherTime.Value()) / float64(time.Second)\n\t\t\tif elapsed > 60 {\n\t\t\t\tpartitions := idxStats.getPartitions()\n\t\t\t\tfor _, pid := range partitions {\n\t\t\t\t\tpartnStats := idxStats.getPartitionStats(pid)\n\t\t\t\t\tnumRowsScanned := partnStats.numRowsScanned.Value()\n\t\t\t\t\tif idxStats.lastScanGatherTime.Value() != int64(0) {\n\t\t\t\t\t\tscanRate := float64(numRowsScanned-partnStats.lastNumRowsScanned.Value()) / elapsed\n\t\t\t\t\t\tpartnStats.avgScanRate.Set(int64((scanRate 
+ float64(partnStats.avgScanRate.Value())) / 2))\n\t\t\t\t\t\tlogging.Debugf(\"scanCoordinator.handleStats: index %v partition %v numRowsScanned %v scan rate %v avg scan rate %v\",\n\t\t\t\t\t\t\tid, pid, numRowsScanned, scanRate, partnStats.avgScanRate.Value())\n\t\t\t\t\t}\n\t\t\t\t\tpartnStats.lastNumRowsScanned.Set(numRowsScanned)\n\t\t\t\t\tidxStats.lastScanGatherTime.Set(now)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treplych <- true\n\t}()\n}", "func (m *Metrics) WrapHandleFunc(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tbefore := time.Now()\n\t\trw := &statusRecorder{\n\t\t\tResponseWriter: w,\n\t\t\tstatusCode: http.StatusOK,\n\t\t}\n\t\th.ServeHTTP(rw, r)\n\t\tafter := time.Now()\n\t\tm.add(rw, after.Sub(before).Seconds())\n\t})\n}", "func (my *Driver) UseStat(s statFn) {\n\tmy.stat = s\n}", "func (t *durationTracker) Track(f func()) {\n\tstartedAt := t.clock.Now()\n\tdefer func() {\n\t\tduration := t.clock.Since(startedAt)\n\t\tt.mu.Lock()\n\t\tdefer t.mu.Unlock()\n\t\tt.latency = t.aggregateFunction(t.latency, duration)\n\t}()\n\n\tf()\n}", "func (r *ProcessingStatsRefresher) Run(ctx context.Context) error {\n\tif r.refreshRate == 0 {\n\t\treturn nil\n\t}\n\treturn wait.RepeatUntil(ctx, r.refreshRate, r.collectStats)\n}", "func (m *Monitor) Stats(ctx *context.Context) {\n\tctx.JSON(m.Holder.GetStats())\n}", "func (iterator *calcInverseIterator) callBack(x float64) float64 {\n\tif iterator.name == \"CHISQ.INV\" {\n\t\treturn iterator.fp - getChiSqDistCDF(x, iterator.fDF)\n\t}\n\treturn iterator.fp - getTDist(x, iterator.fDF, iterator.nT)\n}", "func fstat(t *kernel.Task, f *fs.File, statAddr usermem.Addr) error {\n\tuattr, err := f.UnstableAttr(t)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts := statFromAttrs(t, f.Dirent.Inode.StableAttr, uattr)\n\treturn s.CopyOut(t, statAddr)\n}", "func RunFunc(benchmarkFunc func() int, duration time.Duration, c int) *Result {\n\tworkers := make([]Worker, c)\n\tfor i := 0; i < c; i++ {\n\t\tworkers[i] = &funcWorker{ID: i, benchmarkFunc: benchmarkFunc}\n\t}\n\treturn Run(workers, duration)\n}", "func (m *mapper) run() {\n\tfor m.itr.NextIterval() {\n\t\tm.fn(m.itr, m)\n\t}\n\tclose(m.c)\n}", "func (t *Twemproxy) processStat(\n\tacc telegraf.Accumulator,\n\ttags map[string]string,\n\tdata map[string]interface{},\n) {\n\tif source, ok := data[\"source\"]; ok {\n\t\tif val, ok := source.(string); ok {\n\t\t\ttags[\"source\"] = val\n\t\t}\n\t}\n\n\tfields := make(map[string]interface{})\n\tmetrics := []string{\"total_connections\", \"curr_connections\", \"timestamp\"}\n\tfor _, m := range metrics {\n\t\tif value, ok := data[m]; ok {\n\t\t\tif val, ok := value.(float64); ok {\n\t\t\t\tfields[m] = val\n\t\t\t}\n\t\t}\n\t}\n\tacc.AddFields(\"twemproxy\", fields, tags)\n\n\tfor _, pool := range t.Pools {\n\t\tif poolStat, ok := data[pool]; ok {\n\t\t\tif data, ok := poolStat.(map[string]interface{}); ok {\n\t\t\t\tpoolTags := copyTags(tags)\n\t\t\t\tpoolTags[\"pool\"] = pool\n\t\t\t\tt.processPool(acc, poolTags, data)\n\t\t\t}\n\t\t}\n\t}\n}", "func RecordStats(db *sql.DB) func() {\n\treturn ocsql.RecordStats(db, 10*time.Second)\n}", "func (h *HamtFunctional) Stats() *Stats {\n\treturn h.hamtBase.Stats()\n}", "func (c *Collector) Run() {\n\ttick := time.NewTicker(c.interval)\n\tdefer tick.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-c.done:\n\t\t\treturn\n\t\tcase <-tick.C:\n\t\t\tc.emitStats()\n\t\t}\n\t}\n}", "func (c *Client) Time(stat string, rate float64, f func()) error {\n\tts := 
time.Now()\n\tf()\n\treturn c.Duration(stat, time.Since(ts), rate)\n}", "func (st *httpStats) updateStats(r *http.Request, w *httpResponseRecorder) {\n\t// A successful request has a 2xx response code\n\tsuccessReq := (w.respStatusCode >= 200 && w.respStatusCode < 300)\n\t// Update stats according to method verb\n\tswitch r.Method {\n\tcase \"HEAD\":\n\t\tst.totalHEADs.Inc(1)\n\t\tif successReq {\n\t\t\tst.successHEADs.Inc(1)\n\t\t}\n\tcase \"GET\":\n\t\tst.totalGETs.Inc(1)\n\t\tif successReq {\n\t\t\tst.successGETs.Inc(1)\n\t\t}\n\tcase \"PUT\":\n\t\tst.totalPUTs.Inc(1)\n\t\tif successReq {\n\t\t\tst.successPUTs.Inc(1)\n\t\t}\n\tcase \"POST\":\n\t\tst.totalPOSTs.Inc(1)\n\t\tif successReq {\n\t\t\tst.successPOSTs.Inc(1)\n\t\t}\n\tcase \"DELETE\":\n\t\tst.totalDELETEs.Inc(1)\n\t\tif successReq {\n\t\t\tst.successDELETEs.Inc(1)\n\t\t}\n\t}\n}", "func Stat(ds *ds.Datastore) echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\t\treturn c.String(http.StatusOK, fmt.Sprintf(`{ \"total\": %d }`, ds.TotalHits()))\n\t}\n}", "func (q *QueryBenchmarker) processStats(telemetrySink chan *report.Point) {\n\n\tq.statMapping = StatsMap{\n\t\tAllQueriesLabel: &StatGroup{},\n\t}\n\n\tlastRefresh := time.Time{}\n\ti := uint64(0)\n\tfor stat := range q.statChan {\n\t\tq.isBurnIn = i < q.burnIn\n\t\tif q.isBurnIn {\n\t\t\ti++\n\t\t\tq.statPool.Put(stat)\n\t\t\tcontinue\n\t\t} else if i == q.burnIn && q.burnIn > 0 {\n\t\t\tlog.Printf(\"burn-in complete after %d queries with %d workers\\n\", q.burnIn, q.workers)\n\t\t}\n\n\t\tif _, ok := q.statMapping[string(stat.Label)]; !ok {\n\t\t\tq.statMapping[string(stat.Label)] = &StatGroup{}\n\t\t}\n\n\t\tnow := time.Now()\n\n\t\tif stat.IsActual {\n\t\t\tq.movingAverageStat.Push(now, stat.Value)\n\t\t\tq.statMapping[AllQueriesLabel].Push(stat.Value)\n\t\t\tq.statMapping[string(stat.Label)].Push(stat.Value)\n\t\t\ti++\n\t\t}\n\n\t\tq.statPool.Put(stat)\n\n\t\tif lastRefresh.Nanosecond() == 0 || now.Sub(lastRefresh).Seconds() >= 1.0 {\n\t\t\tq.movingAverageStat.UpdateAvg(now, q.workers)\n\t\t\tlastRefresh = now\n\t\t\t// Report telemetry, if applicable:\n\t\t\tif telemetrySink != nil {\n\t\t\t\tp := report.GetPointFromGlobalPool()\n\t\t\t\tp.Init(\"benchmarks_telemetry\", now.UnixNano())\n\t\t\t\tfor _, tagpair := range q.reportTags {\n\t\t\t\t\tp.AddTag(tagpair[0], tagpair[1])\n\t\t\t\t}\n\t\t\t\tp.AddTag(\"client_type\", \"query\")\n\t\t\t\tp.AddFloat64Field(\"query_response_time_mean\", q.statMapping[AllQueriesLabel].Mean)\n\t\t\t\tp.AddFloat64Field(\"query_response_time_moving_mean\", q.movingAverageStat.Avg())\n\t\t\t\tp.AddIntField(\"query_workers\", q.workers)\n\t\t\t\tp.AddInt64Field(\"queries\", int64(i))\n\t\t\t\ttelemetrySink <- p\n\t\t\t}\n\t\t}\n\t\t// print stats to stderr (if printInterval is greater than zero):\n\t\tif q.printInterval > 0 && i > 0 && i%q.printInterval == 0 && (int64(i) < q.limit || q.limit < 0) {\n\t\t\tlog.Printf(\"%s: after %d queries with %d workers:\\n\", time.Now().String(), i-q.burnIn, q.workers)\n\t\t\tfprintStats(os.Stderr, q)\n\t\t\tlog.Printf(\"\\n\")\n\t\t}\n\n\t}\n\n\tlog.Printf(\"run complete after %d queries with %d workers:\\n\", i-q.burnIn, q.workers)\n\tq.totalQueries = int(i)\n\tq.statGroup.Done()\n}", "func addMetric(s *events.EventStream, metric string, period time.Duration, fn func(*swarming_api.SwarmingRpcsTaskRequestMetadata) (int64, error)) error {\n\ttags := map[string]string{\n\t\t\"metric\": metric,\n\t}\n\tf := func(ev []*events.Event) ([]map[string]string, []float64, error) {\n\t\tsklog.Infof(\"Computing 
value(s) for metric %q\", metric)\n\t\tif len(ev) == 0 {\n\t\t\treturn []map[string]string{}, []float64{}, nil\n\t\t}\n\t\ttasks, err := decodeTasks(ev)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\ttagSets := map[string]map[string]string{}\n\t\ttotals := map[string]int64{}\n\t\tcounts := map[string]int{}\n\t\tfor _, t := range tasks {\n\t\t\tval, err := fn(t)\n\t\t\tif err == errNoValue {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t\ttags := map[string]string{\n\t\t\t\t\"task-name\": t.TaskResult.Name,\n\t\t\t}\n\t\t\tfor d := range DIMENSION_WHITELIST {\n\t\t\t\ttags[d] = \"\"\n\t\t\t}\n\t\t\tfor _, dim := range t.Request.Properties.Dimensions {\n\t\t\t\tif _, ok := DIMENSION_WHITELIST[dim.Key]; ok {\n\t\t\t\t\ttags[dim.Key] = dim.Value\n\t\t\t\t}\n\t\t\t}\n\t\t\tkey, err := util.MD5Params(tags)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t\ttagSets[key] = tags\n\t\t\ttotals[key] += val\n\t\t\tcounts[key]++\n\t\t}\n\t\ttagSetsList := make([]map[string]string, 0, len(tagSets))\n\t\tvals := make([]float64, 0, len(tagSets))\n\t\tfor key, tags := range tagSets {\n\t\t\ttagSetsList = append(tagSetsList, tags)\n\t\t\tvals = append(vals, float64(totals[key])/float64(counts[key]))\n\t\t}\n\t\treturn tagSetsList, vals, nil\n\t}\n\treturn s.DynamicMetric(tags, period, f)\n}", "func startStats(l *logrus.Logger, c *config.C, buildVersion string, configTest bool) (func(), error) {\n\tmType := c.GetString(\"stats.type\", \"\")\n\tif mType == \"\" || mType == \"none\" {\n\t\treturn nil, nil\n\t}\n\n\tinterval := c.GetDuration(\"stats.interval\", 0)\n\tif interval == 0 {\n\t\treturn nil, fmt.Errorf(\"stats.interval was an invalid duration: %s\", c.GetString(\"stats.interval\", \"\"))\n\t}\n\n\tvar startFn func()\n\tswitch mType {\n\tcase \"graphite\":\n\t\terr := startGraphiteStats(l, interval, c, configTest)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tcase \"prometheus\":\n\t\tvar err error\n\t\tstartFn, err = startPrometheusStats(l, interval, c, buildVersion, configTest)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"stats.type was not understood: %s\", mType)\n\t}\n\n\tmetrics.RegisterDebugGCStats(metrics.DefaultRegistry)\n\tmetrics.RegisterRuntimeMemStats(metrics.DefaultRegistry)\n\n\tgo metrics.CaptureDebugGCStats(metrics.DefaultRegistry, interval)\n\tgo metrics.CaptureRuntimeMemStats(metrics.DefaultRegistry, interval)\n\n\treturn startFn, nil\n}", "func (b *BlockProcessorQueue) Stat() StatResponse {\n\n\tresp := make(chan StatResponse)\n\treq := Stat{ResponseChan: resp}\n\n\tb.StatChan <- req\n\treturn <-resp\n\n}", "func Call(f func()) {\n\tdone := dPool.Get().(chan struct{})\n\tdefer dPool.Put(done)\n\tfq <- fun{fn: f, done: done}\n\t<-done\n}", "func (b *Buffered) F(f string, v ...interface{}) {\n\tb.prepare()\n\tb.Lock()\n\tfmt.Fprintf(b.bb, f+\"\\n\", v...)\n\tb.Unlock()\n}", "func (cb *printcb) outputStat(stats map[string]string) error {\n\tidx := len(*cb)\n\t*cb = append(*cb, FileDetails{})\n\tdetails := &(*cb)[idx]\n\tfor key, value := range stats {\n\t\tif err := setTaggedField(details, key, value, false); err != nil {\n\t\t\tglog.Warningf(\"Couldn't set field %v: %v\", key, err)\n\t\t}\n\t}\n\treturn nil\n}", "func callback(srv *rospy_tutorials.AddTwoInts) error {\n\tsrv.Response.Sum = srv.Request.A + srv.Request.B\n\treturn nil\n}", "func (s *Serializer) F(f func(s *Serializer)) *Serializer { return s.FC(f, true) }", "func call(f CallBack) 
{\n\tif f != nil {\n\t\tf()\n\t}\n}", "func statFunc(cmd *cobra.Command, args []string) {\n\tsession := getAwsSession()\n\tmanager := db.NewManager(rds.New(session))\n\tname := args[0]\n\n\ti, err := manager.Stat(name)\n\tif err != nil {\n\t\tfmt.Printf(\"%s: %s\\n\", name, getAwsError(err))\n\t\treturn\n\t}\n\n\tfmt.Println(i)\n}", "func (table *ConcurrentHashMap) CallBackUpdate(key KeyType, cb func(ValueType) ValueType) {\n\thashValue := table.mhash(key)\n\tshard := table.getShard(hashValue)\n\n\ttable.RWLocks[shard].Lock()\n\n\texists, value := table.shards[shard].shardGetVal(key, hashValue)\n\tif exists {\n\t\ttable.shards[shard].shardSet(key, hashValue, cb(value))\n\t}\n\n\ttable.RWLocks[shard].Unlock()\n\n}", "func invoke(f func(float64, float64) float64) float64 {\n\treturn f(0.23, 0.9)\n}", "func (wtc *Watcher) Run(f func(*Watcher) error) {\n\tgo func() {\n\t\tfor {\n\t\t\tt := wtc.nextTimer()\n\t\t\t<-t.C\n\t\t\tif err := f(wtc); err != nil {\n\t\t\t\tlogger.Printf(\"RunPeriod, %s\", err)\n\t\t\t}\n\t\t}\n\t}()\n}", "func callWithCount(fn countFn) error {\n\tpath, err := path(countFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcontents, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar n int = 0\n\tif len(contents) != 0 {\n\t\tn, err = strconv.Atoi(string(contents))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif n < 0 {\n\t\t\treturn errNegativeCount\n\t\t}\n\t}\n\n\tafter, err := fn(n)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// We want to write over the contents in the file, so \"truncate\" the\n\t// file to a length of 0, and then seek to the beginning of the file to\n\t// update the write head.\n\tif err := f.Truncate(0); err != nil {\n\t\treturn err\n\t}\n\tif _, err := f.Seek(0, io.SeekStart); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := fmt.Fprintf(f, \"%d\", after); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *Server) statproc() {\n\tvar (\n\t\tv *volume.Volume\n\t\tolds *stat.Stats\n\t\tnews = new(stat.Stats)\n\t)\n\tfor {\n\t\tolds = s.info.Stats\n\t\t*news = *olds\n\t\ts.info.Stats = news // use news instead, for current display\n\t\tolds.Reset()\n\t\tfor _, v = range s.store.Volumes {\n\t\t\tv.Stats.Calc()\n\t\t\tolds.Merge(v.Stats)\n\t\t}\n\t\tolds.Calc()\n\t\ts.info.Stats = olds\n\t\ttime.Sleep(statDuration)\n\t}\n}", "func (m *Metric) Func() echo.MiddlewareFunc {\n\treturn func(next echo.HandlerFunc) echo.HandlerFunc {\n\t\treturn func(c *echo.Context) (err error) {\n\t\t\tbeg := time.Now()\n\t\t\terr = next(c)\n\t\t\tcost := time.Since(beg)\n\t\t\tm.update(nameFn(c.Request().Method+c.PatternPath()), cost, map[string]string{\"if_type\": \"http\"})\n\t\t\tm.update(\"http\", cost, map[string]string{\"if_type\": \"http\"})\n\t\t\treturn\n\t\t}\n\t}\n}", "func GetStats(sleep int, iterations int64) {\n\tfor j := int64(0); j < iterations; j++ {\n\t\tmatches, err := filepath.Glob(\"/proc/[0-9]*/stat\")\n\t\tif err != nil {\n\t\tif(debug) {\n\t\t\tfmt.Println(\"err in: func GetStats\")\n\t\t}\n\t\t\tcontinue\n//\t\t\tpanic(err)\n\t\t}\n\n\t\tfor i := range matches {\n\t\t\tif 1 == 0 {\n\t\t\t\tfmt.Printf(\"matches[%v]=\\t%v\\n\", i, matches[i])\n\t\t\t}\n\t\t\tProcRead(matches[i])\n\t\t}\n\t\ttime.Sleep(time.Duration(sleep) * time.Second)\n\t\t//time.Sleep(1000 * time.Millisecond)\n\t}\n}", "func (fmt shadower) f() {}", "func logStats() {\n\tif requestsReceived > 0 {\n\t\tavgRequestMs := 
strconv.FormatFloat(totalRequestTime/float64(requestsReceived), 'f', 3, 64)\n\t\tlogger.Printf(\"%d requests, %d bytes received (avg. %sms)\\n\", requestsReceived, bytesReceived, avgRequestMs)\n\t}\n\trequestsReceived = 0\n\tbytesReceived = 0\n\ttotalRequestTime = 0.0\n}", "func (s *Stream) Stats() Stat {\n\treturn Stat{\n\t\tTotalWorkersRunning: atomic.LoadInt64(&s.workersUp),\n\t\tTotalWorkers: int64(s.workers + 1),\n\t\tPending: atomic.LoadInt64(&s.pending),\n\t\tCompleted: atomic.LoadInt64(&s.processed),\n\t\tClosed: atomic.LoadInt64(&s.closed),\n\t}\n}", "func (l *Listener) Count(name string, value float64, timestamp time.Time, endpoint string) {\n\tif len(endpoint) == 0 {\n\t\tlog.Println(\"Error - internal metrics referenced empty endpoint\")\n\t\treturn\n\t}\n\n\tl.ch <- Metric{\n\t\tKey: NewMetricKeyForApiCall(name, timestamp, endpoint),\n\t\tStats: awscsmmetrics.NewStatisticSet(value),\n\t}\n}", "func CounterStats() {\n\tfor {\n\t\tselect {\n\t\tcase c := <-Stats:\n\t\t\tif c.Read != 0 {\n\t\t\t\tatomic.AddInt64(&readLogCnt, c.Read)\n\t\t\t}\n\t\t\tif c.Sent != 0 {\n\t\t\t\tatomic.AddInt64(&sentLogCnt, c.Sent)\n\t\t\t}\n\t\t}\n\t}\n}", "func CallVal(f func() interface{}) interface{} {\n\tcheckRun()\n\trespChan := make(chan interface{})\n\tcallQueue <- func() {\n\t\trespChan <- f()\n\t}\n\treturn <-respChan\n}", "func printOperations(a int, f func(int) int) {\n\tfmt.Println(f(a))\n}", "func (filterdev *NetworkTap) Stats() (*syscall.BpfStat, error) {\n\tvar s syscall.BpfStat\n\t_, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(filterdev.device.Fd()), syscall.BIOCGSTATS, uintptr(unsafe.Pointer(&s)))\n\tif err != 0 {\n\t\treturn nil, syscall.Errno(err)\n\t}\n\treturn &s, nil\n}", "func (o Oscar) IterateResults(f func(string, int, int, int, time.Duration, time.Duration, time.Duration)) {\n\tfor i, ts := range o.Suits {\n\t\tfor _, tc := range ts.GetCases() {\n\t\t\tcntErr := 0\n\t\t\tif tc.Error != nil {\n\t\t\t\tcntErr = 1\n\t\t\t}\n\t\t\tif cntErr == 0 && tc.CountAssertSuccess == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\telapsedTotal, elapsedHTTP, elapsedSleep := tc.Elapsed()\n\n\t\t\tf(\n\t\t\t\to.prefix(i)+tc.Name,\n\t\t\t\ttc.CountAssertSuccess,\n\t\t\t\tcntErr,\n\t\t\t\ttc.CountRemoteRequests,\n\t\t\t\telapsedTotal,\n\t\t\t\telapsedHTTP,\n\t\t\t\telapsedSleep,\n\t\t\t)\n\t\t}\n\t}\n}", "func (c *Context) Count(stat string, count float64) {\n\tfor _, sink := range c.sinks {\n\t\tsink.Count(c, stat, count)\n\t}\n}", "func (f *Function) OnResult(fn func(interface{})) { f.resHandler = fn }", "func (h *Health) HandlerFunc(w http.ResponseWriter, r *http.Request) {\n\tc := h.Measure(r.Context())\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tdata, err := json.Marshal(c)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tcode := http.StatusOK\n\tif c.Status == StatusUnavailable {\n\t\tcode = http.StatusServiceUnavailable\n\t}\n\tw.WriteHeader(code)\n\tw.Write(data)\n}", "func (l *Logger) Stats(resolution time.Duration) (LogStats, error) {\n\tstats, err := l.client.readLogStats()\n\tif err != nil {\n\t\treturn LogStats{}, err\n\t}\n\tevents := make([]LogEvent, 0, len(stats.Events))\n\tvar last *LogEvent\n\tfor _, le := range stats.Events {\n\t\tnext := LogEvent{\n\t\t\tTime: time.Unix(le.Time, 0).UTC(),\n\t\t\tCount: le.Count,\n\t\t}\n\t\tif last != nil && next.Time.Before(last.Time.Add(resolution)) {\n\t\t\tlast.Count += next.Count\n\t\t} else {\n\t\t\tevents = 
append(events, next)\n\t\t\tlast = &events[len(events)-1]\n\t\t}\n\t}\n\treturn LogStats{\n\t\tSince: time.Unix(stats.Since, 0).UTC(),\n\t\tTotal: stats.Total,\n\t\tHijacked: stats.Hijacked,\n\t\tPendingTasks: len(l.queue),\n\t\tEvents: events,\n\t}, nil\n}", "func (r Repository) Stats(commit CommitInfo, opt *StatsOptions) (CommitStats, error) {\n\tif opt == nil {\n\t\topt = &StatsOptions{}\n\t}\n\n\thash := commit.Hash.String()\n\targs := []string{\"diff\", \"--numstat\", hash, hash + \"^\"}\n\tout, err := r.run(nil, opt.Timeout, args...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstats := CommitStats{}\n\tfor _, line := range strings.Split(out, \"\\n\") {\n\t\tif out == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tparts := strings.Split(line, \"\\t\")\n\t\tif len(parts) != 3 {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse stat line: '%v'\", line)\n\t\t}\n\t\tinsertions, deletions := 0, 0\n\t\tif parts[0] != \"-\" {\n\t\t\tinsertions, err = strconv.Atoi(parts[0])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to stat insertions '%v': %w\", parts[0], err)\n\t\t\t}\n\t\t}\n\t\tif parts[1] != \"-\" {\n\t\t\tdeletions, err = strconv.Atoi(parts[1])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to stat deletions '%v': %w\", parts[1], err)\n\t\t\t}\n\t\t}\n\t\tfile := parts[2]\n\t\tstats[file] = FileStats{Insertions: insertions, Deletions: deletions}\n\t}\n\treturn stats, nil\n}", "func NewFuncStat(name string) *FuncStat {\n\tvar stat = new(FuncStat)\n\tstat.Name = name\n\tstat.Worker = NewCounter(0)\n\tstat.Job = NewCounter(0)\n\tstat.Processing = NewCounter(0)\n\treturn stat\n}", "func (p *FuncPool) DumpUPFLatencyStats(fID, imageName, functionName, latencyOutFilePath string) error {\n\tf := p.getFunction(fID, imageName)\n\n\treturn f.DumpUPFLatencyStats(functionName, latencyOutFilePath)\n}", "func (s *Service) Stats(r *http.Request, args *StatsArgs, result *StatsResponse) error {\n\tif args.UserID == \"\" {\n\t\tresult.Error = uidMissing\n\t\treturn nil\n\t}\n\tresult.Whole = -1\n\tresult.Bookmarks = -1\n\tresult.Pim = -1\n\tresult.Org = -1\n\tcoll := s.Session.DB(MentatDatabase).C(args.UserID)\n\twholeCount, err := coll.Count()\n\tif err != nil {\n\t\tresult.Error = fmt.Sprintf(\"failed getting stats/whole count: %s\", err)\n\t\treturn nil\n\t}\n\tresult.Whole = wholeCount\n\tif args.Detailed {\n\t\tvar entries []Entry\n\t\terr := coll.Find(bson.M{\"type\": \"bookmark\"}).All(&entries)\n\t\tif err != nil {\n\t\t\tresult.Error = fmt.Sprintf(\"failed getting stats/bookmarks count: %s\", err)\n\t\t\treturn nil\n\t\t}\n\t\tresult.Bookmarks = len(entries)\n\t\terr = coll.Find(bson.M{\"type\": \"pim\"}).All(&entries)\n\t\tif err != nil {\n\t\t\tresult.Error = fmt.Sprintf(\"failed getting stats/pim count: %s\", err)\n\t\t\treturn nil\n\t\t}\n\t\tresult.Pim = len(entries)\n\t\terr = coll.Find(bson.M{\"type\": \"org\"}).All(&entries)\n\t\tif err != nil {\n\t\t\tresult.Error = fmt.Sprintf(\"failed getting stats/org count: %s\", err)\n\t\t\treturn nil\n\t\t}\n\t\tresult.Org = len(entries)\n\t}\n\treturn nil\n}", "func (e *Engine) HandleStat(req *Request, res ResponseWriter) {\n\tio.WriteString(res, e.Stat.String())\n}", "func simplyRun(f func()) {\n\tgo f()\n}", "func (s ForStat) ProcessStat(p StatProcessor) {\n\tp.ProcessForStat(s)\n}", "func (nf *NetFlowV5Target) SendStats(stats flow.Stats) {\n}", "func Statistic(fileNames []string, colID int, wantMedian bool, numWorkers int) []stat {\n\n jobs := make(chan job, numWorkers)\n result := make(chan stat, len(fileNames))\n 
done := make(chan doneStatus, numWorkers)\n\n go add_jobs(fileNames, colID, wantMedian, jobs, result)\n for i := 0; i < numWorkers; i++ {\n go start_jobs(done, jobs)\n }\n\n return wait_and_process_results(result, done, numWorkers)\n}", "func sum(b *bolt.Bucket, fn func([]byte) int) (int, error) {\n\tsum := 0\n\terr := b.ForEach(func(_, v []byte) error {\n\t\tsum += fn(v)\n\t\treturn nil\n\t})\n\treturn sum, err\n}", "func (csw *ChannelStatsWatcher) Run(ctx context.Context) {\n\tflushed, unregister := csw.statser.RegisterFlush()\n\tdefer unregister()\n\n\tticker := time.NewTicker(csw.sampleInterval)\n\tdefer ticker.Stop()\n\n\tcsw.sample()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-flushed:\n\t\t\tcsw.emit()\n\t\t\tcsw.sample() // Ensure there will always be at least one sample\n\t\tcase <-ticker.C:\n\t\t\tcsw.sample()\n\t\t}\n\t}\n}", "func (f *FakeWatchPrefixes) Stats() map[string][]watchstream.WatchQueueStat {\n\treturn nil\n}", "func (s *service) Stats() Stats {\n\ts.m.Lock()\n\tdefer s.m.Unlock()\n\n\tstats := Stats{\n\t\tServiceIdentity: s.serviceIdentity(),\n\t\tEndpoints: make([]*EndpointStats, 0),\n\t\tType: StatsResponseType,\n\t\tStarted: s.started,\n\t}\n\tfor _, endpoint := range s.endpoints {\n\t\tendpointStats := &EndpointStats{\n\t\t\tName: endpoint.stats.Name,\n\t\t\tSubject: endpoint.stats.Subject,\n\t\t\tNumRequests: endpoint.stats.NumRequests,\n\t\t\tNumErrors: endpoint.stats.NumErrors,\n\t\t\tLastError: endpoint.stats.LastError,\n\t\t\tProcessingTime: endpoint.stats.ProcessingTime,\n\t\t\tAverageProcessingTime: endpoint.stats.AverageProcessingTime,\n\t\t}\n\t\tif s.StatsHandler != nil {\n\t\t\tdata, _ := json.Marshal(s.StatsHandler(endpoint))\n\t\t\tendpointStats.Data = data\n\t\t}\n\t\tstats.Endpoints = append(stats.Endpoints, endpointStats)\n\t}\n\treturn stats\n}" ]
[ "0.7039276", "0.68317056", "0.598775", "0.56577134", "0.5528054", "0.5501102", "0.547875", "0.547735", "0.54391086", "0.53634286", "0.5234006", "0.52033424", "0.5166441", "0.5160784", "0.50803477", "0.50575864", "0.5019292", "0.501574", "0.49954444", "0.49947128", "0.49840668", "0.49489495", "0.49452847", "0.49388075", "0.49226108", "0.4912857", "0.48936906", "0.48858523", "0.4881571", "0.48600906", "0.48565122", "0.4852594", "0.48298296", "0.47995847", "0.47826424", "0.47748882", "0.47583225", "0.47489953", "0.47457677", "0.47313228", "0.47267544", "0.47177297", "0.47174644", "0.4715183", "0.47121754", "0.47081614", "0.47061023", "0.47014776", "0.46885422", "0.46867907", "0.46858737", "0.46836177", "0.46768457", "0.46754462", "0.46735984", "0.4667514", "0.46654496", "0.4662765", "0.465714", "0.46506715", "0.46475795", "0.46429014", "0.46361795", "0.4636043", "0.46311402", "0.4628694", "0.46240947", "0.46065983", "0.45997876", "0.45933497", "0.45895535", "0.45772067", "0.45741203", "0.45638686", "0.4562953", "0.45608878", "0.45497435", "0.4549593", "0.45472318", "0.45455542", "0.4542214", "0.45359695", "0.4535944", "0.45357344", "0.45347404", "0.45324093", "0.45309916", "0.45298576", "0.45241946", "0.4523797", "0.45124224", "0.4509826", "0.45094112", "0.45079967", "0.44975346", "0.44950688", "0.44914895", "0.44828737", "0.44827893", "0.4478676" ]
0.79727346
0
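
The record ending above pairs a load-generation/timing harness (its tail is the first snippet in this section) with stats-collection negatives. The core pattern in that harness — a non-blocking channel send whose `default` branch counts a drop instead of blocking the producer — is easier to see in isolation. The sketch below is illustrative only and is not part of the dataset; `runTimed` and `result` are invented names.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// result collects what the sketch measures: per-call durations and
// how many requests were shed because the queue was full.
type result struct {
	mu        sync.Mutex
	durations []time.Duration
	drops     int64
}

// runTimed enqueues up to n timestamped requests into a bounded queue,
// dropping (rather than blocking) when the queue is full, while par
// workers drain the queue and time each call to f.
func runTimed(f func(), n, par, queueLen int) *result {
	res := &result{}
	requests := make(chan time.Time, queueLen)

	go func() {
		defer close(requests)
		for i := 0; i < n; i++ {
			select {
			case requests <- time.Now():
			default: // queue full: drop, as in the harness's res.drops++
				res.drops++
			}
		}
	}()

	var wg sync.WaitGroup
	wg.Add(par)
	for i := 0; i < par; i++ {
		go func() {
			defer wg.Done()
			for range requests {
				start := time.Now()
				f()
				d := time.Since(start)
				res.mu.Lock()
				res.durations = append(res.durations, d)
				res.mu.Unlock()
			}
		}()
	}
	wg.Wait() // the producer has closed the channel by the time workers exit
	return res
}

func main() {
	res := runTimed(func() { time.Sleep(time.Millisecond) }, 100, 4, 8)
	fmt.Printf("completed=%d dropped=%d\n", len(res.durations), res.drops)
}
```

Because the producer goroutine is the only writer of `drops` and it finishes before `close(requests)`, reading `drops` after `wg.Wait()` is race-free without atomics.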
PrefixStats will call cb with all of the same calls obj would have made, except every name is prefixed with name.
func PrefixStats(name string, obj Monitor, cb func(name string, val float64)) { obj.Stats(func(sub_name string, val float64) { cb(fmt.Sprintf("%s.%s", name, sub_name), val) }) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (hb *httpConnManagerBuilder) StatsPrefix(statsPrefix string) *httpConnManagerBuilder {\n\thb.statsPrefix = statsPrefix\n\treturn hb\n}", "func (tb *tcpProxyBuilder) StatsPrefix(statsPrefix string) *tcpProxyBuilder {\n\ttb.statsPrefix = statsPrefix\n\treturn tb\n}", "func (fb *filterBuilder) StatsPrefix(statsPrefix string) *filterBuilder {\n\tfb.statsPrefix = statsPrefix\n\treturn fb\n}", "func (f *FakeWatchPrefixes) Stats() map[string][]watchstream.WatchQueueStat {\n\treturn nil\n}", "func (o ApplicationStatusSyncComparedToSourceKustomizeOutput) NamePrefix() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusSyncComparedToSourceKustomize) *string { return v.NamePrefix }).(pulumi.StringPtrOutput)\n}", "func (o ApplicationStatusHistorySourceKustomizeOutput) NamePrefix() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusHistorySourceKustomize) *string { return v.NamePrefix }).(pulumi.StringPtrOutput)\n}", "func (o ApplicationStatusOperationStateSyncResultSourceKustomizeOutput) NamePrefix() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusOperationStateSyncResultSourceKustomize) *string { return v.NamePrefix }).(pulumi.StringPtrOutput)\n}", "func (o ApplicationOperationSyncSourceKustomizeOutput) NamePrefix() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationOperationSyncSourceKustomize) *string { return v.NamePrefix }).(pulumi.StringPtrOutput)\n}", "func (o ApplicationStatusOperationStateOperationSyncSourceKustomizeOutput) NamePrefix() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusOperationStateOperationSyncSourceKustomize) *string { return v.NamePrefix }).(pulumi.StringPtrOutput)\n}", "func (o UserPolicyOutput) NamePrefix() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *UserPolicy) pulumi.StringPtrOutput { return v.NamePrefix }).(pulumi.StringPtrOutput)\n}", "func (o TriggerOutput) NamePrefix() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Trigger) pulumi.StringPtrOutput { return v.NamePrefix }).(pulumi.StringPtrOutput)\n}", "func (o ApplicationSpecSourceKustomizeOutput) NamePrefix() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationSpecSourceKustomize) *string { return v.NamePrefix }).(pulumi.StringPtrOutput)\n}", "func (o ApplicationStatusSyncComparedToSourceKustomizePtrOutput) NamePrefix() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ApplicationStatusSyncComparedToSourceKustomize) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.NamePrefix\n\t}).(pulumi.StringPtrOutput)\n}", "func (o BucketMetricFilterOutput) Prefix() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BucketMetricFilter) *string { return v.Prefix }).(pulumi.StringPtrOutput)\n}", "func (o ApplicationStatusOperationStateSyncResultSourceKustomizePtrOutput) NamePrefix() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ApplicationStatusOperationStateSyncResultSourceKustomize) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.NamePrefix\n\t}).(pulumi.StringPtrOutput)\n}", "func print_prefix_name(p int32)(str string){\nl:=name_dir[p].name[0]\nstr= fmt.Sprint(string(name_dir[p].name[1:]))\nif int(l)<len(name_dir[p].name){\nstr+= \"...\"\n}\nreturn\n}", "func (o ApplicationStatusHistorySourceKustomizePtrOutput) NamePrefix() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ApplicationStatusHistorySourceKustomize) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.NamePrefix\n\t}).(pulumi.StringPtrOutput)\n}", "func postStats(prefix, ezKey string, 
hits <-chan *loghit.LogHit) {\n\tfor hit := range hits {\n\t\tvar stat string\n\t\tif len(prefix) > 0 {\n\t\t\tstat = fmt.Sprintf(\"%s: HTTP %d\", prefix, hit.Status)\n\t\t} else {\n\t\t\tstat = fmt.Sprintf(\"HTTP %d\", hit.Status)\n\t\t}\n\t\tstathat.PostEZCountTime(stat, ezKey, 1, hit.LocalTime.Unix())\n\t}\n}", "func (o BucketMetricFilterPtrOutput) Prefix() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *BucketMetricFilter) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Prefix\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ApplicationStatusOperationStateOperationSyncSourceKustomizePtrOutput) NamePrefix() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ApplicationStatusOperationStateOperationSyncSourceKustomize) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.NamePrefix\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ApplicationOperationSyncSourceKustomizePtrOutput) NamePrefix() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ApplicationOperationSyncSourceKustomize) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.NamePrefix\n\t}).(pulumi.StringPtrOutput)\n}", "func (o TargetGroupOutput) NamePrefix() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v TargetGroup) *string { return v.NamePrefix }).(pulumi.StringPtrOutput)\n}", "func (m *CallStats) Increment(d time.Duration) {\n\ttook := d.Round(RoundDuration).Seconds()\n\tm.Count++\n\tm.Total = round(m.Total + took)\n\tm.Avg = round(m.Total / float64(m.Count))\n\tif took > m.Max {\n\t\tm.Max = took\n\t}\n\tif m.Min == 0 || took < m.Min {\n\t\tm.Min = took\n\t}\n}", "func NamePrefix() string {\n\treturn getRandValue([]string{\"person\", \"prefix\"})\n}", "func (f MonitorFunc) Stats(cb func(name string, val float64)) { f(cb) }", "func (r *LogGroup) NamePrefix() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"namePrefix\"])\n}", "func BenchmarkPrefixSomer(b *testing.B) {\n\n\ttrie := buildIntegrationTree()\n\n\tfor i := 0; i < b.N; i++ {\n\t\ttrie.PrefixSearch(\"somer\")\n\t}\n}", "func (mock *PluginerMock) NameCalls() []struct {\n} {\n\tvar calls []struct {\n\t}\n\tmock.lockName.RLock()\n\tcalls = mock.calls.Name\n\tmock.lockName.RUnlock()\n\treturn calls\n}", "func (s *Basememcached_protocolListener) EnterStatistic_name(ctx *Statistic_nameContext) {}", "func (o ApplicationSpecSourceKustomizePtrOutput) NamePrefix() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ApplicationSpecSourceKustomize) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.NamePrefix\n\t}).(pulumi.StringPtrOutput)\n}", "func (f *fakeProgressbar) SetPrefix(format string, args ...interface{}) {\n\tf.prefix = fmt.Sprintf(format, args...)\n}", "func (fn *FakeName) Prefix() string {\n\treturn random.PickString(fn.Prefixs)\n}", "func (s *StatsdClient) GetByPrefix(prefix string) map[string]int64 {\n\tresult := make(map[string]int64)\n\n\ts.lock.RLock()\n\tdefer s.lock.RUnlock()\n\n\tfor key, value := range s.counts {\n\t\tif strings.HasPrefix(key, prefix) {\n\t\t\tk := strings.Replace(key, prefix, \"\", -1)\n\t\t\tresult[k] = value\n\t\t}\n\t}\n\n\treturn result\n}", "func PrefixedName(providerPrefix, name string) string {\n\treturn fmt.Sprintf(\"%v.%v\", providerPrefix, name)\n}", "func PrefixRenamer(prefix string) Renamer {\n\treturn func(name string) string {\n\t\treturn prefix + name\n\t}\n}", "func testStatsAndBucketstats(t *testing.T) {\n\tvar (\n\t\tmyClient1 clientStats\n\t\tmyUniqueClient1 = \"1111111\"\n\n\t\tmyClient2 clientStats\n\t\tmyUniqueClient2 = 
\"2222222\"\n\t)\n\n\t// Register from bucketstats from pfsagent #1\n\tbucketstats.Register(\"proxyfs.retryrpc\", myUniqueClient1, &myClient1)\n\n\t// Register from bucketstats from pfsagent #2\n\tbucketstats.Register(\"proxyfs.retryrpc\", myUniqueClient2, &myClient2)\n\n\t// Completed list stats\n\tmyClient1.AddCompleted.Add(1)\n\tmyClient1.RmCompleted.Add(1)\n\n\t// RPC counts\n\tmyClient1.RPCcompleted.Add(1)\n\tmyClient1.RPCretried.Add(1)\n\tmyClient1.RPCattempted.Add(1)\n\tmyClient1.RPCinprocess.Add(1)\n\n\t// Track duration of all RPCs in a graph\n\tstart := time.Now()\n\ttime.Sleep(10 * time.Millisecond)\n\tmyClient1.TimeOfRPCUsec.Add(uint64(time.Since(start).Microseconds()))\n\tmyClient1.ReplySize.Add(8192)\n\n\t// Example of pfsagent #2\n\tmyClient2.RPCcompleted.Add(1)\n\tmyClient2.RPCretried.Add(1)\n\tmyClient2.RPCattempted.Add(1)\n\tmyClient2.RPCinprocess.Add(1)\n\n\t// Dump stats\n\t/* DEBUG ONLY -\n\tfmt.Printf(\"pfsagent #1: %s\\n\", bucketstats.SprintStats(bucketstats.StatFormatParsable1, \"proxyfs.retryrpc\", myUniqueClient1))\n\tfmt.Printf(\"pfsagent #2: %s\\n\", bucketstats.SprintStats(bucketstats.StatFormatParsable1, \"proxyfs.retryrpc\", myUniqueClient2))\n\t*/\n\n\t// Unregister clients from bucketstats\n\tbucketstats.UnRegister(\"proxyfs.retryrpc\", myUniqueClient1)\n\tbucketstats.UnRegister(\"proxyfs.retryrpc\", myUniqueClient2)\n}", "func (rl *RateLimiter) SendStats() error {\n\tfor ruleID, stats := range rl.GetStats() {\n\t\truleIDTag := fmt.Sprintf(\"rule_id:%s\", ruleID)\n\t\tfor _, stat := range stats {\n\t\t\ttags := []string{ruleIDTag}\n\t\t\tif len(stat.Tags) > 0 {\n\t\t\t\ttags = append(tags, stat.Tags...)\n\t\t\t}\n\n\t\t\tif stat.Dropped > 0 {\n\t\t\t\tif err := rl.statsdClient.Count(metrics.MetricRateLimiterDrop, int64(stat.Dropped), tags, 1.0); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tif stat.Allowed > 0 {\n\t\t\t\tif err := rl.statsdClient.Count(metrics.MetricRateLimiterAllow, int64(stat.Allowed), tags, 1.0); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (n *Name) Prefix() string {\n\t// TODO handle gender\n\treturn n.pick(namePrefix + \"/prefix\")\n}", "func (c Node) NamePrefix() string {\n\treturn fmt.Sprintf(\"bpm-%s-\", c.ID)\n}", "func Prefix(target Logger, f string, v ...interface{}) Logger {\n\tp := fmt.Sprintf(f, v...)\n\n\treturn prefixer{\n\t\ttarget,\n\t\tp,\n\t\tstrings.ReplaceAll(p, \"%\", \"%%\"),\n\t}\n}", "func fprintStats(w io.Writer, q *QueryBenchmarker) {\n\tmaxKeyLength := 0\n\tkeys := make([]string, 0, len(q.statMapping))\n\tfor k := range q.statMapping {\n\t\tif len(k) > maxKeyLength {\n\t\t\tmaxKeyLength = len(k)\n\t\t}\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\tfor _, k := range keys {\n\t\tv := q.statMapping[k]\n\t\tminRate := 1e3 / v.Min\n\t\tmeanRate := 1e3 / v.Mean\n\t\tmaxRate := 1e3 / v.Max\n\t\tpaddedKey := fmt.Sprintf(\"%s\", k)\n\t\tfor len(paddedKey) < maxKeyLength {\n\t\t\tpaddedKey += \" \"\n\t\t}\n\t\tkStats := make(map[string]interface{})\n\t\tkStats[\"min\"] = v.Min\n\t\tkStats[\"minRate\"] = minRate\n\t\tkStats[\"mean\"] = v.Mean\n\t\tkStats[\"meanRate\"] = meanRate\n\t\tkStats[\"max\"] = v.Max\n\t\tkStats[\"maxRate\"] = maxRate\n\t\tkStats[\"count\"] = v.Count\n\t\tkStats[\"sum\"] = v.Sum / 1e3\n\t\tq.json[k] = kStats\n\t\tif !q.doJson {\n\t\t\t_, err := fmt.Fprintf(w, \"%s : min: %8.2fms (%7.2f/sec), mean: %8.2fms (%7.2f/sec), max: %7.2fms (%6.2f/sec), count: %8d, sum: %5.1fsec \\n\", paddedKey, v.Min, minRate, v.Mean, meanRate, 
v.Max, maxRate, v.Count, v.Sum/1e3)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n\tq.json[\"totalQueries\"] = q.totalQueries\n\tq.json[\"wallClockTime\"] = q.wallTook.Seconds()\n\tq.json[\"queryRate\"] = float64(q.totalQueries) / float64(q.wallTook.Seconds())\n\tq.json[\"workers\"] = q.workers\n\tq.json[\"batchSize\"] = q.batchSize\n\tif q.doJson {\n\t\tfor k, v := range q.json {\n\t\t\tif _, err := json.Marshal(v); err != nil {\n\t\t\t\tq.json[k] = \"\"\n\t\t\t}\n\t\t}\n\t\tb, err := json.Marshal(q.json)\n\t\tif err != nil {\n\t\t\tlog.Println(\"error:\", err)\n\t\t}\n\t\tos.Stdout.Write(b)\n\t}\n}", "func (c *Client) WatchPrefix(prefix string, keys []string, waitIndex uint64, stopChan chan bool) (uint64, error) {\n\tprefixes := append([]string{}, keys...)\n\tif prefix != \"\" {\n\t\tprefixes = append(prefixes, prefix)\n\t}\n\ttimeNow := time.Now().UTC()\n\tnamesOld := make(map[string]bool) // Old names with the prefix\n\n\t// Cycle time interval.\n\ttick := time.NewTicker(time.Second * time.Duration(c.CircleInterval))\n\tdefer tick.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-tick.C:\n\t\tcase <-stopChan:\n\t\t\treturn waitIndex, fmt.Errorf(\"stopChan\")\n\t\tcase err := <-c.ExitWatchCh:\n\t\t\treturn waitIndex + 1, err\n\t\t}\n\n\t\t// Cycle get data to find new data.\n\t\tnamesNow := make(map[string]bool)\n\t\tinput := &secretsmanager.ListSecretsInput{}\n\t\tresult, err := c.Client.ListSecrets(context.TODO(), input)\n\t\tif err != nil {\n\t\t\treturn waitIndex + 1, fmt.Errorf(\"aws get ListSecrets : %v\", err)\n\t\t}\n\n\t\t// Check if add or modify secret.Name.\n\t\tfor _, secret := range result.SecretList {\n\t\t\t// Check prefix. And if secret.LastChangedDate is new or changed.\n\t\t\tfor _, key := range prefixes {\n\t\t\t\tif strings.HasPrefix(*secret.Name, key) {\n\t\t\t\t\tif timeNow.Before(*secret.LastChangedDate) {\n\t\t\t\t\t\t// This secret.LastChangedDate changed.\n\t\t\t\t\t\treturn waitIndex + 1, nil\n\t\t\t\t\t}\n\t\t\t\t\t// Add in namesNow to find deleted secret.Name later.\n\t\t\t\t\tnamesNow[*secret.Name] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Check if delete secret.Name.\n\t\tfor k, _ := range namesOld {\n\t\t\tif _, ok := namesNow[k]; !ok {\n\t\t\t\t// some name be deleted\n\t\t\t\treturn waitIndex + 1, nil\n\t\t\t}\n\t\t}\n\n\t\t// Deep copy.\n\t\tfor k, _ := range namesNow {\n\t\t\tnamesOld[k] = true\n\t\t}\n\t}\n}", "func (p S3RequestsPlugin) MetricKeyPrefix() string {\n\tif p.KeyPrefix == \"\" {\n\t\treturn \"s3-requests\"\n\t}\n\treturn p.KeyPrefix\n}", "func (s *progressBar) SetPrefix(format string, args ...interface{}) {\n\ts.prefix = fmt.Sprintf(format, args...)\n}", "func (this *ReceiverHolder) stats(c *gin.Context) {\n\n\tflightData := this.receiver.GetInFlightRavens()\n\n\tdeadBoxData := this.receiver.GetDeadBoxCount()\n\tboxes := make([]string, 0)\n\tfor _, box := range this.receiver.msgReceivers {\n\t\tboxes = append(boxes, box.id)\n\t}\n\n\tdata := gin.H{\n\t\t\"Queue\": this.receiver.source.GetName(),\n\t\t\"IsReliable\": this.receiver.options.isReliable,\n\t\t\"Boxes\": boxes,\n\t\t\"Inflight\": flightData,\n\t\t\"DeadBox\": deadBoxData,\n\t}\n\tc.JSON(200, data)\n}", "func prefixWithName(name string) string {\n\treturn policyPrefix + policyPath + \"/\" + name\n}", "func (o TargetGroupPtrOutput) NamePrefix() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *TargetGroup) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.NamePrefix\n\t}).(pulumi.StringPtrOutput)\n}", "func (o 
PublicDelegatedPrefixPublicDelegatedSubPrefixResponseOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v PublicDelegatedPrefixPublicDelegatedSubPrefixResponse) string { return v.Name }).(pulumi.StringOutput)\n}", "func initPrefix(colored bool) {\n\tif colored {\n\t\tPrefix[TRACE] = fmt.Sprintf(ColoredPrefixFormat, GRAY, TRACE.String())\n\t\tPrefix[DEBUG] = fmt.Sprintf(ColoredPrefixFormat, GREEN, DEBUG.String())\n\t\tPrefix[INFO] = fmt.Sprintf(ColoredPrefixFormat, BLUE, INFO.String())\n\t\tPrefix[WARNING] = fmt.Sprintf(ColoredPrefixFormat, YELLOW, WARNING.String())\n\t\tPrefix[ERROR] = fmt.Sprintf(ColoredPrefixFormat, RED, ERROR.String())\n\t\tPrefix[CRITICAL] = fmt.Sprintf(ColoredPrefixFormat, RED, CRITICAL.String())\n\t} else {\n\t\tPrefix[TRACE] = fmt.Sprintf(PrefixFormat, TRACE.String())\n\t\tPrefix[DEBUG] = fmt.Sprintf(PrefixFormat, DEBUG.String())\n\t\tPrefix[INFO] = fmt.Sprintf(PrefixFormat, INFO.String())\n\t\tPrefix[WARNING] = fmt.Sprintf(PrefixFormat, WARNING.String())\n\t\tPrefix[ERROR] = fmt.Sprintf(PrefixFormat, ERROR.String())\n\t\tPrefix[CRITICAL] = fmt.Sprintf(PrefixFormat, CRITICAL.String())\n\t}\n}", "func (c *Client) WatchPrefix(prefix string, keys []string, waitIndex uint64, stopChan chan bool) (uint64, error) {\n\t<-stopChan\n\treturn 0, nil\n}", "func (c *Client) WatchPrefix(prefix string, keys []string, waitIndex uint64, stopChan chan bool) (uint64, error) {\n\t<-stopChan\n\treturn 0, nil\n}", "func (manager *Manager) GetAllProxyStatsHandler() {\n\tticker := time.NewTicker(500 * time.Millisecond)\n\tvar oldstats []*config.ProxyBackendStatisticsUpdate\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tnewstats := manager.GetAllProxyStats()\n\t\t\tfor _, new := range newstats {\n\t\t\t\tfor _, old := range oldstats {\n\t\t\t\t\tif old.Statistics.UUID == new.Statistics.UUID {\n\t\t\t\t\t\tif old.Statistics.ClientsConnects != new.Statistics.ClientsConnects {\n\t\t\t\t\t\t\tmanager.proxyBackendStatisticsUpdate <- new\n\t\t\t\t\t\t}\n\t\t\t\t\t} // uuid\n\t\t\t\t} // old\n\t\t\t} // new\n\t\t\toldstats = newstats\n\n\t\t}\n\t}\n}", "func TestMetricName(t *testing.T) {\n\ttcs := []struct {\n\t\tprefix string\n\t\tname string\n\t\twantMetric string\n\t\twantLabel string\n\t}{\n\t\t{\n\t\t\tprefix: \"serverStatus.metrics.commands.saslStart.\",\n\t\t\tname: \"total\",\n\t\t\twantMetric: \"mongodb_ss_metrics_commands_saslStart_total\",\n\t\t},\n\t\t{\n\t\t\tprefix: \"serverStatus.metrics.commands._configsvrShardCollection.\",\n\t\t\tname: \"failed\",\n\t\t\twantMetric: \"mongodb_ss_metrics_commands_configsvrShardCollection_failed\",\n\t\t},\n\t\t{\n\t\t\tprefix: \"serverStatus.wiredTiger.lock.\",\n\t\t\tname: \"metadata lock acquisitions\",\n\t\t\twantMetric: \"mongodb_ss_wt_lock_metadata_lock_acquisitions\",\n\t\t},\n\t\t{\n\t\t\tprefix: \"serverStatus.wiredTiger.perf.\",\n\t\t\tname: \"file system write latency histogram (bucket 5) - 500-999ms\",\n\t\t\twantMetric: \"mongodb_ss_wt_perf\",\n\t\t\twantLabel: \"perf_bucket\",\n\t\t},\n\t\t{\n\t\t\tprefix: \"serverStatus.wiredTiger.transaction.\",\n\t\t\tname: \"rollback to stable updates removed from lookaside\",\n\t\t\twantMetric: \"mongodb_ss_wt_txn_rollback_to_stable_updates_removed_from_lookaside\",\n\t\t},\n\t}\n\n\tfor _, tc := range tcs {\n\t\tmetric, label := nameAndLabel(tc.prefix, tc.name)\n\t\tassert.Equal(t, tc.wantMetric, metric, tc.prefix+tc.name)\n\t\tassert.Equal(t, tc.wantLabel, label, tc.prefix+tc.name)\n\t}\n}", "func (_m *Plugin) InitPrefix(prefix config.Prefix) {\n\t_m.Called(prefix)\n}", "func (o 
*WhatsAppNameWhatsAppApiContent) GetNamePrefix() string {\n\tif o == nil || o.NamePrefix == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.NamePrefix\n}", "func (c *Client) WatchPrefix(ctx context.Context, prefix string, opts ...easykv.WatchOption) (uint64, error) {\n\tvar options easykv.WatchOptions\n\tfor _, o := range opts {\n\t\to(&options)\n\t}\n\n\trespChan := make(chan watchResponse)\n\tgo func() {\n\t\topts := api.QueryOptions{\n\t\t\tWaitIndex: options.WaitIndex,\n\t\t}\n\t\t_, meta, err := c.client.List(prefix, &opts)\n\t\tif err != nil {\n\t\t\trespChan <- watchResponse{options.WaitIndex, err}\n\t\t\treturn\n\t\t}\n\t\trespChan <- watchResponse{meta.LastIndex, err}\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn options.WaitIndex, easykv.ErrWatchCanceled\n\t\tcase r := <-respChan:\n\t\t\treturn r.waitIndex, r.err\n\t\t}\n\t}\n}", "func (o PublicDelegatedPrefixPublicDelegatedSubPrefixOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v PublicDelegatedPrefixPublicDelegatedSubPrefix) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (f *Faker) NamePrefix() string { return namePrefix(f.Rand) }", "func (v Value) StatName() string {\n\tsname := v.Source.Name\n\tif v.Source.Name == \"\" {\n\t\tsname = v.Source.Host\n\t}\n\treturn fmt.Sprintf(\"%s %s\", sname, v.Stat.Name)\n}", "func (dr *Resolver) SendStats() error {\n\tfor counterEntry, counter := range dr.hitsCounters {\n\t\tval := counter.Swap(0)\n\t\tif val > 0 {\n\t\t\t_ = dr.statsdClient.Count(metrics.MetricDentryResolverHits, val, counterEntry.Tags(), 1.0)\n\t\t}\n\t}\n\n\tfor counterEntry, counter := range dr.missCounters {\n\t\tval := counter.Swap(0)\n\t\tif val > 0 {\n\t\t\t_ = dr.statsdClient.Count(metrics.MetricDentryResolverMiss, val, counterEntry.Tags(), 1.0)\n\t\t}\n\t}\n\n\treturn dr.sendERPCStats()\n}", "func (o Oscar) prefix(i int) string {\n\tname := \"\"\n\tif len(o.Suits) > 1 {\n\t\tname = path.Base(o.suiteDefinitions[i].FileName) + \":\"\n\t}\n\n\treturn name\n}", "func Stats(path, prefix string) {\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\tfatal(err)\n\t\treturn\n\t}\n\n\tdb, err := bolt.Open(path, 0600, nil)\n\tif err != nil {\n\t\tfatal(err)\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\terr = db.View(func(tx *bolt.Tx) error {\n\t\tvar s bolt.BucketStats\n\t\tvar count int\n\t\tvar prefix = []byte(prefix)\n\t\ttx.ForEach(func(name []byte, b *bolt.Bucket) error {\n\t\t\tif bytes.HasPrefix(name, prefix) {\n\t\t\t\ts.Add(b.Stats())\n\t\t\t\tcount += 1\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\tprintf(\"Aggregate statistics for %d buckets\\n\\n\", count)\n\n\t\tprintln(\"Page count statistics\")\n\t\tprintf(\"\\tNumber of logical branch pages: %d\\n\", s.BranchPageN)\n\t\tprintf(\"\\tNumber of physical branch overflow pages: %d\\n\", s.BranchOverflowN)\n\t\tprintf(\"\\tNumber of logical leaf pages: %d\\n\", s.LeafPageN)\n\t\tprintf(\"\\tNumber of physical leaf overflow pages: %d\\n\", s.LeafOverflowN)\n\n\t\tprintln(\"Tree statistics\")\n\t\tprintf(\"\\tNumber of keys/value pairs: %d\\n\", s.KeyN)\n\t\tprintf(\"\\tNumber of levels in B+tree: %d\\n\", s.Depth)\n\n\t\tprintln(\"Page size utilization\")\n\t\tprintf(\"\\tBytes allocated for physical branch pages: %d\\n\", s.BranchAlloc)\n\t\tvar percentage int\n\t\tif s.BranchAlloc != 0 {\n\t\t\tpercentage = int(float32(s.BranchInuse) * 100.0 / float32(s.BranchAlloc))\n\t\t}\n\t\tprintf(\"\\tBytes actually used for branch data: %d (%d%%)\\n\", s.BranchInuse, percentage)\n\t\tprintf(\"\\tBytes allocated 
for physical leaf pages: %d\\n\", s.LeafAlloc)\n\t\tpercentage = 0\n\t\tif s.LeafAlloc != 0 {\n\t\t\tpercentage = int(float32(s.LeafInuse) * 100.0 / float32(s.LeafAlloc))\n\t\t}\n\t\tprintf(\"\\tBytes actually used for leaf data: %d (%d%%)\\n\", s.LeafInuse, percentage)\n\n\t\tprintln(\"Bucket statistics\")\n\t\tprintf(\"\\tTotal number of buckets: %d\\n\", s.BucketN)\n\t\tpercentage = int(float32(s.InlineBucketN) * 100.0 / float32(s.BucketN))\n\t\tprintf(\"\\tTotal number on inlined buckets: %d (%d%%)\\n\", s.InlineBucketN, percentage)\n\t\tpercentage = 0\n\t\tif s.LeafInuse != 0 {\n\t\t\tpercentage = int(float32(s.InlineBucketInuse) * 100.0 / float32(s.LeafInuse))\n\t\t}\n\t\tprintf(\"\\tBytes used for inlined buckets: %d (%d%%)\\n\", s.InlineBucketInuse, percentage)\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tfatal(err)\n\t\treturn\n\t}\n}", "func NamePrefix() string { return namePrefix(globalFaker.Rand) }", "func (m *Monitor) SendStats() error {\n\t// delay between to send in order to reduce the statsd pool presure\n\tconst delay = time.Second\n\ttime.Sleep(delay)\n\n\tif resolvers := m.probe.GetResolvers(); resolvers != nil {\n\t\tif err := resolvers.ProcessResolver.SendStats(); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to send process_resolver stats: %w\", err)\n\t\t}\n\t\ttime.Sleep(delay)\n\n\t\tif err := resolvers.DentryResolver.SendStats(); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to send process_resolver stats: %w\", err)\n\t\t}\n\t\tif err := resolvers.NamespaceResolver.SendStats(); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to send namespace_resolver stats: %w\", err)\n\t\t}\n\t}\n\n\tif err := m.perfBufferMonitor.SendStats(); err != nil {\n\t\treturn fmt.Errorf(\"failed to send events stats: %w\", err)\n\t}\n\ttime.Sleep(delay)\n\n\tif err := m.loadController.SendStats(); err != nil {\n\t\treturn fmt.Errorf(\"failed to send load controller stats: %w\", err)\n\t}\n\n\tif m.activityDumpManager != nil {\n\t\tif err := m.activityDumpManager.SendStats(); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to send activity dump maanger stats: %w\", err)\n\t\t}\n\t}\n\n\tif m.probe.config.RuntimeMonitor {\n\t\tif err := m.runtimeMonitor.SendStats(); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to send runtime monitor stats: %w\", err)\n\t\t}\n\t}\n\n\tif err := m.discarderMonitor.SendStats(); err != nil {\n\t\treturn fmt.Errorf(\"failed to send discarder stats: %w\", err)\n\t}\n\n\treturn nil\n}", "func (si *SyncIndexJob) Prefix(p string) {\n\tsi.DestinationPrefix(p)\n}", "func (o AnalyticsConfigurationFilterOutput) Prefix() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v AnalyticsConfigurationFilter) *string { return v.Prefix }).(pulumi.StringPtrOutput)\n}", "func (v *ValueMonitor) Stats(cb func(name string, val float64)) {\n\tv.mtx.Lock()\n\tcount := v.count\n\tsum := v.sum\n\tsum_squared := v.sum_squared\n\trecent := v.recent\n\tmax := v.max\n\tmin := v.min\n\tv.mtx.Unlock()\n\n\tif count > 0 {\n\t\tcb(\"avg\", sum/float64(count))\n\t}\n\tcb(\"count\", float64(count))\n\tcb(\"max\", max)\n\tcb(\"min\", min)\n\tcb(\"recent\", recent)\n\tcb(\"sum\", sum)\n\tcb(\"sum_squared\", sum_squared)\n}", "func (o BucketReplicationConfigRuleFilterAndOutput) Prefix() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BucketReplicationConfigRuleFilterAnd) *string { return v.Prefix }).(pulumi.StringPtrOutput)\n}", "func NameHasPrefix(v string) predicate.Watchlisthistory {\n\treturn predicate.Watchlisthistory(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldName), 
v))\n\t})\n}", "func (o InventoryFilterOutput) Prefix() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v InventoryFilter) *string { return v.Prefix }).(pulumi.StringPtrOutput)\n}", "func (mb *metadataBackend) suggestMetricName(namespace, prefix string, limit int) (metricNames []string, err error) {\n\terr = mb.db.View(func(tx *bbolt.Tx) error {\n\t\t// 1. get namespace bucket\n\t\tnsBucket := tx.Bucket(nsBucketName).Bucket([]byte(namespace))\n\t\tif nsBucket == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\t// 2. scan metric name by prefix\n\t\tcursor := nsBucket.Cursor()\n\t\tprefix := []byte(prefix)\n\t\tfor k, _ := cursor.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, _ = cursor.Next() {\n\t\t\tmetricNames = append(metricNames, string(k))\n\t\t\tif len(metricNames) >= limit {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\treturn\n}", "func Prefix(s string) Option {\n\treturn optionFunc(func(l *lineWriter) {\n\t\tl.prefixes = append(l.prefixes, func() string { return s })\n\t})\n}", "func (c *Channel) SendNames(cl *Client) {\n\tvar base string = \"\"\n\n\tsend_base := func() {\n\t\tcl.Resp(RPL_NAMREPLY).SetF(\"= %s\", c.GetName()).Set(base).Send()\n\t}\n\n\t// This loop makes 510 character long messages\n\t// TODO: Bug, does not include the base message size in the max line\n\t// length. get size of packet.build() and subtract it from MAX_LINE_SIZE\n\tfor _, v := range c.Members {\n\t\tname := c.GetMemberName(v)\n\t\tif len(base+name) > MAX_LINE_SIZE {\n\t\t\tsend_base()\n\t\t\tbase = \"\"\n\t\t}\n\t\tbase = base + \" \" + name\n\t}\n\tif base != \"\" {\n\t\tsend_base()\n\t}\n\n\tcl.Resp(RPL_ENDOFNAMES).Set(c.GetName()).Set(\":End of /NAMES list.\").Send()\n}", "func (client *Client) GetStatsGroupName() (s string) {\n\ts10 := strconv.FormatInt(int64(client.myUniqueID), 10)\n\treturn clientSideGroupPrefix + s10\n}", "func (_TokensNetwork *TokensNetworkCaller) SignaturePrefix(opts *bind.CallOpts) (string, error) {\n\tvar (\n\t\tret0 = new(string)\n\t)\n\tout := ret0\n\terr := _TokensNetwork.contract.Call(opts, out, \"signature_prefix\")\n\treturn *ret0, err\n}", "func startStats(l *logrus.Logger, c *config.C, buildVersion string, configTest bool) (func(), error) {\n\tmType := c.GetString(\"stats.type\", \"\")\n\tif mType == \"\" || mType == \"none\" {\n\t\treturn nil, nil\n\t}\n\n\tinterval := c.GetDuration(\"stats.interval\", 0)\n\tif interval == 0 {\n\t\treturn nil, fmt.Errorf(\"stats.interval was an invalid duration: %s\", c.GetString(\"stats.interval\", \"\"))\n\t}\n\n\tvar startFn func()\n\tswitch mType {\n\tcase \"graphite\":\n\t\terr := startGraphiteStats(l, interval, c, configTest)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tcase \"prometheus\":\n\t\tvar err error\n\t\tstartFn, err = startPrometheusStats(l, interval, c, buildVersion, configTest)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"stats.type was not understood: %s\", mType)\n\t}\n\n\tmetrics.RegisterDebugGCStats(metrics.DefaultRegistry)\n\tmetrics.RegisterRuntimeMemStats(metrics.DefaultRegistry)\n\n\tgo metrics.CaptureDebugGCStats(metrics.DefaultRegistry, interval)\n\tgo metrics.CaptureRuntimeMemStats(metrics.DefaultRegistry, interval)\n\n\treturn startFn, nil\n}", "func (n Name) Prefix(suffix Name) Name {\n\toffset := len(n) - len(suffix)\n\tif offset < 0 {\n\t\treturn n\n\t}\n\tif n[offset:].Equal(suffix) {\n\t\treturn n[:offset]\n\t}\n\treturn n\n}", "func PrefixWriter(dst io.Writer, prefix string) io.Writer {\n\treturn writerFunc(func(p []byte) 
(int, error) {\n\t\treturn fmt.Fprintf(dst, \"%s%s\", prefix, p)\n\t})\n}", "func (c *client) WatchPrefix(prefix string, ch chan struct{}) {\n\twatch := c.keysAPI.Watcher(prefix, &etcd.WatcherOptions{AfterIndex: 0, Recursive: true})\n\tch <- struct{}{} // make sure caller invokes GetEntries\n\tfor {\n\t\tif _, err := watch.Next(c.ctx); err != nil {\n\t\t\treturn\n\t\t}\n\t\tch <- struct{}{}\n\t}\n}", "func (o BucketIntelligentTieringConfigurationFilterOutput) Prefix() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BucketIntelligentTieringConfigurationFilter) *string { return v.Prefix }).(pulumi.StringPtrOutput)\n}", "func (o HttpHeaderMatchOutput) PrefixMatch() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v HttpHeaderMatch) *string { return v.PrefixMatch }).(pulumi.StringPtrOutput)\n}", "func (m *Metrics) NameFor(name string) string {\n\treturn fmt.Sprintf(\"%s_%s\", m.Prefix, name)\n}", "func (c *FileSystemCache) Prefix(p ...string) Cache {\n\tc.prefix = p\n\treturn c\n}", "func (l *LvlStruct) Prefix() string {\n\tl.mu.Lock()\n\tdefer l.mu.Unlock()\n\treturn l.log.Prefix()\n}", "func (c *Client) Prefix(s string) {\n\tc.prefix = s\n}", "func (h *clientStatsHandler) HandleRPC(ctx context.Context, s stats.RPCStats) {\n\tv, ok := ctx.Value(&rpcInfoKey).(*rpcInfo)\n\tif !ok {\n\t\treturn\n\t}\n\tmonitor := newClientReporterForStatsHanlder(v.startTime, h.clientMetrics, v.fullMethodName)\n\tswitch s := s.(type) {\n\tcase *stats.Begin:\n\t\tv.startTime = s.BeginTime\n\t\tmonitor.StartedConn()\n\tcase *stats.End:\n\t\tmonitor.Handled(status.Code(s.Error))\n\tcase *stats.InHeader:\n\t\tmonitor.ReceivedMessageSize(Header, float64(s.WireLength))\n\tcase *stats.InPayload:\n\t\t// TODO: remove the +5 offset on wire length here, which is a temporary stand-in for the missing grpc framing offset\n\t\t// See: https://github.com/grpc/grpc-go/issues/1647\n\t\tmonitor.ReceivedMessageSize(Payload, float64(s.WireLength+5))\n\tcase *stats.InTrailer:\n\t\tmonitor.ReceivedMessageSize(Tailer, float64(s.WireLength))\n\tcase *stats.OutHeader:\n\t\t// TODO: Add the sent header message size stats, if the wire length of the send header is provided\n\tcase *stats.OutPayload:\n\t\t// TODO(tonywang): response latency (seconds) of the gRPC single message send\n\t\tmonitor.SentMessageSize(Payload, float64(s.WireLength))\n\tcase *stats.OutTrailer:\n\t\tmonitor.SentMessageSize(Tailer, float64(s.WireLength))\n\t}\n}", "func (o BucketReplicationConfigRuleFilterOutput) Prefix() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BucketReplicationConfigRuleFilter) *string { return v.Prefix }).(pulumi.StringPtrOutput)\n}", "func (ub *UpdateBuilder) Prefix(\n\tsql string,\n\targs ...interface{},\n) *UpdateBuilder {\n\tub.sql = ub.sql.Prefix(sql, args...)\n\treturn ub\n}", "func (o AnalyticsConfigurationFilterPtrOutput) Prefix() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *AnalyticsConfigurationFilter) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Prefix\n\t}).(pulumi.StringPtrOutput)\n}", "func (m *OutboundMock) GetNameAddressMinimockCounter() uint64 {\n\treturn atomic.LoadUint64(&m.GetNameAddressCounter)\n}", "func (o BucketReplicationConfigRuleFilterAndPtrOutput) Prefix() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *BucketReplicationConfigRuleFilterAnd) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Prefix\n\t}).(pulumi.StringPtrOutput)\n}", "func (c *PreferenceInfo) GetNamePrefix() string {\n\tif c.OdoSettings.NamePrefix == nil {\n\t\treturn \"\"\n\t}\n\treturn 
*c.OdoSettings.NamePrefix\n}", "func (f *filter) Prefix(prefix string) Filter {\n\tif f.prefixed || prefix == \"\" {\n\t\treturn f\n\t}\n\n\tif len(f.allowed) > 0 {\n\t\tfor _, r := range f.allowed {\n\t\t\tr.prefix(prefix)\n\t\t}\n\n\t\tf.prefixed = true\n\t}\n\n\treturn f\n}", "func (m *Main) printStats() *time.Ticker {\n\tt := time.NewTicker(time.Second * 10)\n\tstart := time.Now()\n\tgo func() {\n\t\tfor range t.C {\n\t\t\tduration := time.Since(start)\n\t\t\tbytes := m.BytesProcessed()\n\t\t\tlog.Printf(\"Bytes: %s, Records: %v, Duration: %v, Rate: %v/s, %v rec/s\", pdk.Bytes(bytes), m.totalRecs.Get(), duration, pdk.Bytes(float64(bytes)/duration.Seconds()), float64(m.totalRecs.Get())/duration.Seconds())\n\t\t}\n\t}()\n\treturn t\n}", "func (u UptimePlugin) MetricKeyPrefix() string {\n\tif u.Prefix == \"\" {\n\t\tu.Prefix = \"uptime\"\n\t}\n\treturn u.Prefix\n}", "func Prefix(value string) Option {\n\treturn addParam(\"prefix\", value)\n}", "func (r *Routing) Prefix(prefix string, f func()) {\n\n\tdefer func() {\n\t\tr.routerWithPrefix = nil\n\t\tif len(r.prefixes) > 0 {\n\t\t\tr.prefixes = r.prefixes[:len(r.prefixes)-1]\n\t\t}\n\t}()\n\n\tif len(prefix) == 0 {\n\t\tpanic(\"Prefix(): the prefix can't be empty\")\n\t}\n\n\tr.prefixes = append(r.prefixes, prefix)\n\n\tvar mergePrefix = strings.Join(r.prefixes, \"/\")\n\n\tr.routerWithPrefix = r.Router.PathPrefix(fmt.Sprintf(\"/%s\", mergePrefix)).Subrouter().StrictSlash(true)\n\tf()\n\n}", "func NameHasPrefix(v string) predicate.Watchlist {\n\treturn predicate.Watchlist(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldName), v))\n\t})\n}", "func (o ApplicationStatusOperationStateSyncResultSourceKustomizeOutput) NameSuffix() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusOperationStateSyncResultSourceKustomize) *string { return v.NameSuffix }).(pulumi.StringPtrOutput)\n}", "func GetOutputPrefix(funcName string) string {\n\ttabs := \"\"\n\tif debug {\n\t\ttabs = strings.Repeat(\" \", funcDepth)\n\t}\n\treturn fmt.Sprintf(\"(%14s) %s[%s] \", DurClock(time.Since(startTime)), tabs, funcName)\n}" ]
[ "0.63966745", "0.6346358", "0.62546855", "0.58574206", "0.5699559", "0.5687043", "0.5642775", "0.55433357", "0.55392677", "0.5487048", "0.543249", "0.5431307", "0.539962", "0.5389051", "0.53605694", "0.53553647", "0.53547794", "0.52440846", "0.52403", "0.52047205", "0.5190972", "0.51728946", "0.51608306", "0.5142602", "0.5100493", "0.5091228", "0.50310636", "0.50286806", "0.5028541", "0.5023289", "0.50047445", "0.50006497", "0.49677578", "0.49502534", "0.49439755", "0.49403664", "0.49378914", "0.49349448", "0.49024683", "0.48985925", "0.48519158", "0.48415056", "0.4824108", "0.4815653", "0.48133105", "0.48128286", "0.48044145", "0.4789173", "0.47865427", "0.4785002", "0.4785002", "0.47760063", "0.47689033", "0.4763926", "0.4757598", "0.47290438", "0.472828", "0.47267663", "0.47216016", "0.4717718", "0.47031692", "0.47025055", "0.46820435", "0.467678", "0.46763012", "0.46697554", "0.4667464", "0.46580374", "0.46438134", "0.46437538", "0.4638273", "0.46368027", "0.46251738", "0.46236214", "0.46216345", "0.46128282", "0.46118972", "0.46061742", "0.46046972", "0.46043134", "0.45988667", "0.459387", "0.459238", "0.4590667", "0.459066", "0.4590627", "0.4590064", "0.45851508", "0.4580677", "0.45792493", "0.4577197", "0.45746434", "0.45670083", "0.45540944", "0.45528597", "0.45466104", "0.45455593", "0.45454982", "0.45436022", "0.45383337" ]
0.8604961
0
Collect takes something that implements the Monitor interface and returns a key/value map.
func Collect(mon Monitor) map[string]float64 {
	rv := make(map[string]float64)
	mon.Stats(func(name string, val float64) {
		rv[name] = val
	})
	return rv
}
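A minimal usage sketch for the Collect helper above, assuming a hypothetical map-backed type that satisfies the Monitor interface by reporting each stored value through the Stats callback. The Monitor interface definition and all names below are illustrative assumptions inferred from the snippet, not taken from the dataset:

package main

import "fmt"

// Monitor is assumed to expose stats through a callback, matching the
// signature that Collect consumes.
type Monitor interface {
	Stats(cb func(name string, val float64))
}

// counters is a hypothetical Monitor implementation backed by a map.
type counters map[string]float64

// Stats reports every stored key/value pair through the callback.
func (c counters) Stats(cb func(name string, val float64)) {
	for name, val := range c {
		cb(name, val)
	}
}

// Collect drains a Monitor into a plain key/value map.
func Collect(mon Monitor) map[string]float64 {
	rv := make(map[string]float64)
	mon.Stats(func(name string, val float64) {
		rv[name] = val
	})
	return rv
}

func main() {
	m := counters{"requests": 42, "errors": 3}
	fmt.Println(Collect(m)) // map[errors:3 requests:42]
}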
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Collect() (result map[string]interface{}, err error) {\n\tresult = make(map[string]interface{})\n\n\tfor _, collector := range collectors {\n\t\tif shouldCollect(collector) {\n\t\t\tc, err := collector.Collect()\n\t\t\tif err != nil {\n\t\t\t\tlog.Warnf(\"[%s] %s\", collector.Name(), err)\n\t\t\t}\n\t\t\tif c != nil {\n\t\t\t\tresult[collector.Name()] = c\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}", "func Collect(mon StatSource) map[string]float64 {\n\trv := make(map[string]float64)\n\tmon.Stats(func(key SeriesKey, field string, val float64) {\n\t\trv[key.WithField(field)] = val\n\t})\n\treturn rv\n}", "func (m *Client) Collect(ch chan<- prometheus.Metric) {\n\tm.storeMu.Lock()\n\tdefer m.storeMu.Unlock()\n\n\tch <- prometheus.MustNewConstMetric(m.storeValuesDesc, prometheus.GaugeValue, float64(len(m.store)))\n\n\tfor k, v := range m.store {\n\t\tch <- prometheus.MustNewConstMetric(m.storeSizesDesc, prometheus.GaugeValue, float64(len(v.value)), k)\n\t}\n}", "func (c *VMCollector) Collect(ch chan<- prometheus.Metric) {\n\tfor _, m := range c.getMetrics() {\n\t\tch <- m\n\t}\n}", "func (exp *Expvar) Collect() (map[string]interface{}, error) {\n\treq, err := http.NewRequest(http.MethodGet, exp.host, nil)\n\tlog.Println(exp.host)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn nil, err\n\t}\n\n\tresp, err := exp.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := make(map[string]interface{})\n\tif err := json.NewDecoder(resp.Body).Decode(&data); err != nil {\n\t\treturn nil, err\n\t}\n\n\tmemStats, ok := (data[\"memstats\"]).(map[string]interface{})\n\tif ok {\n\t\tdata[\"heap\"] = memStats[\"Alloc\"]\n\t}\n\n\tu, err := url.Parse(exp.host)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdata[\"host\"] = u.Hostname()\n\n\tdelete(data, \"memStats\")\n\tdelete(data, \"cmdline\")\n\n\treturn data, nil\n}", "func (c *collector) Collect(ch chan<- prometheus.Metric) {\n\tc.m.Lock()\n\tfor _, m := range c.metrics {\n\t\tch <- m.metric\n\t}\n\tc.m.Unlock()\n}", "func (coll WmiCollector) Collect(ch chan<- prometheus.Metric) {\n\texecute(coll.collector, ch)\n}", "func (c *MetricsCollector) Collect(ch chan<- prometheus.Metric) {\n\tfor _, s := range c.status {\n\t\ts.RLock()\n\t\tdefer s.RUnlock()\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.verify,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(s.VerifyRestore),\n\t\t\t\"verify_restore\",\n\t\t\ts.BackupService,\n\t\t\ts.StorageService,\n\t\t)\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.verify,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(s.VerifyDiff),\n\t\t\t\"verify_diff\",\n\t\t\ts.BackupService,\n\t\t\ts.StorageService,\n\t\t)\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.verify,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(s.VerifyChecksum),\n\t\t\t\"verify_checksum\",\n\t\t\ts.BackupService,\n\t\t\ts.StorageService,\n\t\t)\n\t}\n\n}", "func (c collector) Collect(ch chan<- prometheus.Metric) {\n\tvar wg sync.WaitGroup\n\n\t// We don't bail out on errors because those can happen if there is a race condition between\n\t// the destruction of a container and us getting to read the cgroup data. 
We just don't report\n\t// the values we don't get.\n\n\tcollectors := []func(string, *regexp.Regexp){\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tnuma, err := cgroups.GetNumaStats(cgroupPath(\"memory\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateNumaStatMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], numa)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect NUMA stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tmemory, err := cgroups.GetMemoryUsage(cgroupPath(\"memory\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateMemoryUsageMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], memory)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect memory usage stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tmigrate, err := cgroups.GetCPUSetMemoryMigrate(cgroupPath(\"cpuset\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateMemoryMigrateMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], migrate)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect memory migration stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tcpuAcctUsage, err := cgroups.GetCPUAcctStats(cgroupPath(\"cpuacct\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateCPUAcctUsageMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], cpuAcctUsage)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect CPU accounting stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\thugeTlbUsage, err := cgroups.GetHugetlbUsage(cgroupPath(\"hugetlb\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateHugeTlbUsageMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], hugeTlbUsage)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect hugetlb stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t\tfunc(path string, re *regexp.Regexp) {\n\t\t\tdefer wg.Done()\n\t\t\tblkioDeviceUsage, err := cgroups.GetBlkioThrottleBytes(cgroupPath(\"blkio\", path))\n\t\t\tif err == nil {\n\t\t\t\tupdateBlkioDeviceUsageMetric(ch, re.FindStringSubmatch(filepath.Base(path))[0], blkioDeviceUsage)\n\t\t\t} else {\n\t\t\t\tlog.Error(\"failed to collect blkio stats for %s: %v\", path, err)\n\t\t\t}\n\t\t},\n\t}\n\n\tcontainerIDRegexp := regexp.MustCompile(`[a-z0-9]{64}`)\n\n\tfor _, path := range walkCgroups() {\n\t\twg.Add(len(collectors))\n\t\tfor _, fn := range collectors {\n\t\t\tgo fn(path, containerIDRegexp)\n\t\t}\n\t}\n\n\t// We need to wait so that the response channel doesn't get closed.\n\twg.Wait()\n}", "func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) {\n\toomCountByHost, ramUsageByHost := c.ReallyExpensiveAssessmentOfTheSystemState()\n\tfor host, oomCount := range oomCountByHost {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.OOMCountDesc,\n\t\t\tprometheus.CounterValue,\n\t\t\tfloat64(oomCount),\n\t\t\thost,\n\t\t)\n\t}\n\tfor host, ramUsage := range ramUsageByHost {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.RAMUsageDesc,\n\t\t\tprometheus.GaugeValue,\n\t\t\tramUsage,\n\t\t\thost,\n\t\t)\n\t}\n}", "func (o *requestMetrics) Collect(ch chan<- prometheus.Metric) {\n\tmetricFamilies, err := o.stStore.GetPromDirectMetrics()\n\tif err != nil {\n\t\tklog.Errorf(\"fetch prometheus metrics failed: %v\", err)\n\t\treturn\n\t}\n\to.handleMetrics(metricFamilies, ch)\n}", "func (c *metricbeatCollector) Collect(ch 
chan<- prometheus.Metric) {\n\n\tfor _, i := range c.metrics {\n\t\tch <- prometheus.MustNewConstMetric(i.desc, i.valType, i.eval(c.stats))\n\t}\n\n}", "func (c *auditdCollector) Collect(ch chan<- prometheus.Metric) {\n\n\tfor _, i := range c.metrics {\n\t\tch <- prometheus.MustNewConstMetric(i.desc, i.valType, i.eval(c.stats))\n\t}\n\n}", "func (co *VMICollector) Collect(ch chan<- prometheus.Metric) {\n\tcachedObjs := co.vmiInformer.GetIndexer().List()\n\tif len(cachedObjs) == 0 {\n\t\tlog.Log.V(4).Infof(\"No VMIs detected\")\n\t\treturn\n\t}\n\n\tvmis := make([]*k6tv1.VirtualMachineInstance, len(cachedObjs))\n\n\tfor i, obj := range cachedObjs {\n\t\tvmis[i] = obj.(*k6tv1.VirtualMachineInstance)\n\t}\n\n\tco.updateVMIsPhase(vmis, ch)\n\tco.updateVMIMetrics(vmis, ch)\n\treturn\n}", "func (p *Metrics) Collect(c chan<- prometheus.Metric) {\n\t//rlockCollect(c,&p.mucout,p.counters)\n\t//rlockCollect(c,&p.mugau,p.gauges)\n\t//rlockCollect(c,&p.muhist,p.historams)\n\t//rlockCollect(c,&p.musumm,p.summaries)\n\tp.rlockCollectCounter(c)\n\tp.rlockCollectGauge(c)\n\tp.rlockCollectHistorams(c)\n\tp.rlockCollectSummaries(c)\n}", "func (h *Metrics) Collect(in chan<- prometheus.Metric) {\n\th.duration.Collect(in)\n\th.totalRequests.Collect(in)\n\th.requestSize.Collect(in)\n\th.responseSize.Collect(in)\n\th.handlerStatuses.Collect(in)\n\th.responseTime.Collect(in)\n}", "func (c *environmentCollector) Collect(client *rpc.Client, ch chan<- prometheus.Metric, labelValues []string) error {\n\ttempItems, powerItems, err := c.environmentItems(client)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, item := range tempItems {\n\t\tl := append(labelValues, item.Name)\n\n\t\tch <- prometheus.MustNewConstMetric(temperaturesDesc, prometheus.GaugeValue, item.Temperature, l...)\n\t}\n\n\tstatusValues := map[string]int{\n\t\t\"OK\": 1,\n\t\t\"Testing\": 2,\n\t\t\"Failed\": 3,\n\t\t\"Absent\": 4,\n\t\t\"Present\": 5,\n\t}\n\tfor _, item := range powerItems {\n\t\tl := append(labelValues, item.Name, item.Status)\n\n\t\tch <- prometheus.MustNewConstMetric(powerSupplyDesc, prometheus.GaugeValue, float64(statusValues[item.Status]), l...)\n\t}\n\n\treturn nil\n}", "func (c *OrchestratorCollector) Collect(ch chan<- prometheus.Metric) {\n\tc.mutex.Lock() // To protect metrics from concurrent collects\n\tdefer c.mutex.Unlock()\n\n\tstats, err := c.orchestratorClient.GetMetrics()\n\tif err != nil {\n\t\tc.upMetric.Set(serviceDown)\n\t\tch <- c.upMetric\n\t\tlog.Printf(\"Error getting Orchestrator stats: %v\", err)\n\t\treturn\n\t}\n\n\tc.upMetric.Set(serviceUp)\n\tch <- c.upMetric\n\n\tch <- prometheus.MustNewConstMetric(c.metrics[\"cluter_size\"],\n\t\tprometheus.GaugeValue, float64(len(stats.Status.Details.AvailableNodes)))\n\tch <- prometheus.MustNewConstMetric(c.metrics[\"is_active_node\"],\n\t\tprometheus.GaugeValue, boolToFloat64(stats.Status.Details.IsActiveNode))\n\tch <- prometheus.MustNewConstMetric(c.metrics[\"problems\"],\n\t\tprometheus.GaugeValue, float64(len(stats.Problems)))\n\tch <- prometheus.MustNewConstMetric(c.metrics[\"last_failover_id\"],\n\t\tprometheus.CounterValue, float64(stats.LastFailoverID))\n\tch <- prometheus.MustNewConstMetric(c.metrics[\"is_healthy\"],\n\t\tprometheus.GaugeValue, boolToFloat64(stats.Status.Details.Healthy))\n\tch <- prometheus.MustNewConstMetric(c.metrics[\"failed_seeds\"],\n\t\tprometheus.CounterValue, float64(stats.FailedSeeds))\n}", "func (collector *MetricsCollector) Collect(ch chan<- prometheus.Metric) {\n\tfilterMetricsByKind := func(kind string, orgMetrics 
[]constMetric) (filteredMetrics []constMetric) {\n\t\tfor _, metric := range orgMetrics {\n\t\t\tif metric.kind == kind {\n\t\t\t\tfilteredMetrics = append(filteredMetrics, metric)\n\t\t\t}\n\t\t}\n\t\treturn filteredMetrics\n\t}\n\tcollector.defMetrics.reset()\n\tfor k := range collector.metrics {\n\t\tcounters := filterMetricsByKind(config.KeyMetricTypeCounter, collector.metrics[k])\n\t\tgauges := filterMetricsByKind(config.KeyMetricTypeGauge, collector.metrics[k])\n\t\thistograms := filterMetricsByKind(config.KeyMetricTypeHistogram, collector.metrics[k])\n\t\tcollectCounters(counters, collector.defMetrics, ch)\n\t\tcollectGauges(gauges, collector.defMetrics, ch)\n\t\tcollectHistograms(histograms, collector.defMetrics, ch)\n\t\tcollector.cache.Reset()\n\t}\n\tcollector.defMetrics.collectDefaultMetrics(ch)\n}", "func (m *ClientMetrics) Collect(ch chan<- prom.Metric) {\n\tm.clientStartedCounter.Collect(ch)\n\tm.clientHandledCounter.Collect(ch)\n\tm.clientStreamMsgReceived.Collect(ch)\n\tm.clientStreamMsgSent.Collect(ch)\n\tif m.clientHandledHistogramEnabled {\n\t\tm.clientHandledHistogram.Collect(ch)\n\t}\n}", "func (b *EBPFTelemetry) Collect(ch chan<- prometheus.Metric) {\n\tb.getHelpersTelemetry(ch)\n\tb.getMapsTelemetry(ch)\n}", "func (c *beatCollector) Collect(ch chan<- prometheus.Metric) {\n\n\tfor _, i := range c.metrics {\n\t\tch <- prometheus.MustNewConstMetric(i.desc, i.valType, i.eval(c.stats))\n\t}\n\n}", "func (collector *Collector) Collect(ch chan<- prometheus.Metric) {\n\tch <- prometheus.MustNewConstMetric(collector.incidentsCreatedCount, prometheus.CounterValue, collector.storage.GetIncidentsCreatedCount())\n}", "func (c *Collector) Collect(ch chan<- prometheus.Metric) {\n\tc.mut.RLock()\n\tdefer c.mut.RUnlock()\n\n\tif c.inner != nil {\n\t\tc.inner.Collect(ch)\n\t}\n}", "func (c *MosquittoCounter) Collect(ch chan<- prometheus.Metric) {\n\tch <- prometheus.MustNewConstMetric(\n\t\tc.Desc,\n\t\tprometheus.CounterValue,\n\t\tc.counter.value,\n\t)\n}", "func Collect(metrics []Metric, c CloudWatchService, namespace string) {\n\tid, err := GetInstanceID()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfor _, metric := range metrics {\n\t\tmetric.Collect(id, c, namespace)\n\t}\n}", "func (o *observer) Collect(ch chan<- prometheus.Metric) {\n\to.updateError.Collect(ch)\n\to.verifyError.Collect(ch)\n\to.expiration.Collect(ch)\n}", "func (c *filebeatCollector) Collect(ch chan<- prometheus.Metric) {\n\n\tfor _, i := range c.metrics {\n\t\tch <- prometheus.MustNewConstMetric(i.desc, i.valType, i.eval(c.stats))\n\t}\n\n}", "func (k *KACollector) Collect(ch chan<- prometheus.Metric) {\n\tk.mutex.Lock()\n\tdefer k.mutex.Unlock()\n\n\tvar err error\n\tvar kaStats []KAStats\n\n\tif k.useJSON {\n\t\tkaStats, err = k.json()\n\t\tif err != nil {\n\t\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_up\"], prometheus.GaugeValue, 0)\n\t\t\tlog.Printf(\"keepalived_exporter: %v\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tkaStats, err = k.text()\n\t\tif err != nil {\n\t\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_up\"], prometheus.GaugeValue, 0)\n\t\t\tlog.Printf(\"keepalived_exporter: %v\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_up\"], prometheus.GaugeValue, 1)\n\n\tfor _, st := range kaStats {\n\t\tstate := \"\"\n\t\tif _, ok := state2string[st.Data.State]; ok {\n\t\t\tstate = state2string[st.Data.State]\n\t\t}\n\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_advert_rcvd\"], 
prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.AdvertRcvd), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_advert_sent\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.AdvertSent), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_become_master\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.BecomeMaster), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_release_master\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.ReleaseMaster), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_packet_len_err\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.PacketLenErr), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_advert_interval_err\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.AdvertIntervalErr), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_ip_ttl_err\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.AdvertIntervalErr), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_invalid_type_rcvd\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.InvalidTypeRcvd), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_addr_list_err\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.AddrListErr), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_invalid_authtype\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.InvalidAuthtype), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_authtype_mismatch\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.AuthtypeMismatch), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_auth_failure\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.AuthFailure), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_pri_zero_rcvd\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.PriZeroRcvd), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_vrrp_pri_zero_sent\"], prometheus.CounterValue,\n\t\t\tfloat64(st.Stats.PriZeroSent), st.Data.Iname, st.Data.IfpIfname, strconv.Itoa(st.Data.Vrid), state)\n\t}\n\n\tif k.handle == nil {\n\t\treturn\n\t}\n\n\tsvcs, err := k.handle.GetServices()\n\tif err != nil {\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_up\"], prometheus.GaugeValue, 0)\n\t\tlog.Printf(\"keepalived_exporter: services: %v\", err)\n\t\treturn\n\t}\n\n\tfor _, s := range svcs {\n\t\tdsts, err := k.handle.GetDestinations(s)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"keepalived_exporter: destinations: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\taddr := s.Address.String() + \":\" + 
strconv.Itoa(int(s.Port))\n\t\tproto := strconv.Itoa(int(s.Protocol))\n\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_vip_in_packets\"], prometheus.CounterValue,\n\t\t\tfloat64(s.Stats.PacketsIn), addr, proto)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_vip_out_packets\"], prometheus.CounterValue,\n\t\t\tfloat64(s.Stats.PacketsOut), addr, proto)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_vip_in_bytes\"], prometheus.CounterValue,\n\t\t\tfloat64(s.Stats.BytesIn), addr, proto)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_vip_out_bytes\"], prometheus.CounterValue,\n\t\t\tfloat64(s.Stats.BytesOut), addr, proto)\n\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_vip_conn\"], prometheus.CounterValue,\n\t\t\tfloat64(s.Stats.Connections), addr, proto)\n\n\t\tfor _, d := range dsts {\n\t\t\taddr := d.Address.String() + \":\" + strconv.Itoa(int(d.Port))\n\n\t\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_rs_in_packets\"], prometheus.CounterValue,\n\t\t\t\tfloat64(d.Stats.PacketsIn), addr, proto)\n\t\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_rs_out_packets\"], prometheus.CounterValue,\n\t\t\t\tfloat64(d.Stats.PacketsOut), addr, proto)\n\t\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_rs_in_bytes\"], prometheus.CounterValue,\n\t\t\t\tfloat64(d.Stats.BytesIn), addr, proto)\n\t\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_rs_out_bytes\"], prometheus.CounterValue,\n\t\t\t\tfloat64(d.Stats.BytesOut), addr, proto)\n\t\t\tch <- prometheus.MustNewConstMetric(k.metrics[\"keepalived_lvs_rs_conn\"], prometheus.CounterValue,\n\t\t\t\tfloat64(d.Stats.Connections), addr, proto)\n\t\t}\n\t}\n}", "func (c *prometheusCollector) Collect(ch chan<- prometheus.Metric) {\n\tvar stats = c.db.Stats()\n\n\tch <- prometheus.MustNewConstMetric(c.maxOpenConnections, prometheus.GaugeValue, float64(stats.MaxOpenConnections))\n\tch <- prometheus.MustNewConstMetric(c.openConnections, prometheus.GaugeValue, float64(stats.OpenConnections))\n\tch <- prometheus.MustNewConstMetric(c.inUse, prometheus.GaugeValue, float64(stats.InUse))\n\tch <- prometheus.MustNewConstMetric(c.idle, prometheus.GaugeValue, float64(stats.Idle))\n\tch <- prometheus.MustNewConstMetric(c.waitCount, prometheus.CounterValue, float64(stats.WaitCount))\n\tch <- prometheus.MustNewConstMetric(c.waitDuration, prometheus.CounterValue, float64(stats.WaitDuration))\n\tch <- prometheus.MustNewConstMetric(c.maxIdleClosed, prometheus.CounterValue, float64(stats.MaxIdleClosed))\n\tch <- prometheus.MustNewConstMetric(c.maxIdleTimeClosed, prometheus.CounterValue, float64(stats.MaxIdleTimeClosed))\n\tch <- prometheus.MustNewConstMetric(c.maxLifetimeClosed, prometheus.CounterValue, float64(stats.MaxLifetimeClosed))\n}", "func (c *OSCollector) Collect(ch chan<- prometheus.Metric) {\n\tif desc, err := c.collect(ch); err != nil {\n\t\tlog.Println(\"[ERROR] failed collecting os metrics:\", desc, err)\n\t\treturn\n\t}\n}", "func (m *ClientMetrics) Collect(ch chan<- prometheus.Metric) {\n\tm.clientHandledSummary.Collect(ch)\n}", "func (c *environmentCollector) Collect(client *rpc.Client, ch chan<- prometheus.Metric, labelValues []string) error {\n\tout, err := client.RunCommand(\"show environment\")\n\tif err != nil {\n\t\treturn err\n\t}\n\titems, err := c.Parse(client.OSType, out)\n\tif err != nil {\n\t\tif client.Debug {\n\t\t\tlog.Printf(\"Parse environment for %s: %s\\n\", labelValues[0], 
err.Error())\n\t\t}\n\t\treturn nil\n\t}\n\n\tfor _, item := range items {\n\t\tl := append(labelValues, item.Name)\n\t\tif item.IsTemp {\n\t\t\tch <- prometheus.MustNewConstMetric(temperaturesDesc, prometheus.GaugeValue, float64(item.Temperature), l...)\n\t\t} else {\n\t\t\tval := 0\n\t\t\tif item.OK {\n\t\t\t\tval = 1\n\t\t\t}\n\t\t\tl = append(l, item.Status)\n\t\t\tch <- prometheus.MustNewConstMetric(powerSupplyDesc, prometheus.GaugeValue, float64(val), l...)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (collector *collector) Collect(ch chan<- prometheus.Metric) {\n\tcontainerNames, err := collector.server.GetContainerNames()\n\tif err != nil {\n\t\tcollector.logger.Printf(\"Can't query container names: %s\", err)\n\t\treturn\n\t}\n\n\tfor _, containerName := range containerNames {\n\t\tstate, _, err := collector.server.GetContainerState(containerName)\n\t\tif err != nil {\n\t\t\tcollector.logger.Printf(\n\t\t\t\t\"Can't query container state for `%s`: %s\", containerName, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tcollector.collectContainerMetrics(ch, containerName, state)\n\t}\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mu.Lock()\n\tdefer e.mu.Unlock()\n\n\tfor _, cc := range e.collectors {\n\t\tcc.Collect(ch)\n\t}\n}", "func (c *MemoryCollector) Collect(ch chan<- prometheus.Metric) error {\n\tif desc, err := c.collect(ch); err != nil {\n\t\tlog.Error(\"failed collecting memory metrics:\", desc, err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func (c *CephExporter) Collect(ch chan<- prometheus.Metric) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tfor _, cc := range c.collectors {\n\t\tcc.Collect(ch)\n\t}\n}", "func (c *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tfor _, cc := range c.collectors {\n\t\tcc.Collect(ch)\n\t}\n}", "func (c *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tfor _, cc := range c.collectors {\n\t\tcc.Collect(ch)\n\t}\n}", "func (c *DiskCache) Collect(metrics chan<- prometheus.Metric) {\n\tc.requestTotals.Collect(metrics)\n\tc.missTotals.Collect(metrics)\n\tc.bytesStoredtotals.Collect(metrics)\n\tc.bytesFetchedtotals.Collect(metrics)\n\tc.bytesLoserTotals.Collect(metrics)\n\tc.errTotal.Collect(metrics)\n\tc.walkerRemovalTotal.Collect(metrics)\n\tc.walkerErrorTotal.Collect(metrics)\n\tc.walkerEmptyDirTotal.Collect(metrics)\n\tc.walkerEmptyDirRemovalTotal.Collect(metrics)\n}", "func (d *decorator) Collect(in chan<- prometheus.Metric) {\n\td.duration.Collect(in)\n\td.requests.Collect(in)\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\te.zpool.getStatus()\n\te.poolUsage.Set(float64(e.zpool.capacity))\n\te.providersOnline.Set(float64(e.zpool.online))\n\te.providersFaulted.Set(float64(e.zpool.faulted))\n\n\tch <- e.poolUsage\n\tch <- e.providersOnline\n\tch <- e.providersFaulted\n}", "func (c *CadvisorCollector) Collect(ch chan<- datapoint.Datapoint) {\n\tc.collectMachineInfo(ch)\n\tc.collectVersionInfo(ch)\n\tc.collectContainersInfo(ch)\n\t//c.errors.Collect(ch)\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tok := e.collectPeersMetric(ch)\n\tok = e.collectLeaderMetric(ch) && ok\n\tok = e.collectNodesMetric(ch) && ok\n\tok = e.collectMembersMetric(ch) && ok\n\tok = e.collectMembersWanMetric(ch) && ok\n\tok = e.collectServicesMetric(ch) && ok\n\tok = e.collectHealthStateMetric(ch) && ok\n\tok = e.collectKeyValues(ch) && ok\n\n\tif ok 
{\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tup, prometheus.GaugeValue, 1.0,\n\t\t)\n\t} else {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tup, prometheus.GaugeValue, 0.0,\n\t\t)\n\t}\n}", "func (cpuCollector *CPUCollector) Collect() {\n\tcpuCollector.cpuStats.GetCPUStats()\n\n\tcpuCollector.cpuMetrics.cpuTotal.Set(float64(cpuCollector.cpuStats.Total))\n\tcpuCollector.cpuMetrics.cupIdle.Set(float64(cpuCollector.cpuStats.Idle))\n\tcpuCollector.cpuMetrics.cpuUtilization.Set(cpuCollector.cpuStats.Utilization)\n}", "func (c *bgpCollector) Collect(client collector.Client, ch chan<- prometheus.Metric, labelValues []string) error {\n\terr := c.collect(client, ch, labelValues)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.withCollectors(func(cs []prometheus.Collector) {\n\t\tfor _, c := range cs {\n\t\t\tc.Collect(ch)\n\t\t}\n\t})\n}", "func (e *ebpfConntracker) Collect(ch chan<- prometheus.Metric) {\n\tebpfTelemetry := &netebpf.ConntrackTelemetry{}\n\tif err := e.telemetryMap.Lookup(unsafe.Pointer(&zero), unsafe.Pointer(ebpfTelemetry)); err != nil {\n\t\tlog.Tracef(\"error retrieving the telemetry struct: %s\", err)\n\t} else {\n\t\tdelta := ebpfTelemetry.Registers - conntrackerTelemetry.lastRegisters\n\t\tconntrackerTelemetry.lastRegisters = ebpfTelemetry.Registers\n\t\tch <- prometheus.MustNewConstMetric(conntrackerTelemetry.registersTotal, prometheus.CounterValue, float64(delta))\n\t}\n}", "func (coll WmiCollector) Collect(ch chan<- prometheus.Metric) {\n\tdefer trace()()\n\twg := sync.WaitGroup{}\n\twg.Add(len(coll.collectors))\n\tfor name, c := range coll.collectors {\n\t\tgo func(name string, c collector.Collector) {\n\t\t\texecute(name, c, ch)\n\t\t\twg.Done()\n\t\t}(name, c)\n\t}\n\twg.Wait()\n\tscrapeDurations.Collect(ch)\n}", "func (s PowerControl) Collect(ch chan<- prometheus.Metric) {\n\tmetric := config.GOFISH.Service\n\n\tchass, _ := metric.Chassis()\n\n\tfor _, v := range chass {\n\t\tpowers, _ := v.Power()\n\t\tif powers != nil {\n\t\t\tfor _, p := range powers.PowerControl {\n\t\t\t\tch <- prometheus.MustNewConstMetric(config.C_powercontrol, prometheus.GaugeValue, float64(0),\n\t\t\t\t\tfmt.Sprintf(\"%v\", p.PowerMetrics.AverageConsumedWatts),\n\t\t\t\t\tfmt.Sprintf(\"%v\", p.PowerCapacityWatts),\n\t\t\t\t\tfmt.Sprintf(\"%v\", p.MemberID),\n\t\t\t\t\tfmt.Sprintf(\"%v\", p.PowerMetrics.IntervalInMin),\n\t\t\t\t\tfmt.Sprintf(\"%v\", p.PowerMetrics.MaxConsumedWatts),\n\t\t\t\t\tfmt.Sprintf(\"%v\", p.PowerMetrics.MinConsumedWatts),\n\t\t\t\t)\n\n\t\t\t\tch <- prometheus.MustNewConstMetric(config.C_powerconsumedbyall, prometheus.GaugeValue, float64(p.PowerConsumedWatts),\n\t\t\t\t\tfmt.Sprintf(\"%v\", p.PowerCapacityWatts),\n\t\t\t\t\tfmt.Sprintf(\"%v\", p.MemberID),\n\t\t\t\t)\n\t\t\t}\n\n\t\t}\n\t}\n}", "func (c *InterfacesCollector) Collect(ch chan<- prometheus.Metric) {\n\tfor _, m := range c.collectors() {\n\t\tm.Collect(ch)\n\t}\n}", "func (r *RGWCollector) Collect(ch chan<- prometheus.Metric, version *Version) {\n\tif !r.background {\n\t\tr.logger.WithField(\"background\", r.background).Debug(\"collecting RGW GC stats\")\n\t\terr := r.collect()\n\t\tif err != nil {\n\t\t\tr.logger.WithField(\"background\", r.background).WithError(err).Error(\"error collecting RGW GC stats\")\n\t\t}\n\t}\n\n\tfor _, metric := range r.collectorList() {\n\t\tmetric.Collect(ch)\n\t}\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent 
collects.\n\tdefer e.mutex.Unlock()\n\n\tif err := e.scrape(); err != nil {\n\t\tlog.Error(err)\n\t\tnomad_up.Set(0)\n\t\tch <- nomad_up\n\t\treturn\n\t}\n\n\tch <- nomad_up\n\tch <- metric_uptime\n\tch <- metric_request_response_time_total\n\tch <- metric_request_response_time_avg\n\n\tfor _, metric := range metric_request_status_count_current {\n\t\tch <- metric\n\t}\n\tfor _, metric := range metric_request_status_count_total {\n\t\tch <- metric\n\t}\n}", "func (pc *PrometheusCollector) Collect(ch chan<- prometheus.Metric) {\n\tpc.attempts.Collect(ch)\n\tpc.errors.Collect(ch)\n\tpc.successes.Collect(ch)\n\tpc.failures.Collect(ch)\n\tpc.rejects.Collect(ch)\n\tpc.shortCircuits.Collect(ch)\n\tpc.timeouts.Collect(ch)\n\tpc.fallbackSuccesses.Collect(ch)\n\tpc.fallbackFailures.Collect(ch)\n\tpc.totalDuration.Collect(ch)\n\tpc.runDuration.Collect(ch)\n}", "func (c *StatsCollector) Collect(metricChannel chan<- prometheus.Metric) {\n\t// read all stats from Kamailio\n\tif completeStatMap, err := c.fetchStats(); err == nil {\n\t\t// and produce various prometheus.Metric for well-known stats\n\t\tproduceMetrics(completeStatMap, metricChannel)\n\t\t// produce prometheus.Metric objects for scripted stats (if any)\n\t\tconvertScriptedMetrics(completeStatMap, metricChannel)\n\t} else {\n\t\t// something went wrong\n\t\t// TODO: add a error metric\n\t\tlog.Error(\"Could not fetch values from kamailio\", err)\n\t}\n}", "func (m *Monitoring) collect() {\n\tfor {\n\t\tevents, ok := <-m.ch\n\t\tif !ok {\n\t\t\tlog.Printf(\"event channel is closed\")\n\t\t\treturn\n\t\t}\n\n\t\tif err := m.w.Write(context.Background(), events); err != nil {\n\t\t\tlog.Printf(\"failed to write metric events %+v: %v\", events, err)\n\t\t}\n\t}\n\n}", "func (e *exporter) Collect(ch chan<- prometheus.Metric) {\n\twg := sync.WaitGroup{}\n\twg.Add(len(e.Collectors))\n\tfor name, c := range e.Collectors {\n\t\tgo func(name string, c Collector) {\n\t\t\texecute(name, c, ch)\n\t\t\twg.Done()\n\t\t}(name, c)\n\t}\n\twg.Wait()\n}", "func (c *MSCluster_ClusterCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {\n\tvar dst []MSCluster_Cluster\n\tq := queryAll(&dst, c.logger)\n\tif err := wmi.QueryNamespace(q, &dst, \"root/MSCluster\"); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, v := range dst {\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.AddEvictDelay,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.AddEvictDelay),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.AdminAccessPoint,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.AdminAccessPoint),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.AutoAssignNodeSite,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.AutoAssignNodeSite),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.AutoBalancerLevel,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.AutoBalancerLevel),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.AutoBalancerMode,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.AutoBalancerMode),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.BackupInProgress,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.BackupInProgress),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.BlockCacheSize,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.BlockCacheSize),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- 
prometheus.MustNewConstMetric(\n\t\t\tc.ClusSvcHangTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusSvcHangTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusSvcRegroupOpeningTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusSvcRegroupOpeningTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusSvcRegroupPruningTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusSvcRegroupPruningTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusSvcRegroupStageTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusSvcRegroupStageTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusSvcRegroupTickInMilliseconds,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusSvcRegroupTickInMilliseconds),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusterEnforcedAntiAffinity,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusterEnforcedAntiAffinity),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusterFunctionalLevel,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusterFunctionalLevel),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusterGroupWaitDelay,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusterGroupWaitDelay),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusterLogLevel,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusterLogLevel),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusterLogSize,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusterLogSize),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusterUpgradeVersion,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusterUpgradeVersion),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.CrossSiteDelay,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.CrossSiteDelay),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.CrossSiteThreshold,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.CrossSiteThreshold),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.CrossSubnetDelay,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.CrossSubnetDelay),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.CrossSubnetThreshold,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.CrossSubnetThreshold),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.CsvBalancer,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.CsvBalancer),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DatabaseReadWriteMode,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DatabaseReadWriteMode),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DefaultNetworkRole,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DefaultNetworkRole),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DetectedCloudPlatform,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DetectedCloudPlatform),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DetectManagedEvents,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DetectManagedEvents),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DetectManagedEventsThreshold,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DetectManagedEventsThreshold),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- 
prometheus.MustNewConstMetric(\n\t\t\tc.DisableGroupPreferredOwnerRandomization,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DisableGroupPreferredOwnerRandomization),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DrainOnShutdown,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DrainOnShutdown),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DynamicQuorumEnabled,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DynamicQuorumEnabled),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.EnableSharedVolumes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.EnableSharedVolumes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.FixQuorum,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.FixQuorum),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.GracePeriodEnabled,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.GracePeriodEnabled),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.GracePeriodTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.GracePeriodTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.GroupDependencyTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.GroupDependencyTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.HangRecoveryAction,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.HangRecoveryAction),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.IgnorePersistentStateOnStartup,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.IgnorePersistentStateOnStartup),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.LogResourceControls,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.LogResourceControls),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.LowerQuorumPriorityNodeId,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.LowerQuorumPriorityNodeId),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.MaxNumberOfNodes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.MaxNumberOfNodes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.MessageBufferLength,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.MessageBufferLength),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.MinimumNeverPreemptPriority,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.MinimumNeverPreemptPriority),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.MinimumPreemptorPriority,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.MinimumPreemptorPriority),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.NetftIPSecEnabled,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.NetftIPSecEnabled),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.PlacementOptions,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.PlacementOptions),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.PlumbAllCrossSubnetRoutes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.PlumbAllCrossSubnetRoutes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.PreventQuorum,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.PreventQuorum),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.QuarantineDuration,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.QuarantineDuration),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- 
prometheus.MustNewConstMetric(\n\t\t\tc.QuarantineThreshold,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.QuarantineThreshold),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.QuorumArbitrationTimeMax,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.QuorumArbitrationTimeMax),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.QuorumArbitrationTimeMin,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.QuorumArbitrationTimeMin),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.QuorumLogFileSize,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.QuorumLogFileSize),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.QuorumTypeValue,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.QuorumTypeValue),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.RequestReplyTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.RequestReplyTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ResiliencyDefaultPeriod,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ResiliencyDefaultPeriod),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ResiliencyLevel,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ResiliencyLevel),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ResourceDllDeadlockPeriod,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ResourceDllDeadlockPeriod),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.RootMemoryReserved,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.RootMemoryReserved),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.RouteHistoryLength,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.RouteHistoryLength),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DBusTypes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DBusTypes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DCacheDesiredState,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DCacheDesiredState),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DCacheFlashReservePercent,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DCacheFlashReservePercent),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DCachePageSizeKBytes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DCachePageSizeKBytes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DEnabled,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DEnabled),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DIOLatencyThreshold,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DIOLatencyThreshold),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DOptimizations,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DOptimizations),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.SameSubnetDelay,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.SameSubnetDelay),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.SameSubnetThreshold,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.SameSubnetThreshold),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.SecurityLevel,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.SecurityLevel),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- 
prometheus.MustNewConstMetric(\n\t\t\tc.SecurityLevelForStorage,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.SecurityLevelForStorage),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.SharedVolumeVssWriterOperationTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.SharedVolumeVssWriterOperationTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ShutdownTimeoutInMinutes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ShutdownTimeoutInMinutes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.UseClientAccessNetworksForSharedVolumes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.UseClientAccessNetworksForSharedVolumes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.WitnessDatabaseWriteTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.WitnessDatabaseWriteTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.WitnessDynamicWeight,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.WitnessDynamicWeight),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.WitnessRestartInterval,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.WitnessRestartInterval),\n\t\t\tv.Name,\n\t\t)\n\n\t}\n\n\treturn nil\n}", "func (c *Collector) Collect(ch chan<- prometheus.Metric) {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\tc.totalScrapes.Inc()\n\terr := c.getDadataBalance()\n\tif err != nil {\n\t\tc.failedBalanceScrapes.Inc()\n\t}\n\terr = c.getDadataStats()\n\tif err != nil {\n\t\tc.failedStatsScrapes.Inc()\n\t}\n\n\tch <- c.totalScrapes\n\tch <- c.failedBalanceScrapes\n\tch <- c.failedStatsScrapes\n\tch <- c.CurrentBalance\n\tch <- c.ServicesClean\n\tch <- c.ServicesMerging\n\tch <- c.ServicesSuggestions\n}", "func (c *interfaceCollector) Collect(client *rpc.Client, ch chan<- prometheus.Metric, labelValues []string) error {\n\tstats, err := c.interfaceStats(client)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, s := range stats {\n\t\tc.collectForInterface(s, ch, labelValues)\n\t}\n\n\treturn nil\n}", "func (b Blackbox) Collect(metrics chan<- prometheus.Metric) {\n\tb.fetchReferenceDiscoveryMetrics.Collect(metrics)\n\tb.httpPostMetrics.Collect(metrics)\n\tb.wantedRefs.Collect(metrics)\n}", "func (k *KubernetesCollector) Collect(ch chan<- prometheus.Metric) error {\n\tvar metric prometheus.Metric\n\n\tlistOptions := metav1.ListOptions{\n\t\tLabelSelector: labels.Everything().String(),\n\t\tFieldSelector: fields.Everything().String(),\n\t}\n\tnodes, err := k.client.CoreV1().Nodes().List(context.TODO(), listOptions)\n\tif err != nil {\n\t\treturn trace.Wrap(err, \"failed to query nodes: %v\", err)\n\t}\n\n\tfor _, item := range nodes.Items {\n\t\tfor _, condition := range item.Status.Conditions {\n\t\t\tif condition.Type != v1.NodeReady {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif condition.Status == v1.ConditionTrue {\n\t\t\t\tif metric, err = k.nodeIsReady.newConstMetric(1.0, item.Name); err != nil {\n\t\t\t\t\treturn trace.Wrap(err, \"failed to create prometheus metric: %v\", err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif metric, err = k.nodeIsReady.newConstMetric(0.0, item.Name); err != nil {\n\t\t\t\t\treturn trace.Wrap(err, \"failed to create prometheus metric: %v\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tch <- metric\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c *KubernetesCollector) Collect(ch chan<- prometheus.Metric) {\n\tctx, cancel := context.WithTimeout(context.Background(), c.timeout)\n\tdefer cancel()\n\tclusters, _, err := 
c.client.Kubernetes.List(ctx, nil)\n\tif err != nil {\n\t\tc.errors.WithLabelValues(\"kubernetes\").Add(1)\n\t\tlevel.Warn(c.logger).Log(\n\t\t\t\"msg\", \"can't list clusters\",\n\t\t\t\"err\", err,\n\t\t)\n\t}\n\n\tfor _, cluster := range clusters {\n\t\tlabels := []string{\n\t\t\tcluster.ID,\n\t\t\tcluster.Name,\n\t\t\tcluster.RegionSlug,\n\t\t\tcluster.VersionSlug,\n\t\t}\n\n\t\tvar active float64\n\t\t//TODO(dazwilkin) better reflect richer Kubernetes cluster states\n\t\tif cluster.Status.State == godo.KubernetesClusterStatusRunning {\n\t\t\tactive = 1.0\n\t\t}\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.Up,\n\t\t\tprometheus.GaugeValue,\n\t\t\tactive,\n\t\t\tlabels...,\n\t\t)\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.NodePools,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(len(cluster.NodePools)),\n\t\t\tlabels...,\n\t\t)\n\n\t\tfor _, nodepool := range cluster.NodePools {\n\t\t\t// Assume NodePools are constrained to the cluster's Region\n\t\t\t// If so, we can labels a cluster's NodePools by the cluster's region\n\t\t\tlabels := []string{\n\t\t\t\tnodepool.ID,\n\t\t\t\tnodepool.Name,\n\t\t\t\tcluster.RegionSlug,\n\t\t\t}\n\t\t\tch <- prometheus.MustNewConstMetric(\n\t\t\t\tc.Nodes,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tfloat64(nodepool.Count),\n\t\t\t\tlabels...,\n\t\t\t)\n\t\t}\n\t}\n}", "func (y *YarnMetrics) Collect(ch chan<- prometheus.Metric) {\n\ty.metricsLock.Lock()\n\tdefer y.metricsLock.Unlock()\n\tif y.metrics != nil {\n\t\tch <- prometheus.MustNewConstMetric(y.containerStatusDesc, prometheus.GaugeValue,\n\t\t\tfloat64(y.metrics.ContainersLaunched), y.nodeName, \"launched\")\n\t\tch <- prometheus.MustNewConstMetric(y.containerStatusDesc, prometheus.GaugeValue,\n\t\t\tfloat64(y.metrics.ContainersCompleted), y.nodeName, \"completed\")\n\t\tch <- prometheus.MustNewConstMetric(y.containerStatusDesc, prometheus.GaugeValue,\n\t\t\tfloat64(y.metrics.ContainersFailed), y.nodeName, \"failed\")\n\t\tch <- prometheus.MustNewConstMetric(y.containerStatusDesc, prometheus.GaugeValue,\n\t\t\tfloat64(y.metrics.ContainersKilled), y.nodeName, \"killed\")\n\t\tch <- prometheus.MustNewConstMetric(y.containerStatusDesc, prometheus.GaugeValue,\n\t\t\tfloat64(y.metrics.ContainersRunning), y.nodeName, \"running\")\n\t\tch <- prometheus.MustNewConstMetric(y.containerStatusDesc, prometheus.GaugeValue,\n\t\t\tfloat64(y.metrics.ContainersIniting), y.nodeName, \"initing\")\n\t}\n\tif y.nodeStatus != nil {\n\t\tch <- prometheus.MustNewConstMetric(y.nodeStatusDesc, prometheus.GaugeValue,\n\t\t\ty.nodeStatus.NodeHealthyFloat, y.nodeName)\n\t}\n}", "func (w *Writer) Collect(ch chan<- prometheus.Metric) {\n\tw.kafkaWriteStatus.Collect(ch)\n\tw.queuedForWrites.Collect(ch)\n}", "func (c *VM) Collect(ctx context.Context) error {\n\tmetrics := cgm.Metrics{}\n\n\tc.Lock()\n\n\tif c.runTTL > time.Duration(0) {\n\t\tif time.Since(c.lastEnd) < c.runTTL {\n\t\t\tc.logger.Warn().Msg(collector.ErrTTLNotExpired.Error())\n\t\t\tc.Unlock()\n\t\t\treturn collector.ErrTTLNotExpired\n\t\t}\n\t}\n\tif c.running {\n\t\tc.logger.Warn().Msg(collector.ErrAlreadyRunning.Error())\n\t\tc.Unlock()\n\t\treturn collector.ErrAlreadyRunning\n\t}\n\n\tc.running = true\n\tc.lastStart = time.Now()\n\tc.Unlock()\n\n\tif err := c.parseMemstats(ctx, &metrics); err != nil {\n\t\tc.setStatus(metrics, err)\n\t\treturn fmt.Errorf(\"%s parseMemstats: %w\", c.pkgID, err)\n\t}\n\n\tif err := c.parseVMstats(ctx, &metrics); err != nil {\n\t\tc.setStatus(metrics, err)\n\t\treturn fmt.Errorf(\"%s parseVMstats: %w\", c.pkgID, 
err)\n\t}\n\n\tc.setStatus(metrics, nil)\n\treturn nil\n}", "func infoCollect(\n\tmetrics cmetrics,\n\tinfo string,\n\tlabelValues ...string,\n) []prometheus.Metric {\n\tvar res []prometheus.Metric\n\tstats := parseInfo(info)\n\tvalidLabelValues := make([]string, len(labelValues))\n\tfor pos, lv := range labelValues {\n\t\tvalidLabelValues[pos] = sanitizeLabelValue(lv)\n\t}\n\tfor key, m := range metrics {\n\t\tv, ok := stats[key]\n\t\tif !ok {\n\t\t\t// key presence depends on (namespace) configuration\n\t\t\tcontinue\n\t\t}\n\t\tf, err := parseFloatOrBool(v)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"%q invalid value %q: %s\", key, v, err)\n\t\t\tcontinue\n\t\t}\n\t\tres = append(\n\t\t\tres,\n\t\t\tprometheus.MustNewConstMetric(m.desc, m.typ, f, validLabelValues...),\n\t\t)\n\t}\n\treturn res\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\tif err := e.collect(ch); err != nil {\n\t\tglog.Error(fmt.Sprintf(\"Error collecting stats: %s\", err))\n\t}\n\treturn\n}", "func (m *Metrics) Collect() error {\n\tfor range time.Tick(m.cInterval) {\n\t\tcontainers, err := m.docker.ContainerList()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, container := range containers {\n\t\t\tif _, ok := m.metrics[container.Names[0][1:]]; !ok {\n\t\t\t\tgo func() {\n\t\t\t\t\tif err := m.collect(container.Names[0][1:]); err != nil {\n\t\t\t\t\t\tlog.Fatal().Err(err).Msg(\"collection metrics error\")\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t\tlog.Info().Msgf(\"new container %s\", container.Names[0][1:])\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (p *Collector) Collect(c chan<- prometheus.Metric) {\n\tp.Sink.mu.Lock()\n\tdefer p.Sink.mu.Unlock()\n\n\texpire := p.Sink.expiration != 0\n\tnow := time.Now()\n\tfor k, v := range p.Sink.gauges {\n\t\tlast := p.Sink.updates[k]\n\t\tif expire && last.Add(p.Sink.expiration).Before(now) {\n\t\t\tdelete(p.Sink.updates, k)\n\t\t\tdelete(p.Sink.gauges, k)\n\t\t} else {\n\t\t\tv.Collect(c)\n\t\t}\n\t}\n\tfor k, v := range p.Sink.summaries {\n\t\tlast := p.Sink.updates[k]\n\t\tif expire && last.Add(p.Sink.expiration).Before(now) {\n\t\t\tdelete(p.Sink.updates, k)\n\t\t\tdelete(p.Sink.summaries, k)\n\t\t} else {\n\t\t\tv.Collect(c)\n\t\t}\n\t}\n\tfor k, v := range p.Sink.counters {\n\t\tlast := p.Sink.updates[k]\n\t\tif expire && last.Add(p.Sink.expiration).Before(now) {\n\t\t\tdelete(p.Sink.updates, k)\n\t\t\tdelete(p.Sink.counters, k)\n\t\t} else {\n\t\t\tv.Collect(c)\n\t\t}\n\t}\n}", "func (c *VM) Collect() error {\n\tmetrics := cgm.Metrics{}\n\n\tc.Lock()\n\n\tif c.runTTL > time.Duration(0) {\n\t\tif time.Since(c.lastEnd) < c.runTTL {\n\t\t\tc.logger.Warn().Msg(collector.ErrTTLNotExpired.Error())\n\t\t\tc.Unlock()\n\t\t\treturn collector.ErrTTLNotExpired\n\t\t}\n\t}\n\tif c.running {\n\t\tc.logger.Warn().Msg(collector.ErrAlreadyRunning.Error())\n\t\tc.Unlock()\n\t\treturn collector.ErrAlreadyRunning\n\t}\n\n\tc.running = true\n\tc.lastStart = time.Now()\n\tc.Unlock()\n\n\tif err := c.parseMemstats(&metrics); err != nil {\n\t\tc.setStatus(metrics, err)\n\t\treturn errors.Wrap(err, c.pkgID)\n\t}\n\n\tif err := c.parseVMstats(&metrics); err != nil {\n\t\tc.setStatus(metrics, err)\n\t\treturn errors.Wrap(err, c.pkgID)\n\t}\n\n\tc.setStatus(metrics, nil)\n\treturn nil\n}", "func (c *storageCollector) Collect(client *rpc.Client, ch chan<- prometheus.Metric, labelValues []string) error {\n\tvar x = StorageRpc{}\n\terr := client.RunCommandAndParse(\"show system 
storage\", &x)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, f := range x.Information.Filesystems {\n\t\tl := append(labelValues, f.FilesystemName, f.MountedOn)\n\n\t\tch <- prometheus.MustNewConstMetric(totalBlocksDesc, prometheus.GaugeValue, float64(f.TotalBlocks), l...)\n\t\tch <- prometheus.MustNewConstMetric(usedBlocksDesc, prometheus.GaugeValue, float64(f.UsedBlocks), l...)\n\t\tch <- prometheus.MustNewConstMetric(availableBlocksDesc, prometheus.GaugeValue, float64(f.AvailableBlocks), l...)\n\t\tpercent := strings.TrimSpace(f.UsedPercent)\n\t\tvalue, err := strconv.ParseFloat(percent, 64)\n\t\tif err != nil {\n\t\t\tvalue = 0\n\t\t}\n\t\tch <- prometheus.MustNewConstMetric(usedPercentDesc, prometheus.GaugeValue, value, l...)\n\t}\n\n\treturn nil\n}", "func (c *Client) Collect(ch chan<- prometheus.Metric) {\n\tc.metrics.functionInvocation.Collect(ch)\n\tc.metrics.functionsHistogram.Collect(ch)\n\tc.metrics.queueHistogram.Collect(ch)\n\tc.metrics.functionInvocationStarted.Collect(ch)\n\tc.metrics.serviceReplicasGauge.Reset()\n\tfor _, service := range c.services {\n\t\tvar serviceName string\n\t\tif len(service.Namespace) > 0 {\n\t\t\tserviceName = fmt.Sprintf(\"%s.%s\", service.Name, service.Namespace)\n\t\t} else {\n\t\t\tserviceName = service.Name\n\t\t}\n\t\tc.metrics.serviceReplicasGauge.\n\t\t\tWithLabelValues(serviceName).\n\t\t\tSet(float64(service.Replicas))\n\t}\n\tc.metrics.serviceReplicasGauge.Collect(ch)\n}", "func (p *ProcMetrics) Collect() {\n\tif m, err := CollectProcInfo(p.pid); err == nil {\n\t\tnow := time.Now()\n\n\t\tif !p.lastTime.IsZero() {\n\t\t\tratio := 1.0\n\t\t\tswitch {\n\t\t\tcase m.CPU.Period > 0 && m.CPU.Quota > 0:\n\t\t\t\tratio = float64(m.CPU.Quota) / float64(m.CPU.Period)\n\t\t\tcase m.CPU.Shares > 0:\n\t\t\t\tratio = float64(m.CPU.Shares) / 1024\n\t\t\tdefault:\n\t\t\t\tratio = 1 / float64(runtime.NumCPU())\n\t\t\t}\n\n\t\t\tinterval := ratio * float64(now.Sub(p.lastTime))\n\n\t\t\tp.cpu.user.time = m.CPU.User - p.last.CPU.User\n\t\t\tp.cpu.user.percent = 100 * float64(p.cpu.user.time) / interval\n\n\t\t\tp.cpu.system.time = m.CPU.Sys - p.last.CPU.Sys\n\t\t\tp.cpu.system.percent = 100 * float64(p.cpu.system.time) / interval\n\n\t\t\tp.cpu.total.time = (m.CPU.User + m.CPU.Sys) - (p.last.CPU.User + p.last.CPU.Sys)\n\t\t\tp.cpu.total.percent = 100 * float64(p.cpu.total.time) / interval\n\t\t}\n\n\t\tp.memory.available = m.Memory.Available\n\t\tp.memory.size = m.Memory.Size\n\t\tp.memory.resident.usage = m.Memory.Resident\n\t\tp.memory.resident.percent = 100 * float64(p.memory.resident.usage) / float64(p.memory.available)\n\t\tp.memory.shared.usage = m.Memory.Shared\n\t\tp.memory.text.usage = m.Memory.Text\n\t\tp.memory.data.usage = m.Memory.Data\n\t\tp.memory.pagefault.major.count = m.Memory.MajorPageFaults - p.last.Memory.MajorPageFaults\n\t\tp.memory.pagefault.minor.count = m.Memory.MinorPageFaults - p.last.Memory.MinorPageFaults\n\n\t\tp.files.open = m.Files.Open\n\t\tp.files.max = m.Files.Max\n\n\t\tp.threads.num = m.Threads.Num\n\t\tp.threads.switches.voluntary.count = m.Threads.VoluntaryContextSwitches - p.last.Threads.VoluntaryContextSwitches\n\t\tp.threads.switches.involuntary.count = m.Threads.InvoluntaryContextSwitches - p.last.Threads.InvoluntaryContextSwitches\n\n\t\tp.last = m\n\t\tp.lastTime = now\n\t\tp.engine.Report(p)\n\t}\n}", "func (o *OSDCollector) Collect(ch chan<- prometheus.Metric) {\n\tif err := o.collectOSDPerf(); err != nil {\n\t\tlog.Println(\"failed collecting osd perf stats:\", err)\n\t}\n\n\tif err := 
o.collectOSDDump(); err != nil {\n\t\tlog.Println(\"failed collecting osd dump:\", err)\n\t}\n\n\tif err := o.collectOSDDF(); err != nil {\n\t\tlog.Println(\"failed collecting osd metrics:\", err)\n\t}\n\n\tif err := o.collectOSDTreeDown(ch); err != nil {\n\t\tlog.Println(\"failed collecting osd metrics:\", err)\n\t}\n\n\tfor _, metric := range o.collectorList() {\n\t\tmetric.Collect(ch)\n\t}\n\n\tif err := o.collectOSDScrubState(ch); err != nil {\n\t\tlog.Println(\"failed collecting osd scrub state:\", err)\n\t}\n}", "func (r *RegionStatistics) Collect() {\n\tr.RLock()\n\tdefer r.RUnlock()\n\tregionMissPeerRegionCounter.Set(float64(len(r.stats[MissPeer])))\n\tregionExtraPeerRegionCounter.Set(float64(len(r.stats[ExtraPeer])))\n\tregionDownPeerRegionCounter.Set(float64(len(r.stats[DownPeer])))\n\tregionPendingPeerRegionCounter.Set(float64(len(r.stats[PendingPeer])))\n\tregionOfflinePeerRegionCounter.Set(float64(len(r.stats[OfflinePeer])))\n\tregionLearnerPeerRegionCounter.Set(float64(len(r.stats[LearnerPeer])))\n\tregionEmptyRegionCounter.Set(float64(len(r.stats[EmptyRegion])))\n\tregionOversizedRegionCounter.Set(float64(len(r.stats[OversizedRegion])))\n\tregionUndersizedRegionCounter.Set(float64(len(r.stats[UndersizedRegion])))\n\tregionWitnessLeaderRegionCounter.Set(float64(len(r.stats[WitnessLeader])))\n}", "func (c *goCollector) Collect(ch chan<- Metric) {\n\t// Collect base non-memory metrics.\n\tc.base.Collect(ch)\n\n\tif len(c.sampleBuf) == 0 {\n\t\treturn\n\t}\n\n\t// Collect must be thread-safe, so prevent concurrent use of\n\t// sampleBuf elements. Just read into sampleBuf but write all the data\n\t// we get into our Metrics or MemStats.\n\t//\n\t// This lock also ensures that the Metrics we send out are all from\n\t// the same updates, ensuring their mutual consistency insofar as\n\t// is guaranteed by the runtime/metrics package.\n\t//\n\t// N.B. This locking is heavy-handed, but Collect is expected to be called\n\t// relatively infrequently. Also the core operation here, metrics.Read,\n\t// is fast (O(tens of microseconds)) so contention should certainly be\n\t// low, though channel operations and any allocations may add to that.\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\t// Populate runtime/metrics sample buffer.\n\tmetrics.Read(c.sampleBuf)\n\n\t// Collect all our runtime/metrics user chose to expose from sampleBuf (if any).\n\tfor i, metric := range c.rmExposedMetrics {\n\t\t// We created samples for exposed metrics first in order, so indexes match.\n\t\tsample := c.sampleBuf[i]\n\n\t\t// N.B. switch on concrete type because it's significantly more efficient\n\t\t// than checking for the Counter and Gauge interface implementations. In\n\t\t// this case, we control all the types here.\n\t\tswitch m := metric.(type) {\n\t\tcase *counter:\n\t\t\t// Guard against decreases. 
This should never happen, but a failure\n\t\t\t// to do so will result in a panic, which is a harsh consequence for\n\t\t\t// a metrics collection bug.\n\t\t\tv0, v1 := m.get(), unwrapScalarRMValue(sample.Value)\n\t\t\tif v1 > v0 {\n\t\t\t\tm.Add(unwrapScalarRMValue(sample.Value) - m.get())\n\t\t\t}\n\t\t\tm.Collect(ch)\n\t\tcase *gauge:\n\t\t\tm.Set(unwrapScalarRMValue(sample.Value))\n\t\t\tm.Collect(ch)\n\t\tcase *batchHistogram:\n\t\t\tm.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name))\n\t\t\tm.Collect(ch)\n\t\tdefault:\n\t\t\tpanic(\"unexpected metric type\")\n\t\t}\n\t}\n\n\tif c.msMetricsEnabled {\n\t\t// ms is a dummy MemStats that we populate ourselves so that we can\n\t\t// populate the old metrics from it if goMemStatsCollection is enabled.\n\t\tvar ms runtime.MemStats\n\t\tmemStatsFromRM(&ms, c.sampleMap)\n\t\tfor _, i := range c.msMetrics {\n\t\t\tch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))\n\t\t}\n\t}\n}", "func (m httpReferenceDiscoveryMetrics) Collect(metrics chan<- prometheus.Metric) {\n\tm.firstPacket.Collect(metrics)\n\tm.totalTime.Collect(metrics)\n\tm.advertisedRefs.Collect(metrics)\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\tup, result := e.scrape(ch)\n\n\tch <- e.totalScrapes\n\tch <- e.jsonParseFailures\n\tch <- prometheus.MustNewConstMetric(iqAirUp, prometheus.GaugeValue, up)\n\tch <- prometheus.MustNewConstMetric(iqAirCO2, prometheus.GaugeValue, float64(result.CO2))\n\tch <- prometheus.MustNewConstMetric(iqAirP25, prometheus.GaugeValue, float64(result.P25))\n\tch <- prometheus.MustNewConstMetric(iqAirP10, prometheus.GaugeValue, float64(result.P10))\n\tch <- prometheus.MustNewConstMetric(iqAirTemp, prometheus.GaugeValue, float64(result.Temperature))\n\tch <- prometheus.MustNewConstMetric(iqAirHumidity, prometheus.GaugeValue, float64(result.Humidity))\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\tif err := e.collect(ch); err != nil {\n\t\tlog.Errorf(\"Error scraping ingestor: %s\", err)\n\t}\n\treturn\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\t// Protect metrics from concurrent collects.\n\te.mutex.Lock()\n\tdefer e.mutex.Unlock()\n\n\t// Scrape metrics from Tankerkoenig API.\n\tif err := e.scrape(ch); err != nil {\n\t\te.logger.Printf(\"error: cannot scrape tankerkoenig api: %v\", err)\n\t}\n\n\t// Collect metrics.\n\te.up.Collect(ch)\n\te.scrapeDuration.Collect(ch)\n\te.failedScrapes.Collect(ch)\n\te.totalScrapes.Collect(ch)\n}", "func (m *Mare) Reduce(reduceFunc func(a, b interface{}) interface{}) map[interface{}]interface{} {\n\tresults := make(map[interface{}]interface{})\n\n\tfor item := range m.mapOutChan {\n\t\tmapItem, present := results[item.Key]\n\t\tif present {\n\t\t\tif m.trace {\n\t\t\t\tlog.Printf(\"Reducing %v with %v for key %v\", mapItem, item.Value, item.Key)\n\t\t\t}\n\t\t\tresults[item.Key] = reduceFunc(mapItem, item.Value)\n\t\t} else {\n\t\t\tif m.trace {\n\t\t\t\tlog.Printf(\"Saving %v for key %v\", item.Value, item.Key)\n\t\t\t}\n\t\t\tresults[item.Key] = item.Value\n\t\t}\n\t}\n\n\treturn results\n}", "func (c *DebugFsStatCollector) Collect(ch chan<- prometheus.Metric) {\n\tc.updateProbeStats(0, \"kprobe\", ch)\n\tc.updateProbeStats(0, \"uprobe\", ch)\n}", "func (c *System) Collect(ctx context.Context) error {\n\tmetrics := cgm.Metrics{}\n\n\tc.Lock()\n\n\tif 
c.runTTL > time.Duration(0) {\n\t\tif time.Since(c.lastEnd) < c.runTTL {\n\t\t\tc.logger.Warn().Msg(collector.ErrTTLNotExpired.Error())\n\t\t\tc.Unlock()\n\t\t\treturn collector.ErrTTLNotExpired\n\t\t}\n\t}\n\tif c.running {\n\t\tc.logger.Warn().Msg(collector.ErrAlreadyRunning.Error())\n\t\tc.Unlock()\n\t\treturn collector.ErrAlreadyRunning\n\t}\n\n\tc.running = true\n\tc.lastStart = time.Now()\n\tc.Unlock()\n\n\tvar dst []Win32_PerfFormattedData_PerfOS_System\n\tqry := wmi.CreateQuery(dst, \"\")\n\tif err := wmi.Query(qry, &dst); err != nil {\n\t\tc.logger.Error().Err(err).Str(\"query\", qry).Msg(\"wmi query error\")\n\t\tc.setStatus(metrics, err)\n\t\treturn fmt.Errorf(\"wmi %s query: %w\", c.pkgID, err)\n\t}\n\n\tmetricType := \"L\"\n\ttagUnitsPercent := cgm.Tag{Category: \"units\", Value: \"percent\"}\n\tfor _, item := range dst {\n\t\tif done(ctx) {\n\t\t\treturn fmt.Errorf(\"context: %w\", ctx.Err())\n\t\t}\n\n\t\t_ = c.addMetric(&metrics, \"\", \"AlignmentFixupsPerSec\", metricType, item.AlignmentFixupsPerSec, cgm.Tags{})\n\t\t_ = c.addMetric(&metrics, \"\", \"ContextSwitchesPerSec\", metricType, item.ContextSwitchesPerSec, cgm.Tags{})\n\t\t_ = c.addMetric(&metrics, \"\", \"ExceptionDispatchesPerSec\", metricType, item.ExceptionDispatchesPerSec, cgm.Tags{})\n\t\t_ = c.addMetric(&metrics, \"\", \"FileControlBytesPerSec\", metricType, item.FileControlBytesPerSec, cgm.Tags{})\n\t\t_ = c.addMetric(&metrics, \"\", \"FileControlOperationsPerSec\", metricType, item.FileControlOperationsPerSec, cgm.Tags{})\n\t\t_ = c.addMetric(&metrics, \"\", \"FileDataOperationsPerSec\", metricType, item.FileDataOperationsPerSec, cgm.Tags{})\n\t\t_ = c.addMetric(&metrics, \"\", \"FileReadBytesPerSec\", metricType, item.FileReadBytesPerSec, cgm.Tags{})\n\t\t_ = c.addMetric(&metrics, \"\", \"FileReadOperationsPerSec\", metricType, item.FileReadOperationsPerSec, cgm.Tags{})\n\t\t_ = c.addMetric(&metrics, \"\", \"FileWriteBytesPerSec\", metricType, item.FileWriteBytesPerSec, cgm.Tags{})\n\t\t_ = c.addMetric(&metrics, \"\", \"FileWriteOperationsPerSec\", metricType, item.FileWriteOperationsPerSec, cgm.Tags{})\n\t\t_ = c.addMetric(&metrics, \"\", \"FloatingEmulationsPerSec\", metricType, item.FloatingEmulationsPerSec, cgm.Tags{})\n\t\t_ = c.addMetric(&metrics, \"\", \"PercentRegistryQuotaInUse\", metricType, item.PercentRegistryQuotaInUse, cgm.Tags{tagUnitsPercent})\n\t\t_ = c.addMetric(&metrics, \"\", \"Processes\", metricType, item.Processes, cgm.Tags{})\n\t\t_ = c.addMetric(&metrics, \"\", \"ProcessorQueueLength\", metricType, item.ProcessorQueueLength, cgm.Tags{})\n\t\t_ = c.addMetric(&metrics, \"\", \"SystemCallsPerSec\", metricType, item.SystemCallsPerSec, cgm.Tags{})\n\t\t_ = c.addMetric(&metrics, \"\", \"Threads\", metricType, item.Threads, cgm.Tags{})\n\t}\n\n\tc.setStatus(metrics, nil)\n\treturn nil\n}", "func (*noOpConntracker) Collect(ch chan<- prometheus.Metric) {}", "func (o *Orchestrator) collectActual(ctx context.Context) map[Worker][]interface{} {\n\ttype result struct {\n\t\tworker Worker\n\t\tactual []interface{}\n\t\terr error\n\t}\n\n\tlistCtx, _ := context.WithTimeout(ctx, o.timeout)\n\tresults := make(chan result, len(o.workers))\n\terrs := make(chan result, len(o.workers))\n\tfor _, worker := range o.workers {\n\t\tgo func(worker Worker) {\n\t\t\tlistResults, err := worker.List(listCtx)\n\t\t\tif err != nil {\n\t\t\t\terrs <- result{worker: worker, err: err}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tresults <- result{worker: worker, actual: listResults}\n\t\t}(worker)\n\t}\n\n\tt := 
time.NewTimer(o.timeout)\n\tvar state []WorkerState\n\tactual := make(map[Worker][]interface{})\n\tfor i := 0; i < len(o.workers); i++ {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tbreak\n\t\tcase nextResult := <-results:\n\t\t\tactual[nextResult.worker] = nextResult.actual\n\t\t\tstate = append(state, WorkerState{Worker: nextResult.worker, Tasks: nextResult.actual})\n\t\tcase err := <-errs:\n\t\t\to.log.Printf(\"Error trying to list tasks from %s: %s\", err.worker, err.err)\n\t\tcase <-t.C:\n\t\t\to.log.Printf(\"Communicator timeout. Using results available...\")\n\t\t\tbreak\n\t\t}\n\t}\n\n\to.lastActual = state\n\treturn actual\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tresp, err := e.Pihole.GetMetrics()\n\tif err != nil {\n\t\tlog.Errorf(\"Pihole error: %s\", err.Error())\n\t\treturn\n\t}\n\tlog.Debugf(\"PiHole metrics: %#v\", resp)\n\tch <- prometheus.MustNewConstMetric(\n\t\tdomainsBeingBlocked, prometheus.CounterValue, float64(resp.DomainsBeingBlocked))\n\n\tch <- prometheus.MustNewConstMetric(\n\t\tdnsQueries, prometheus.CounterValue, float64(resp.DNSQueriesToday))\n\n\tch <- prometheus.MustNewConstMetric(\n\t\tadsBlocked, prometheus.CounterValue, float64(resp.AdsBlockedToday))\n\n\tch <- prometheus.MustNewConstMetric(\n\t\tadsPercentage, prometheus.CounterValue, float64(resp.AdsPercentageToday))\n\n\tfor k, v := range resp.Querytypes {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tqueryTypes, prometheus.CounterValue, v, k)\n\t}\n\tfor k, v := range resp.TopQueries {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\ttopQueries, prometheus.CounterValue, float64(v), k)\n\t}\n\tfor k, v := range resp.TopAds {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\ttopAds, prometheus.CounterValue, float64(v), k)\n\n\t}\n\tfor k, v := range resp.TopSources {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\ttopSources, prometheus.CounterValue, float64(v), k)\n\t}\n}", "func CollectProcessMetrics(refresh time.Duration) {\n\t// Short circuit if the metics system is disabled\n\tif !Enabled {\n\t\treturn\n\t}\n\t// Create the various data collectors\n\tmemstates := make([]*runtime.MemStats, 2)\n\tdiskstates := make([]*DiskStats, 2)\n\tfor i := 0; i < len(memstates); i++ {\n\t\tmemstates[i] = new(runtime.MemStats)\n\t\tdiskstates[i] = new(DiskStats)\n\t}\n\t// Define the various metics to collect\n\tmemAllocs := metics.GetOrRegisterMeter(\"system/memory/allocs\", metics.DefaultRegistry)\n\tmemFrees := metics.GetOrRegisterMeter(\"system/memory/frees\", metics.DefaultRegistry)\n\tmemInuse := metics.GetOrRegisterMeter(\"system/memory/inuse\", metics.DefaultRegistry)\n\tmemPauses := metics.GetOrRegisterMeter(\"system/memory/pauses\", metics.DefaultRegistry)\n\n\tvar diskReads, diskReadBytes, diskWrites, diskWriteBytes metics.Meter\n\tif err := ReadDiskStats(diskstates[0]); err == nil {\n\t\tdiskReads = metics.GetOrRegisterMeter(\"system/disk/readcount\", metics.DefaultRegistry)\n\t\tdiskReadBytes = metics.GetOrRegisterMeter(\"system/disk/readdata\", metics.DefaultRegistry)\n\t\tdiskWrites = metics.GetOrRegisterMeter(\"system/disk/writecount\", metics.DefaultRegistry)\n\t\tdiskWriteBytes = metics.GetOrRegisterMeter(\"system/disk/writedata\", metics.DefaultRegistry)\n\t} else {\n\t\tbgmlogs.Debug(\"Failed to read disk metics\", \"err\", err)\n\t}\n\t// Iterate loading the different states and updating the meters\n\tfor i := 1; ; i++ {\n\t\truntime.ReadMemStats(memstates[i%2])\n\t\tmemAllocs.Mark(int64(memstates[i%2].Mallocs - 
memstates[(i-1)%2].Mallocs))\n\t\tmemFrees.Mark(int64(memstates[i%2].Frees - memstates[(i-1)%2].Frees))\n\t\tmemInuse.Mark(int64(memstates[i%2].Alloc - memstates[(i-1)%2].Alloc))\n\t\tmemPauses.Mark(int64(memstates[i%2].PauseTotalNs - memstates[(i-1)%2].PauseTotalNs))\n\n\t\tif ReadDiskStats(diskstates[i%2]) == nil {\n\t\t\tdiskReads.Mark(diskstates[i%2].ReadCount - diskstates[(i-1)%2].ReadCount)\n\t\t\tdiskReadBytes.Mark(diskstates[i%2].ReadBytes - diskstates[(i-1)%2].ReadBytes)\n\t\t\tdiskWrites.Mark(diskstates[i%2].WriteCount - diskstates[(i-1)%2].WriteCount)\n\t\t\tdiskWriteBytes.Mark(diskstates[i%2].WriteBytes - diskstates[(i-1)%2].WriteBytes)\n\t\t}\n\t\ttime.Sleep(refresh)\n\t}\n}", "func (c *solarCollector) Collect(ch chan<- prometheus.Metric) {\n\tc.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer c.mutex.Unlock()\n\tif err := c.collect(ch); err != nil {\n\t\tlog.Printf(\"Error getting solar controller data: %s\", err)\n\t\tc.scrapeFailures.Inc()\n\t\tc.scrapeFailures.Collect(ch)\n\t}\n\treturn\n}", "func (w *HotCache) CollectMetrics() {\n\tw.writeFlow.CollectMetrics(\"write\")\n\tw.readFlow.CollectMetrics(\"read\")\n}", "func (_m *IProvider) Monitor(_a0 map[string]*model.WorkerConfig, _a1 []*message.Message) {\n\t_m.Called(_a0, _a1)\n}", "func (p *promProducer) Collect(ch chan<- prometheus.Metric) {\n\tfor _, obj := range p.store.Objects() {\n\t\tmessage, ok := obj.(producers.MetricsMessage)\n\t\tif !ok {\n\t\t\tpromLog.Warnf(\"Unsupported message type %T\", obj)\n\t\t\tcontinue\n\t\t}\n\t\tdims := dimsToMap(message.Dimensions)\n\n\t\tfor _, d := range message.Datapoints {\n\t\t\tpromLog.Debugf(\"Processing datapoint %s\", d.Name)\n\t\t\tvar tagKeys []string\n\t\t\tvar tagVals []string\n\t\t\tfor k, v := range dims {\n\t\t\t\ttagKeys = append(tagKeys, sanitizeName(k))\n\t\t\t\ttagVals = append(tagVals, v)\n\t\t\t}\n\t\t\tfor k, v := range d.Tags {\n\t\t\t\ttagKeys = append(tagKeys, sanitizeName(k))\n\t\t\t\ttagVals = append(tagVals, v)\n\t\t\t}\n\n\t\t\tname := sanitizeName(d.Name)\n\t\t\tval, err := coerceToFloat(d.Value)\n\t\t\tif err != nil {\n\t\t\t\tpromLog.Warnf(\"Bad datapoint value %q: %s\", d.Value, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdesc := prometheus.NewDesc(name, \"DC/OS Metrics Datapoint\", tagKeys, nil)\n\t\t\tmetric, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, val, tagVals...)\n\t\t\tif err != nil {\n\t\t\t\tpromLog.Warnf(\"Could not create Prometheus metric %s: %s\", name, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tpromLog.Debugf(\"Emitting datapoint %s\", name)\n\t\t\tch <- metric\n\t\t}\n\n\t}\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\tup := e.scrape(ch)\n\n\tch <- prometheus.MustNewConstMetric(artifactoryUp, prometheus.GaugeValue, up)\n\tch <- e.totalScrapes\n\tch <- e.jsonParseFailures\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\tif err := e.collect(ch); err != nil {\n\t\tlog.Errorf(\"Error scraping: %s\", err)\n\t}\n\treturn\n}", "func (s *CPUStat) Collect() {\n\tfile, err := os.Open(root + \"proc/stat\")\n\tdefer file.Close()\n\n\tif err != nil {\n\t\treturn\n\t}\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tf := regexp.MustCompile(\"\\\\s+\").Split(scanner.Text(), -1)\n\n\t\tisCPU, err := regexp.MatchString(\"^cpu\\\\d*\", f[0])\n\t\tif err == nil && isCPU {\n\t\t\tif 
f[0] == \"cpu\" {\n\t\t\t\tparseCPUline(s.All, f)\n\t\t\t\tpopulateComputedStats(s.All, float64(len(s.cpus)))\n\t\t\t\ts.All.TotalCount.Set(float64(len(s.cpus)))\n\t\t\t} else {\n\t\t\t\tperCPU, ok := s.cpus[f[0]]\n\t\t\t\tif !ok {\n\t\t\t\t\tperCPU = NewPerCPU(s.m, f[0])\n\t\t\t\t\ts.cpus[f[0]] = perCPU\n\t\t\t\t}\n\t\t\t\tparseCPUline(perCPU, f)\n\t\t\t\tpopulateComputedStats(perCPU, 1.0)\n\t\t\t\tperCPU.TotalCount.Set(1)\n\t\t\t}\n\t\t}\n\t}\n}", "func (p *plug) Collect(ch chan<- prometheus.Metric) {\n\tp.doStats(ch, doMetric)\n}", "func (p *perfStoreManager) collect() {\n\tallCgroups, err := p.cgroupSt.ListAllCgroups(sets.NewString(appclass.AppClassOnline))\n\tif err != nil {\n\t\treturn\n\t}\n\n\tp.cpuLock.Lock()\n\tdefer p.cpuLock.Unlock()\n\n\twg := sync.WaitGroup{}\n\tfor k, v := range allCgroups {\n\t\tfor _, ignored := range p.IgnoredCgroups {\n\t\t\tif checkSubCgroup(ignored, k) {\n\t\t\t\tklog.V(4).Infof(\"cgroup(%s) has been ignored\", k)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\twg.Add(1)\n\t\tgo func(cg string, ref *cgstore.CgroupRef) {\n\t\t\tdefer wg.Done()\n\n\t\t\tcgPath, err := cgroup.GetPerfEventCgroupPath(cg)\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"get perf_event cgroup path err: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// check pids\n\t\t\tpids, err := cgroup.GetPids(cgPath)\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"cgroup(%s) get pid err: %v\", cg, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif len(pids) == 0 {\n\t\t\t\tklog.V(4).Infof(\"cgroup(%s) has no pid\", cg)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// read cpus\n\t\t\tcpus, err := cgroup.GetCpuSet(cg, true)\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"cgroup(%s) get cpu sets err: %v\", cg, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif len(cpus) == 0 {\n\t\t\t\tklog.Errorf(\"cgroup(%s) get cpu sets is nil\", cg)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tstart := time.Now()\n\t\t\tcpuStartTotal, err := cgroup.GetCPUTotalUsage(cg)\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"cgroup(%s) collect cpu usage failed: %v\", cg, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpmuData, err := pmu.GetPMUValue(int(p.CollectDuration.Seconds()),\n\t\t\t\tcgPath, strings.Join(cpus, \",\"))\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"cgroup(%s) collect perf data err: %v\", cg, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttimeElapsed := time.Since(start).Nanoseconds()\n\t\t\tcpuEndTotal, err := cgroup.GetCPUTotalUsage(cg)\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"cgroup(%s) collect cpu usage failed: %v\", cg, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpmuData.CPUUsage = float64(cpuEndTotal-cpuStartTotal) / float64(timeElapsed)\n\n\t\t\tmetric := &PerfMetrics{\n\t\t\t\tSpec: *ref,\n\t\t\t\tValue: pmuData,\n\t\t\t}\n\t\t\tp.addContainerPerf(cg, pmuData.Timestamp, metric)\n\t\t}(k, v)\n\t}\n\twg.Wait()\n\n\tp.delContainerPerfs()\n\n\treturn\n}", "func (c *collector) Collect(ch chan<- prometheus.Metric) {\n\tc.mu.Lock()\n\t// Get the last views\n\tviews := c.views\n\t// Now clear them out for the next accumulation\n\tc.views = c.views[:0]\n\tc.mu.Unlock()\n\n\tif len(views) == 0 {\n\t\treturn\n\t}\n\n\t// seen is necessary because within each Collect cycle\n\t// if a Metric is sent to Prometheus with the same make up\n\t// that is \"name\" and \"labels\", it will error out.\n\tseen := make(map[prometheus.Metric]bool)\n\n\tfor _, vd := range views {\n\t\tfor _, row := range vd.Rows {\n\t\t\tmetric := c.toMetric(vd.View, row)\n\t\t\tif _, ok := seen[metric]; !ok && metric != nil {\n\t\t\t\tch <- metric\n\t\t\t\tseen[metric] = true\n\t\t\t}\n\t\t}\n\t}\n}", "func 
WalkCollect[T any](dir string, walkFunc WalkCollectFunc[T]) ([]T, error) {\n\tsem := semaphore.NewWeighted(int64(runtime.NumCPU()))\n\teg, ctx := errgroup.WithContext(context.Background())\n\tmu := sync.Mutex{}\n\tvals := make([]T, 0, 4)\n\n\terr := filepath.WalkDir(dir, func(path string, dirent fs.DirEntry, err error) error {\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tif err := sem.Acquire(ctx, 1); err != nil {\n\t\t\treturn fmt.Errorf(\"walk collect acquire semaphore: %w\", err)\n\t\t}\n\t\teg.Go(func() error {\n\t\t\tdefer sem.Release(1)\n\t\t\tvs, err := walkFunc(path, dirent)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif len(vs) > 0 {\n\t\t\t\tmu.Lock()\n\t\t\t\tvals = append(vals, vs...)\n\t\t\t\tmu.Unlock()\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"walk collect walk error: %w\", err)\n\t}\n\tif err := eg.Wait(); err != nil {\n\t\treturn nil, fmt.Errorf(\"walk collect wait err group: %w\", err)\n\t}\n\treturn vals, nil\n}", "func (c *ImageCollector) Collect(ch chan<- prometheus.Metric) {\n\tctx, cancel := context.WithTimeout(context.Background(), c.timeout)\n\tdefer cancel()\n\timages, _, err := c.client.Images.ListUser(ctx, nil)\n\tif err != nil {\n\t\tc.errors.WithLabelValues(\"image\").Add(1)\n\t\tlevel.Warn(c.logger).Log(\n\t\t\t\"msg\", \"can't list images\",\n\t\t\t\"err\", err,\n\t\t)\n\t\treturn\n\t}\n\n\tfor _, img := range images {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.MinDiskSize,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(img.MinDiskSize*1024*1024*1024),\n\t\t\tfmt.Sprintf(\"%d\", img.ID), img.Name, img.Regions[0], img.Type, img.Distribution,\n\t\t)\n\t}\n}" ]
[ "0.66612345", "0.64524883", "0.61589915", "0.6053547", "0.60013217", "0.5850025", "0.58179253", "0.57633007", "0.5758564", "0.5738406", "0.5733382", "0.56891465", "0.56716955", "0.56574595", "0.5633677", "0.5612909", "0.5587006", "0.55863476", "0.5575713", "0.55747694", "0.5563292", "0.5562807", "0.5555224", "0.5551914", "0.5524502", "0.5521713", "0.55105126", "0.5475407", "0.5472246", "0.5470153", "0.5448496", "0.5425806", "0.54255027", "0.5413054", "0.54040396", "0.53876483", "0.538337", "0.5371942", "0.5371942", "0.5361608", "0.53611803", "0.5343685", "0.5327349", "0.5324224", "0.5300103", "0.52959794", "0.52938974", "0.528633", "0.52862364", "0.5283032", "0.5279695", "0.5268463", "0.5267793", "0.5265303", "0.52588946", "0.5257184", "0.5254721", "0.5244667", "0.52357626", "0.5223566", "0.52222806", "0.5213277", "0.5206262", "0.520616", "0.5200722", "0.51942134", "0.5183866", "0.5172727", "0.5164642", "0.5162619", "0.5161114", "0.5149109", "0.51466477", "0.51422375", "0.5130038", "0.5129037", "0.5111172", "0.5102989", "0.5097552", "0.5097054", "0.5090915", "0.50895983", "0.50859344", "0.5080687", "0.5074928", "0.505767", "0.50575334", "0.50542015", "0.50533605", "0.50494564", "0.50385946", "0.5029948", "0.5023425", "0.50178546", "0.5010516", "0.5006585", "0.49955755", "0.49910468", "0.4990242", "0.498811" ]
0.77349365
0
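The record above pairs a metrics-collection query with Prometheus-style Collect implementations as its negatives. For orientation only, here is a minimal sketch of that custom-collector pattern, assuming github.com/prometheus/client_golang; the collector type and metric names are illustrative and are not drawn from the dataset:

package main

import "github.com/prometheus/client_golang/prometheus"

// demoCollector implements prometheus.Collector, the interface shared by
// the Collect methods quoted in the negatives above.
type demoCollector struct {
	desc *prometheus.Desc
}

// Describe sends the static metric description once at registration time.
func (c *demoCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

// Collect runs on every scrape; errors are typically logged rather than
// returned, as the quoted examples do.
func (c *demoCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, 1.0)
}

func main() {
	prometheus.MustRegister(&demoCollector{
		desc: prometheus.NewDesc("demo_up", "Always 1 while the process runs.", nil, nil),
	})
}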
BoolAsFloat converts a bool value into a float64 value, for easier datapoint usage.
func BoolAsFloat(val bool) float64 { if val { return 1 } return 0 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func BoolToFloat(b bool) float64 {\n\tif b {\n\t\treturn 1.\n\t}\n\treturn 0.\n}", "func boolToFloat(b bool) float64 {\n\tif b {\n\t\treturn 1\n\t}\n\treturn 0\n}", "func boolFloat64(b bool) float64 {\n\tif !b {\n\t\treturn 0.0\n\t}\n\n\treturn 1.0\n}", "func boolsToFloats(b stochnet.BoolVec) linalg.Vector {\n\tres := make(linalg.Vector, len(b.Activations()))\n\tfor i, x := range b.Activations() {\n\t\tif x {\n\t\t\tres[i] = 1\n\t\t}\n\t}\n\treturn res\n}", "func ToFloat(value interface{}) interface{} {\n\tswitch value := value.(type) {\n\tcase bool:\n\t\tif value {\n\t\t\treturn 1.0\n\t\t}\n\t\treturn 0.0\n\tcase *bool:\n\t\treturn ToFloat(*value)\n\tcase int:\n\t\treturn float64(value)\n\tcase *int32:\n\t\treturn ToFloat(*value)\n\tcase float32:\n\t\treturn value\n\tcase *float32:\n\t\treturn ToFloat(*value)\n\tcase float64:\n\t\treturn value\n\tcase *float64:\n\t\treturn ToFloat(*value)\n\tcase string:\n\t\tval, err := strconv.ParseFloat(value, 64)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn val\n\tcase *string:\n\t\treturn ToFloat(*value)\n\t}\n\treturn 0.0\n}", "func ToFloat(val interface{}) (float64, bool) {\n\tswitch t := val.(type) {\n\n\tcase float64:\n\t\treturn t, true\n\n\tcase variable:\n\t\tv, ok := t.value.(float64)\n\t\tif !ok {\n\t\t\treturn 0, false\n\t\t}\n\t\treturn v, true\n\n\tdefault:\n\t\treturn 0, false\n\t}\n}", "func ToFloat(v Value) (float64, bool) {\n\tswitch v.iface.(type) {\n\tcase int64:\n\t\treturn float64(v.AsInt()), true\n\tcase float64:\n\t\treturn v.AsFloat(), true\n\tcase string:\n\t\tn, f, tp := StringToNumber(v.AsString())\n\t\tswitch tp {\n\t\tcase IsInt:\n\t\t\treturn float64(n), true\n\t\tcase IsFloat:\n\t\t\treturn f, true\n\t\t}\n\t}\n\treturn 0, false\n}", "func (me TPositiveFloatType) ToXsdtFloat() xsdt.Float { return xsdt.Float(me) }", "func FloatValue(v Value) (float64, bool) {\n\tif v.Type() != FloatType {\n\t\treturn 0, false\n\t}\n\tval, ok := (v.Value()).(float64)\n\treturn val, ok\n}", "func TestCheckBinaryExprFloatEqlBool(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectCheckError(t, `2.0 == true`, env,\n\t\t`cannot convert true to type float64`,\n\t\t`invalid operation: 2 == true (mismatched types float64 and bool)`,\n\t)\n\n}", "func TestCheckBinaryExprFloatQuoBool(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectCheckError(t, `2.0 / true`, env,\n\t\t`cannot convert true to type float64`,\n\t\t`invalid operation: 2 / true (mismatched types float64 and bool)`,\n\t)\n\n}", "func TestCheckBinaryExprFloatAddBool(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectCheckError(t, `2.0 + true`, env,\n\t\t`cannot convert true to type float64`,\n\t\t`invalid operation: 2 + true (mismatched types float64 and bool)`,\n\t)\n\n}", "func IsFloat(t Type) bool {\n\treturn int(t)&flagIsFloat == flagIsFloat\n}", "func TestCheckBinaryExprFloatGtrBool(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectCheckError(t, `2.0 > true`, env,\n\t\t`cannot convert true to type float64`,\n\t\t`invalid operation: 2 > true (mismatched types float64 and bool)`,\n\t)\n\n}", "func (d Definition) IsFloat() bool {\n\tif k, ok := d.Output.(reflect.Kind); ok {\n\t\tif k == reflect.Float32 || k == reflect.Float64 {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func TestCheckBinaryExprBoolEqlFloat(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectCheckError(t, `true == 2.0`, env,\n\t\t`cannot convert 2 to type bool`,\n\t\t`invalid operation: true == 2 (mismatched types bool and float64)`,\n\t)\n\n}", "func (o *FloatObject) AsFloat() 
(float64) {\n return o.Value\n}", "func (v Value) IsFloat() bool {\n\treturn IsFloat(v.typ)\n}", "func TestCheckBinaryExprFloatGeqBool(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectCheckError(t, `2.0 >= true`, env,\n\t\t`cannot convert true to type float64`,\n\t\t`invalid operation: 2 >= true (mismatched types float64 and bool)`,\n\t)\n\n}", "func (v Value) Float(bitSize int) (float64, error) {\n\tif v.typ != Number {\n\t\treturn 0, v.newError(\"%s is not a number\", v.Raw())\n\t}\n\tf, err := strconv.ParseFloat(v.Raw(), bitSize)\n\tif err != nil {\n\t\treturn 0, v.newError(\"%v\", err)\n\t}\n\treturn f, nil\n}", "func TestCheckBinaryExprBoolQuoFloat(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectCheckError(t, `true / 2.0`, env,\n\t\t`cannot convert 2 to type bool`,\n\t\t`invalid operation: true / 2 (mismatched types bool and float64)`,\n\t)\n\n}", "func (state *State) IsFloat(index int) bool {\n\treturn IsFloat(state.get(index))\n}", "func Floatbits(tk obj.Token, args []oop.VarDef) oop.Val {\n\tval := args[0].Val\n\tif val.Type != oop.Int && val.Type != oop.Float {\n\t\tval.Data = 0.0\n\t}\n\tf := val.Data.(float64)\n\treturn oop.Val{Data: float64(*(*uint64)(unsafe.Pointer(&f))), Type: oop.Int}\n}", "func TestCheckBinaryExprBoolGtrFloat(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectCheckError(t, `true > 2.0`, env,\n\t\t`cannot convert 2 to type bool`,\n\t\t`invalid operation: true > 2 (mismatched types bool and float64)`,\n\t)\n\n}", "func toFloatMaybe(v starlark.Value) (float64, bool) {\n\treturn starlark.AsFloat(v)\n}", "func toFloatMaybe(v starlark.Value) (float64, bool) {\n\treturn starlark.AsFloat(v)\n}", "func toFloat64(v interface{}) (float64, bool) {\n\tswitch value := v.(type) {\n\tcase int64:\n\t\treturn float64(value), true\n\tcase float64:\n\t\treturn value, true\n\tcase PositionPoint:\n\t\treturn toFloat64(value.Value)\n\t}\n\treturn 0, false\n}", "func TestCheckBinaryExprBoolAddFloat(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectCheckError(t, `true + 2.0`, env,\n\t\t`cannot convert 2 to type bool`,\n\t\t`invalid operation: true + 2 (mismatched types bool and float64)`,\n\t)\n\n}", "func (v Value) AsFloat() float64 {\n\treturn v.iface.(float64)\n}", "func floatsToBools(v linalg.Vector) stochnet.BoolVec {\n\tbools := make([]bool, len(v))\n\tfor i, f := range v {\n\t\tbools[i] = f >= 0.5\n\t}\n\treturn stochnet.ConstBoolVec(bools)\n}", "func CastBool(val interface{}) (bool, bool) {\n\tswitch val.(type) {\n\tcase bool:\n\t\treturn val.(bool), true\n\tcase int:\n\t\treturn val.(int) != 0, true\n\tcase int8:\n\t\treturn val.(int8) != 0, true\n\tcase int16:\n\t\treturn val.(int16) != 0, true\n\tcase int32:\n\t\treturn val.(int32) != 0, true\n\tcase int64:\n\t\treturn val.(int64) != 0, true\n\tcase uint:\n\t\treturn val.(uint) != 0, true\n\tcase uint8:\n\t\treturn val.(uint8) != 0, true\n\tcase uint16:\n\t\treturn val.(uint16) != 0, true\n\tcase uint32:\n\t\treturn val.(uint32) != 0, true\n\tcase uint64:\n\t\treturn val.(uint64) != 0, true\n\tcase float32:\n\t\treturn val.(float32) != 0, true\n\tcase float64:\n\t\treturn val.(float64) != 0, true\n\tcase string:\n\t\tif bval, err := strconv.ParseBool(val.(string)); err != nil {\n\t\t\tif fval, ok := CastFloat(val.(string)); ok {\n\t\t\t\treturn fval != 0, true\n\t\t\t}\n\t\t\treturn false, false\n\t\t} else {\n\t\t\treturn bval, true\n\t\t}\n\t}\n\treturn false, false\n}", "func (v AnnotationValue) AsFloat() float64 {\n\treturn v.Value.(float64)\n}", "func FloatAndFloatToBool_EQ(v, w FloatValue) BoolValue 
{\n\treturn BoolValue{value: v.value == w.value, err: firstError(v, w)}\n}", "func TernaryFloat64(condition bool, trueVal, falseVal float64) float64 {\n\tif condition {\n\t\treturn trueVal\n\t} else {\n\t\treturn falseVal\n\t}\n}", "func AsF(str string) (float64, bool) {\n\tres, err := strconv.ParseFloat(str, 64)\n\treturn res, err == nil\n}", "func IsFloat(val any) bool {\n\tif val == nil {\n\t\treturn false\n\t}\n\n\tswitch rv := val.(type) {\n\tcase float32, float64:\n\t\treturn true\n\tcase string:\n\t\treturn rv != \"\" && rxFloat.MatchString(rv)\n\t}\n\treturn false\n}", "func (v *Value) AsFloat64(dv float64) float64 {\n\tif v.IsUndefined() {\n\t\treturn dv\n\t}\n\tswitch tv := v.raw.(type) {\n\tcase string:\n\t\tf, err := strconv.ParseFloat(tv, 64)\n\t\tif err != nil {\n\t\t\treturn dv\n\t\t}\n\t\treturn f\n\tcase int:\n\t\treturn float64(tv)\n\tcase float64:\n\t\treturn tv\n\tcase bool:\n\t\tif tv {\n\t\t\treturn 1.0\n\t\t}\n\t\treturn 0.0\n\tcase time.Time:\n\t\tns := tv.UnixNano()\n\t\treturn float64(ns)\n\tcase time.Duration:\n\t\tns := tv.Nanoseconds()\n\t\treturn float64(ns)\n\t}\n\treturn dv\n}", "func FloatSignbit(x *big.Float,) bool", "func (t *Typed) FloatIf(key string) (float64, bool) {\n\tvalue, exists := t.GetIf(key)\n\tif exists == false {\n\t\treturn 0, false\n\t}\n\tswitch t := value.(type) {\n\tcase float32:\n\t\treturn float64(t), true\n\tcase float64:\n\t\treturn float64(t), true\n\tcase string:\n\t\tf, err := strconv.ParseFloat(t, 10)\n\t\treturn f, err == nil\n\t}\n\treturn 0, false\n}", "func FakeBool(v interface{}) bool {\n\tswitch r := v.(type) {\n\tcase float64:\n\t\treturn r != 0\n\tcase string:\n\t\treturn r != \"\"\n\tcase bool:\n\t\treturn r\n\tcase nil:\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n}", "func IsFloat64(v interface{}) bool {\n\tr := elconv.AsValueRef(reflect.ValueOf(v))\n\treturn r.Kind() == reflect.Float64\n}", "func TestFloat(tst *testing.T) {\n\n\t// Test bool\n\tf, err := StringToFloat(\"1.256898\")\n\tbrtesting.AssertEqual(tst, err, nil, \"StringToFloat failed\")\n\tbrtesting.AssertEqual(tst, f, 1.256898, \"StringToFloat failed\")\n\tf, err = StringToFloat(\"go-bedrock\")\n\tbrtesting.AssertNotEqual(tst, err, nil, \"StringToFloat failed\")\n}", "func Float(flag string, value float64, description string) *float64 {\n\tvar v float64\n\tFloatVar(&v, flag, value, description)\n\treturn &v\n}", "func IsFloat(data interface{}) bool {\n\treturn typeIs(data,\n\t\treflect.Float32,\n\t\treflect.Float64,\n\t)\n}", "func (formatter) fBool(v *types.RecordValue) *types.RecordValue {\n\tif v.Value != strBoolTrue {\n\t\tv.Value = \"\"\n\t}\n\n\treturn v\n}", "func (s *Smpval) SetFloat(f float64) bool {\n\tif s.flag == Float && s.val.CanSet() {\n\t\ts.val.SetFloat(f)\n\t\ts.f = s.val.Float()\n\t\treturn true\n\t}\n\treturn false\n}", "func ConvertToFloat64(value interface{}) (float64, bool) {\n\tswitch v := value.(type) {\n\tcase int, uint, int8, uint8, int16, uint16, int32, uint32, int64, uint64, float32, float64:\n\t\tnum := reflect.ValueOf(value).Convert(reflect.TypeOf(float64(0))).Float()\n\t\treturn num, !math.IsInf(num, 0) && !math.IsNaN(num)\n\tcase string:\n\t\tnum, err := strconv.ParseFloat(v, 64)\n\t\tif err == nil {\n\t\t\treturn num, true\n\t\t}\n\t}\n\treturn float64(0), false\n}", "func (v *Value) Float64() float64 {\n\tswitch {\n\tcase v.fvalOk:\n\tcase v.ivalOk:\n\t\tv.fval = float64(v.ival)\n\t\tv.fvalOk = true\n\tcase v.svalOk:\n\t\t// Perform a best-effort conversion from string to float64.\n\t\tv.fval = 0.0\n\t\tstrs := 
matchFloat.FindStringSubmatch(v.sval)\n\t\tif len(strs) >= 2 {\n\t\t\tv.fval, _ = strconv.ParseFloat(strs[1], 64)\n\t\t}\n\t\tv.fvalOk = true\n\t}\n\treturn v.fval\n}", "func IsFloat(id int) bool {\n\treturn id >= IDFloat16 && id <= IDFloat64\n}", "func TestCheckBinaryExprFloatRemBool(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectCheckError(t, `2.0 % true`, env,\n\t\t`cannot convert true to type float64`,\n\t\t`invalid operation: 2 % true (mismatched types float64 and bool)`,\n\t)\n\n}", "func TestCheckBinaryExprFloatNeqBool(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectCheckError(t, `2.0 != true`, env,\n\t\t`cannot convert true to type float64`,\n\t\t`invalid operation: 2 != true (mismatched types float64 and bool)`,\n\t)\n\n}", "func (s *Value) asBool() (bool, error) {\n\t// A missing value is considered false\n\tif s == nil {\n\t\treturn false, nil\n\t}\n\tswitch s.Name {\n\tcase \"true\":\n\t\treturn true, nil\n\tcase \"false\":\n\t\treturn false, nil\n\tdefault:\n\t\treturn false, fmt.Errorf(\"invalid boolean: %s\", s.Name)\n\t}\n}", "func TestCheckBinaryExprBoolGeqFloat(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectCheckError(t, `true >= 2.0`, env,\n\t\t`cannot convert 2 to type bool`,\n\t\t`invalid operation: true >= 2 (mismatched types bool and float64)`,\n\t)\n\n}", "func (f Number) Bool(context.Context) bool {\n\treturn float64(f) != 0\n}", "func (v *Value) AsBool(dv bool) bool {\n\tif v.IsUndefined() {\n\t\treturn dv\n\t}\n\tswitch tv := v.raw.(type) {\n\tcase string:\n\t\tb, err := strconv.ParseBool(tv)\n\t\tif err != nil {\n\t\t\treturn dv\n\t\t}\n\t\treturn b\n\tcase int:\n\t\treturn tv == 1\n\tcase float64:\n\t\treturn tv == 1.0\n\tcase bool:\n\t\treturn tv\n\tcase time.Time:\n\t\treturn tv.UnixNano() > 0\n\tcase time.Duration:\n\t\treturn tv.Nanoseconds() > 0\n\t}\n\treturn dv\n}", "func TestCheckBinaryExprFloatLssBool(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectCheckError(t, `2.0 < true`, env,\n\t\t`cannot convert true to type float64`,\n\t\t`invalid operation: 2 < true (mismatched types float64 and bool)`,\n\t)\n\n}", "func FloatConverter(str string, target reflect.Value) (ok bool) {\n\tf, err := strconv.ParseFloat(str, 64)\n\tif err != nil {\n\t\treturn\n\t}\n\n\ttarget.SetFloat(f)\n\treturn true\n}", "func (mysql *MySQLDatabase) IsFloat(column Column) bool {\n\treturn IsStringInSlice(column.DataType, mysql.GetFloatDatatypes())\n}", "func TestCheckBinaryExprFloatEqlFloat(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectConst(t, `2.0 == 2.0`, env, (2.0 == 2.0), ConstBool)\n}", "func Floatfrombits(tk obj.Token, args []oop.VarDef) oop.Val {\n\tval := args[0].Val\n\tif val.Type != oop.Int {\n\t\tval.Data = 0.0\n\t}\n\tb := uint64(val.Data.(float64))\n\treturn oop.Val{Data: *(*float64)(unsafe.Pointer(&b)), Type: oop.Float}\n}", "func (s *SetAttributeState) getFloatValue() float64 {\n\treturn s.value.(float64)\n}", "func (v *Variant) IsFloating() bool {\n\treturn gobool(C.g_variant_is_floating(v.native()))\n}", "func (feature Feature) FieldAsFloat64(index int) float64 {\n\tval := C.OGR_F_GetFieldAsDouble(feature.cval, C.int(index))\n\treturn float64(val)\n}", "func (v *Value) Float() float64 {\n\treturn (float64)(C.value_get_double(v.value))\n}", "func NewFloat(v float64) Float {\n\treturn Float{v, true}\n}", "func FloatVal(v float64) predicate.Property {\n\treturn predicate.Property(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldFloatVal), v))\n\t})\n}", "func ToFloat(value interface{}) (float64, error) {\n\tv := 
reflect.ValueOf(value)\n\tswitch v.Kind() {\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn v.Float(), nil\n\t}\n\treturn 0, fmt.Errorf(\"cannot convert %v to float64\", v.Kind())\n}", "func IsFloat(str string) bool {\n\treturn str != \"\" && rxFloat.MatchString(str)\n}", "func (v Value) Float() (float64, error) {\n\tswitch v.Value.Kind() {\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn float64(v.Value.Int()), nil\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn v.Value.Float(), nil\n\t}\n\treturn 0, fmt.Errorf(\"Kind %s is not a float\", v.Value.Kind())\n}", "func GetFloat(fixControlMap map[uint64]string, key uint64) (value float64, exists bool, parseErr error) {\n\tif fixControlMap == nil {\n\t\treturn 0, false, nil\n\t}\n\trawValue, ok := fixControlMap[key]\n\tif !ok {\n\t\treturn 0, false, nil\n\t}\n\t// The same as tidbOptFloat64 in sessionctx/variable.\n\tvalue, parseErr = strconv.ParseFloat(rawValue, 64)\n\treturn value, true, parseErr\n}", "func TestCheckBinaryExprFloatGtrFloat(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectConst(t, `2.0 > 2.0`, env, (2.0 > 2.0), ConstBool)\n}", "func (v *missingValue) SetFloat(value float64) bool {\n\treturn false\n}", "func (f FloatFlag) Float(ctx context.Context, flagger ...Flagger) float64 {\n\ti, ok := f.value(ctx, flagger...)\n\tif !ok {\n\t\treturn f.defaultFloat\n\t}\n\tv, ok := i.(float64)\n\tif !ok {\n\t\treturn f.defaultFloat\n\t}\n\treturn v\n}", "func TestCheckBinaryExprFloatOrBool(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectCheckError(t, `2.0 | true`, env,\n\t\t`cannot convert true to type float64`,\n\t\t`invalid operation: 2 | true (mismatched types float64 and bool)`,\n\t)\n\n}", "func (v Value) Float() float64 {\n\tswitch {\n\tcase v == 0:\n\t\treturn 0\n\tcase v == 64:\n\t\treturn 0.5\n\tcase v == 127:\n\t\treturn 1\n\tcase v < 64:\n\t\treturn float64(v) / 128\n\tdefault:\n\t\treturn float64(v-1) / 126\n\t}\n}", "func ToFloat(value interface{}) (val float64, err error) {\n\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\terr = errors.New(\"can not convert to as type float\")\n\t\t\treturn\n\t\t}\n\t}()\n\n\tval = reflect.ValueOf(value).Float()\n\treturn\n}", "func TestCheckBinaryExprFloatAndBool(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectCheckError(t, `2.0 & true`, env,\n\t\t`cannot convert true to type float64`,\n\t\t`invalid operation: 2 & true (mismatched types float64 and bool)`,\n\t)\n\n}", "func (num Number) Float64() (float64, bool) {\n\tf, err := json.Number(num).Float64()\n\tif err != nil {\n\t\treturn 0, false\n\t}\n\treturn f, true\n}", "func MapBoolFloat64Ptr(f func(*bool) *float64, list []*bool) []*float64 {\n\tif f == nil {\n\t\treturn []*float64{}\n\t}\n\tnewList := make([]*float64, len(list))\n\tfor i, v := range list {\n\t\tnewList[i] = f(v)\n\t}\n\treturn newList\n}", "func (v Value) Float() float64 {\n\tswitch v.Typ {\n\tdefault:\n\t\tf, _ := strconv.ParseFloat(v.String(), 64)\n\t\treturn f\n\tcase ':':\n\t\treturn float64(v.IntegerV)\n\t}\n}", "func (v Value) Float() float64 {\n\treturn v.v.Float()\n}", "func TestCheckBinaryExprFloatLeqBool(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectCheckError(t, `2.0 <= true`, env,\n\t\t`cannot convert true to type float64`,\n\t\t`invalid operation: 2 <= true (mismatched types float64 and bool)`,\n\t)\n\n}", "func ToBool(v interface{}, def bool) bool {\r\n\tif b, ok := v.(bool); ok {\r\n\t\treturn b\r\n\t}\r\n\tif i, ok := v.(int); ok {\r\n\t\treturn i > 0\r\n\t}\r\n\tif i, ok 
:= v.(float64); ok {\r\n\t\treturn i > 0\r\n\t}\r\n\tif i, ok := v.(float32); ok {\r\n\t\treturn i > 0\r\n\t}\r\n\tif ss, ok := v.([]string); ok {\r\n\t\tv = ss[0]\r\n\t}\r\n\tif s, ok := v.(string); ok {\r\n\t\tif s == \"on\" {\r\n\t\t\treturn true\r\n\t\t}\r\n\t\tif s == \"off\" || s == \"\" {\r\n\t\t\treturn false\r\n\t\t}\r\n\t\tif b, err := strconv.ParseBool(s); err == nil {\r\n\t\t\treturn b\r\n\t\t}\r\n\t}\r\n\r\n\treturn def\r\n\r\n}", "func BoolAsUiNodeInputAttributesValue(v *bool) UiNodeInputAttributesValue {\n\treturn UiNodeInputAttributesValue{\n\t\tBool: v,\n\t}\n}", "func (f FormField) Float() float64 {\n\tif result, err := strconv.ParseFloat(f.Value, 64); err == nil {\n\t\treturn result\n\t}\n\treturn 0.0\n}", "func (c *Configurator) Float64F(name string, value float64, usage string) *float64 {\n\tp := new(float64)\n\n\tc.Float64VarF(p, name, value, usage)\n\n\treturn p\n}", "func TestCheckBinaryExprBoolNeqFloat(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectCheckError(t, `true != 2.0`, env,\n\t\t`cannot convert 2 to type bool`,\n\t\t`invalid operation: true != 2 (mismatched types bool and float64)`,\n\t)\n\n}", "func ToBool(value interface{}) bool {\n\tswitch value := value.(type) {\n\tcase bool:\n\t\treturn value\n\tcase *bool:\n\t\treturn *value\n\tcase string:\n\t\tswitch value {\n\t\tcase \"\", \"false\":\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\tcase *string:\n\t\treturn ToBool(*value)\n\tcase float64:\n\t\tif value != 0 {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\tcase *float64:\n\t\treturn ToBool(*value)\n\tcase float32:\n\t\tif value != 0 {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\tcase *float32:\n\t\treturn ToBool(*value)\n\tcase int:\n\t\tif value != 0 {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\tcase *int:\n\t\treturn ToBool(*value)\n\t}\n\treturn false\n}", "func (jo *Object) GetFloat(field string) (float64, bool) {\n\tjp, ok := jo.GetProperty(field)\n\tif !ok {\n\t\treturn 0, false\n\t}\n\treturn jp.GetFloat()\n}", "func (c *Cell) Float() (float64, error) {\n\tf, err := strconv.ParseFloat(c.Value, 64)\n\tif err != nil {\n\t\treturn math.NaN(), err\n\t}\n\treturn f, nil\n}", "func (v *missingValue) GetFloat() (float64, bool) {\n\treturn 0.0, false\n}", "func (v Float) Float64() float64 {\n\treturn v.v\n}", "func TestCheckBinaryExprFloatAndNotBool(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectCheckError(t, `2.0 &^ true`, env,\n\t\t`cannot convert true to type float64`,\n\t\t`invalid operation: 2 &^ true (mismatched types float64 and bool)`,\n\t)\n\n}", "func (v *Value) Float() float64 {\n return Util.ToFloat(v.data)\n}", "func Float(str string) bool {\n\t_, err := strconv.ParseFloat(str, 0)\n\treturn err == nil\n}", "func TestCheckBinaryExprBoolRemFloat(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectCheckError(t, `true % 2.0`, env,\n\t\t`cannot convert 2 to type bool`,\n\t\t`invalid operation: true % 2 (mismatched types bool and float64)`,\n\t)\n\n}", "func NewFloat(f float64, valid bool) Float {\n\treturn Float{\n\t\tNullFloat64: sql.NullFloat64{\n\t\t\tFloat64: f,\n\t\t\tValid: valid,\n\t\t},\n\t}\n}", "func TestPriceFloatValueOne(t *testing.T) {\n\tonePrice := &Price{\n\t\tAmountWant: 1,\n\t\tAmountHave: 1,\n\t}\n\n\tif priceValue, _ := onePrice.ToFloat(); priceValue != float64(1) {\n\t\tt.Errorf(\"A price of one should return 1 when calling ToFloat\")\n\t\treturn\n\t}\n\treturn\n}", "func (t *Typed) FloatsIf(key string) ([]float64, bool) {\n\tvalue, exists := t.GetIf(key)\n\tif exists == false {\n\t\treturn nil, 
false\n\t}\n\tif n, ok := value.([]float64); ok {\n\t\treturn n, true\n\t}\n\tif a, ok := value.([]interface{}); ok {\n\t\tl := len(a)\n\t\tn := make([]float64, l)\n\t\tfor i := 0; i < l; i++ {\n\t\t\tswitch t := a[i].(type) {\n\t\t\tcase float64:\n\t\t\t\tn[i] = t\n\t\t\tcase string:\n\t\t\t\tf, err := strconv.ParseFloat(t, 10)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn n, false\n\t\t\t\t}\n\t\t\t\tn[i] = f\n\t\t\tdefault:\n\t\t\t\treturn n, false\n\t\t\t}\n\t\t}\n\t\treturn n, true\n\t}\n\treturn nil, false\n}", "func TestCheckBinaryExprBoolLssFloat(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectCheckError(t, `true < 2.0`, env,\n\t\t`cannot convert 2 to type bool`,\n\t\t`invalid operation: true < 2 (mismatched types bool and float64)`,\n\t)\n\n}" ]
[ "0.79568267", "0.7608123", "0.6812237", "0.6481996", "0.6444785", "0.64386845", "0.63174844", "0.62975734", "0.6223633", "0.6176762", "0.61434895", "0.6118684", "0.6105061", "0.6064999", "0.6007207", "0.5978421", "0.5931329", "0.5929108", "0.5906539", "0.5903915", "0.5897242", "0.58755755", "0.58454806", "0.5838845", "0.58372945", "0.58372945", "0.5831517", "0.58252287", "0.58147776", "0.5813939", "0.5810761", "0.5797016", "0.5783428", "0.57817036", "0.5780816", "0.576837", "0.57578295", "0.5748081", "0.5744147", "0.5734095", "0.5731119", "0.57229024", "0.5714041", "0.57117736", "0.5709423", "0.56999564", "0.5694239", "0.5659471", "0.5653608", "0.56298834", "0.5619768", "0.56186986", "0.5616682", "0.5604302", "0.5588394", "0.5579833", "0.5575067", "0.5571778", "0.55578804", "0.5557297", "0.55535436", "0.5551791", "0.55400825", "0.55389714", "0.55209804", "0.5514602", "0.5512523", "0.54851085", "0.54758507", "0.5472185", "0.5469537", "0.546613", "0.5454398", "0.54491454", "0.544605", "0.5440235", "0.54375476", "0.5431244", "0.5416185", "0.5412128", "0.5411632", "0.54009485", "0.5394656", "0.5385954", "0.53796643", "0.5374708", "0.5373496", "0.53698575", "0.53517723", "0.5341083", "0.5339396", "0.53348535", "0.5334836", "0.5330038", "0.53288287", "0.53282964", "0.5324366", "0.5323073", "0.5316203", "0.53136533" ]
0.87412244
0
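The BoolAsFloat record closes here. A short usage sketch of its document function follows: the function body is copied verbatim from the record's document field, while the surrounding package and main are illustrative scaffolding added so the snippet compiles on its own:

package main

import "fmt"

// BoolAsFloat converts a bool into a float64 datapoint value
// (body taken from the record above).
func BoolAsFloat(val bool) float64 {
	if val {
		return 1
	}
	return 0
}

func main() {
	fmt.Println(BoolAsFloat(true))  // prints 1
	fmt.Println(BoolAsFloat(false)) // prints 0
}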
FloatHash CRCs a byte array and converts the CRC into a float64, for easier datapoint usage.
func FloatHash(data []byte) float64 { return float64(crc.CRC(crc.InitialCRC, data)) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func ChecksumIEEE(data []byte) uint32 {}", "func f32hash(p unsafe.Pointer, h uintptr) uintptr {\n\tf := *(*float32)(p)\n\tswitch {\n\tcase f == 0:\n\t\treturn c1 * (c0 ^ h) // +0, -0\n\tcase f != f:\n\t\treturn c1 * (c0 ^ h ^ uintptr(fastrand())) // any kind of NaN\n\tdefault:\n\t\treturn memhash(p, h, 4)\n\t}\n}", "func hash(x []byte) uint32 {\n\treturn crc32.ChecksumIEEE(x)\n}", "func BytesToFloat(b []byte) float32 {\n\tif b == nil {\n\t\treturn 0.0\n\t} else {\n\t\treturn *(*float32)(unsafe.Pointer(&b[0]))\n\t}\n}", "func ieeeCLMUL(crc uint32, p []byte) uint32", "func ByteToFloat32(bytes []byte) float32 {\n\tbits := binary.LittleEndian.Uint32(bytes)\n\treturn math.Float32frombits(bits)\n}", "func BinaryToFloat64(bytes []byte) float64 {\n\tbits := binary.BigEndian.Uint64(bytes)\n\tfloat := math.Float64frombits(bits)\n\treturn float\n}", "func newChecksum(b []byte) hash.Hash32 {\n\tcrc := crc32.NewIEEE()\n\tcrc.Write(b)\n\treturn crc\n}", "func castagnoliSSE42(crc uint32, p []byte) uint32", "func Floatfrombits(tk obj.Token, args []oop.VarDef) oop.Val {\n\tval := args[0].Val\n\tif val.Type != oop.Int {\n\t\tval.Data = 0.0\n\t}\n\tb := uint64(val.Data.(float64))\n\treturn oop.Val{Data: *(*float64)(unsafe.Pointer(&b)), Type: oop.Float}\n}", "func ReadFloat(buffer []byte, offset int) float32 {\n bits := ReadUInt32(buffer, offset)\n return math.Float32frombits(bits)\n}", "func BytesToFloat(s []byte) (float64, error) {\n // Find the decimal point\n n := bytes.IndexByte(s, '.')\n // ...or just process as an int if there isn't one\n if n == -1 {\n i, err := BytesToInt(s)\n return float64(i), err\n }\n // ...and count decimal places\n dp := (len(s) - n) - 1\n\n // Read the integer section\n i, err := BytesToInt(s[:n])\n if err != nil {\n return 0, err\n }\n f := float64(i)\n // ...and the decimals\n i, err = BytesToInt(s[n+1:])\n if err != nil {\n return 0, err\n }\n f += float64(i) / math.Pow10(dp)\n\n return f, nil\n}", "func ByteToFloat64(bytes []byte) float64 {\n\tbits := binary.BigEndian.Uint64(bytes)\n\treturn math.Float64frombits(bits)\n}", "func Float64frombytes(bytes []byte) float64 {\n\tbits := binary.LittleEndian.Uint64(bytes)\n\tfloat := math.Float64frombits(bits)\n\treturn float\n}", "func Float32frombytes(bytes []byte) float32 {\n\tbits := binary.LittleEndian.Uint32(bytes)\n\tfloat := math.Float32frombits(bits)\n\treturn float\n}", "func Float32frombytes(bytes []byte) float32 {\n\tbits := binary.LittleEndian.Uint32(bytes)\n\tfloat := math.Float32frombits(bits)\n\treturn float\n}", "func fltToFloat32(k string, x gosmc.SMCBytes, size uint32) (float32, error) {\n\treturn math.Float32frombits(binary.LittleEndian.Uint32(x[:size])), nil\n}", "func Hash(mdfcge []byte) [32]byte {\n\treturn sha256.Sum256(mdfcge)\n}", "func (g *GLTF) bytesToArrayF32(data []byte, componentType, count int) (math32.ArrayF32, error) {\n\n\t// If component is UNSIGNED_INT nothing to do\n\tif componentType == UNSIGNED_INT {\n\t\tarr := (*[1 << 30]float32)(unsafe.Pointer(&data[0]))[:count]\n\t\treturn math32.ArrayF32(arr), nil\n\t}\n\n\t// Converts UNSIGNED_SHORT or SHORT to UNSIGNED_INT\n\tif componentType == UNSIGNED_SHORT || componentType == SHORT {\n\t\tout := math32.NewArrayF32(count, count)\n\t\tfor i := 0; i < count; i++ {\n\t\t\tout[i] = float32(data[i*2]) + float32(data[i*2+1])*256\n\t\t}\n\t\treturn out, nil\n\t}\n\n\t// Converts UNSIGNED_BYTE or BYTE to UNSIGNED_INT\n\tif componentType == UNSIGNED_BYTE || componentType == BYTE {\n\t\tout := math32.NewArrayF32(count, count)\n\t\tfor i := 0; i < count; 
i++ {\n\t\t\tout[i] = float32(data[i])\n\t\t}\n\t\treturn out, nil\n\t}\n\n\treturn (*[1 << 30]float32)(unsafe.Pointer(&data[0]))[:count], nil\n}", "func HexToBytes(h string) []byte {\n\ts, err := hex.DecodeString(h)\n\tif err != nil {\n\t\tfmt.Errorf(\"faild to convert BytesToFloat(%s) with error : %s\", h, err.Error())\n\t\treturn []byte(\"\")\n\t}\n\treturn s\n}", "func (buff *Bytes) ToFloat32() float32 {\r\n\treturn *(*float32)(unsafe.Pointer(&(*buff)[0]))\r\n}", "func updateCRC(crc uint32, buf []byte) uint32 {\n\t// The CRC-32 computation in bzip2 treats bytes as having bits in big-endian\n\t// order. That is, the MSB is read before the LSB. Thus, we can use the\n\t// standard library version of CRC-32 IEEE with some minor adjustments.\n\tcrc = internal.ReverseUint32(crc)\n\tvar arr [4096]byte\n\tfor len(buf) > 0 {\n\t\tcnt := copy(arr[:], buf)\n\t\tbuf = buf[cnt:]\n\t\tfor i, b := range arr[:cnt] {\n\t\t\tarr[i] = internal.ReverseLUT[b]\n\t\t}\n\t\tcrc = crc32.Update(crc, crc32.IEEETable, arr[:cnt])\n\t}\n\treturn internal.ReverseUint32(crc)\n}", "func (n *eeNum) float64() *float64 { return (*float64)(unsafe.Pointer(&n.data)) }", "func (p *BinlogFile) ReadFloat() ([]float32, error) {\n\tif p.reader == nil {\n\t\tlog.Warn(\"Binlog file: binlog reader not yet initialized\")\n\t\treturn nil, errors.New(\"binlog reader not yet initialized\")\n\t}\n\n\tresult := make([]float32, 0)\n\tfor {\n\t\tevent, err := p.reader.NextEventReader()\n\t\tif err != nil {\n\t\t\tlog.Warn(\"Binlog file: failed to iterate events reader\", zap.Error(err))\n\t\t\treturn nil, fmt.Errorf(\"failed to iterate events reader, error: %w\", err)\n\t\t}\n\n\t\t// end of the file\n\t\tif event == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tif event.TypeCode != storage.InsertEventType {\n\t\t\tlog.Warn(\"Binlog file: binlog file is not insert log\")\n\t\t\treturn nil, errors.New(\"binlog file is not insert log\")\n\t\t}\n\n\t\tif p.DataType() != schemapb.DataType_Float {\n\t\t\tlog.Warn(\"Binlog file: binlog data type is not float\")\n\t\t\treturn nil, errors.New(\"binlog data type is not float\")\n\t\t}\n\n\t\tdata, err := event.PayloadReaderInterface.GetFloatFromPayload()\n\t\tif err != nil {\n\t\t\tlog.Warn(\"Binlog file: failed to read float data\", zap.Error(err))\n\t\t\treturn nil, fmt.Errorf(\"failed to read float data, error: %w\", err)\n\t\t}\n\n\t\tresult = append(result, data...)\n\t}\n\n\treturn result, nil\n}", "func (buff *Bytes) ToFloat64() float64 {\r\n\treturn *(*float64)(unsafe.Pointer(&(*buff)[0]))\r\n}", "func ComputeCRC32(data []byte) uint32 {\n\treturn crc32.ChecksumIEEE(data)\n}", "func (p RedisDsl) HASHES_GET_FLOAT64(keys []string, field string) ([]*float64, error) {\n\treplys := p.HASHES_GET(keys, field)\n\n\toutput := make([]*float64, len(keys))\n\tfor i, reply := range replys {\n\t\tptr, err := ReplyToFloat64Ptr(reply)\n\t\tif nil != err {\n\t\t\treturn nil, err\n\t\t}\n\t\toutput[i] = ptr\n\t}\n\n\treturn output, nil\n}", "func (d *DecompressionBuffer) readFloat() float64 {\n\tb := d.data[0:8]\n\tbuf := bytes.NewReader(b)\n\tvar result float64\n\terr := binary.Read(buf, binary.BigEndian, &result)\n\tif err != nil {\n\t\tfmt.Println(\"binary.Read failed:\", err)\n\t\tpanic(\"Failed to decompress in ReadFirst\")\n\t}\n\n\treturn result\n}", "func (this Hash) Float(key string) HashFloat {\n\treturn newHashFloat(this, key)\n}", "func crc(b []byte) uint32 {\n\tc := crc32.Update(0, crcTable, b)\n\treturn c>>15 | c<<17 + 0xa282ead8\n}", "func Floatbits(tk obj.Token, args []oop.VarDef) oop.Val {\n\tval := 
args[0].Val\n\tif val.Type != oop.Int && val.Type != oop.Float {\n\t\tval.Data = 0.0\n\t}\n\tf := val.Data.(float64)\n\treturn oop.Val{Data: float64(*(*uint64)(unsafe.Pointer(&f))), Type: oop.Int}\n}", "func KDFeHash(h crypto.Hash, z []byte, use string, partyUInfo, partyVInfo []byte, bits int) []byte {\n\thash := h.New()\n\n\tout := kdf(hash, bits, func() {\n\t\thash.Write(z)\n\t\thash.Write([]byte(use))\n\t\thash.Write([]byte{0}) // Terminating null character for C-string.\n\t\thash.Write(partyUInfo)\n\t\thash.Write(partyVInfo)\n\t})\n\treturn out\n}", "func fletcher32(payload []byte) uint32 {\n\ts1 := uint16(0)\n\ts2 := uint16(0)\n\n\tsz := len(payload) & (^1)\n\tfor i := 0; i < sz; i += 2 {\n\t\ts1 += uint16(payload[i]) | (uint16(payload[i+1]) << 8)\n\t\ts2 += s1\n\t}\n\tif len(payload)&1 != 0 {\n\t\ts1 += uint16(payload[sz])\n\t\ts2 += s1\n\t}\n\treturn (uint32(s2) << 16) | uint32(s1)\n}", "func (hm *HM) ReadFloat(addr int) (val float64, err error) {\n\tb, err := hm.shm.ReadN(addr, int(unsafe.Sizeof(float64(0))))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tbits := binary.BigEndian.Uint64(b)\n\treturn math.Float64frombits(bits), nil\n}", "func fmix(h uint32) uint32 {\n\th ^= h >> 16\n\th *= 0x85ebca6b\n\th ^= h >> 13\n\th *= 0xc2b2ae35\n\th ^= h >> 16\n\treturn h\n}", "func float32bits(f float32) uint32 { return *(*uint32)(unsafe.Pointer(&f)) }", "func FloatGobDecode(z *big.Float, buf []byte) error", "func smcBytesToFloat32(x gosmc.SMCBytes, size uint32) float32 {\n\treturn float32(smcBytesToUint32(x, size))\n}", "func codecToFloat64(codec string) float64 {\n\tif len(codec) != 4 {\n\t\treturn -1.0\n\t}\n\tc1 := []rune(string(codec[0]))[0]\n\tc2 := []rune(string(codec[1]))[0]\n\tc3 := []rune(string(codec[2]))[0]\n\tc4 := []rune(string(codec[3]))[0]\n\treturn float64((c1 & 255) + ((c2 & 255) << 8) + ((c3 & 255) << 16) + ((c4 & 255) << 24))\n}", "func fixedInt26ToFloat(fixedInt fixed.Int26_6) float32 {\n\tvar result float32\n\ti := int32(fixedInt)\n\tresult += float32(i >> 6)\n\tresult += float32(i&0x003F) / float32(64.0)\n\treturn result\n}", "func (f *Float) HashKey() HashKey {\n\th := fnv.New64a()\n\th.Write([]byte(f.Inspect()))\n\treturn HashKey{Type: f.Type(), Value: h.Sum64()}\n}", "func (h *hashLongestMatchQuickly) HashBytes(data []byte) uint32 {\n\tvar hash uint64 = ((binary.LittleEndian.Uint64(data) << (64 - 8*h.hashLen)) * kHashMul64)\n\n\t/* The higher bits contain more mixture from the multiplication,\n\t so we take our results from there. 
*/\n\treturn uint32(hash >> (64 - h.bucketBits))\n}", "func (ch *ConsistentHash) fnv32Hash(key string) uint32 {\n\tnew32Hash := fnv.New32()\n\tnew32Hash.Write([]byte(key))\n\treturn new32Hash.Sum32()\n}", "func (p *RedisHashFieldCounterFloat64) Float64() (float64, error) {\n\treturn p.operationReturnsAmount(\"HGET\")\n}", "func (tv *TypedFloat) Float32() float32 {\n\tif len(tv.Bytes) == 0 {\n\t\treturn 0.0\n\t}\n\tvar value big.Float\n\t_ = value.GobDecode(tv.Bytes)\n\tflt32, _ := value.Float32()\n\treturn flt32\n}", "func (p RedisDsl) HASH_INCRBYFLOAT(key, field string, amount float64) (float64, error) {\n\treturn p.Cmd(\"HINCRBYFLOAT\", key, field, amount).Float64()\n}", "func crc32Demo() {\n\t// hasher\n\th := crc32.NewIEEE()\n\tfmt.Println(reflect.TypeOf(h))\n\n\t// write a string converted to bytes\n\th.Write([]byte(\"test\"))\n\n\t// checksum\n\tv := h.Sum32()\n\tfmt.Println(reflect.TypeOf(v)) // uint32\n\tfmt.Println(v)\n}", "func CalcHash(buf []byte, hasher hash.Hash) []byte {\n\thasher.Write(buf)\n\treturn hasher.Sum(nil)\n}", "func (p Pointer) Float32BE(offset int) float32 {\n\treturn float32(bits.ReverseBytes32(*(*uint32)(unsafe.Pointer(uintptr(int(p) + offset)))))\n}", "func (sdeep *SSDEEP) FuzzyByte(blob []byte) (*FuzzyHash, error) {\n\tn := len(blob)\n\tif n < minFileSize {\n\t\treturn nil, errors.New(\"Did not process files large enough to produce meaningful results\")\n\t}\n\tsdeep.getBlockSize(n)\n\tr := bytes.NewReader(blob)\n\treturn sdeep.FuzzyReader(r, \"\")\n}", "func (p Pointer) Float64BE(offset int) float64 {\n\treturn float64(bits.ReverseBytes64(*(*uint64)(unsafe.Pointer(uintptr(int(p) + offset)))))\n}", "func FNVHash32(value uint32) uint32 {\n\thash := FNVOffsetBasis32\n\tfor i := 0; i < 4; i++ {\n\t\toctet := value & 0x00FF\n\t\tvalue >>= 8\n\n\t\thash ^= octet\n\t\thash *= FNVPrime32\n\t}\n\treturn hash\n}", "func HashFunction(buf []byte) uint32 {\n\tvar hash uint32 = 5381\n\tfor _, b := range buf {\n\t\thash = ((hash << 5) + hash) + uint32(b)\n\t}\n\treturn hash\n}", "func Bytes32ToIpfsHash(value [32]byte) (string, error) {\n\tbyteArray := [34]byte{18, 32}\n\tcopy(byteArray[2:], value[:])\n\tif len(byteArray) != 34 {\n\t\treturn \"\", errors.New(\"invalid bytes32 value\")\n\t}\n\n\thash := base58.Encode(byteArray[:])\n\treturn hash, nil\n}", "func MakeRedisHashFieldCounterFloat64(redis dog_pool.RedisClientInterface, key, field string) (*RedisHashFieldCounterFloat64, error) {\n\tswitch {\n\tcase nil == redis:\n\t\treturn nil, fmt.Errorf(\"Nil redis connection\")\n\tcase len(key) == 0:\n\t\treturn nil, fmt.Errorf(\"Empty redis key\")\n\tcase len(field) == 0:\n\t\treturn nil, fmt.Errorf(\"Empty redis field\")\n\tdefault:\n\t\treturn &RedisHashFieldCounterFloat64{redis, key, field, nil}, nil\n\t}\n}", "func floatBits(f float64) uint64 {\n\t// Take f parameter and determine bit pattern.\n\t// Translate bit pattern into a value of type uint64\n\ti := *(*uint64)(unsafe.Pointer(&f))\n\t//fmt.Printf(\"strconv.FormatUint: %v\\n\", strconv.FormatUint(i, 2))\n\t// Return new value\n\treturn i\n}", "func hashFP(fp model.Fingerprint) uint {\n\treturn uint(fp ^ (fp >> 32) ^ (fp >> 16))\n}", "func fixToFloat(x int32) float32 {\n\treturn float32(x>>6) + float32(x&0x3f)/0x3f\n}", "func (v Value) Float(bitSize int) (float64, error) {\n\tif v.typ != Number {\n\t\treturn 0, v.newError(\"%s is not a number\", v.Raw())\n\t}\n\tf, err := strconv.ParseFloat(v.Raw(), bitSize)\n\tif err != nil {\n\t\treturn 0, v.newError(\"%v\", err)\n\t}\n\treturn f, nil\n}", "func checksum(height int, 
b []byte) []byte {\n\th := make([]byte, 8)\n\tbinary.LittleEndian.PutUint64(h, uint64(height))\n\tcs := fnv.New64a()\n\tcs.Write(h)\n\tcs.Write(b)\n\treturn cs.Sum(nil)\n}", "func fnvHash64(data []byte) uint64 {\n\thash := offset64\n\tfor _, c := range data {\n\t\thash *= prime64\n\t\thash ^= uint64(c)\n\t}\n\treturn hash\n}", "func (b Bytes) ToFloat64() float64 {\n\treturn math.Float64frombits(binary.LittleEndian.Uint64(b))\n}", "func DecodeFloatAscending(buf []byte, tmp []byte) ([]byte, float64, error) {\n\t// Handle the simplistic cases first.\n\tswitch buf[0] {\n\tcase floatNaN, floatNaNDesc:\n\t\treturn buf[1:], math.NaN(), nil\n\tcase floatInfinity:\n\t\treturn buf[1:], math.Inf(1), nil\n\tcase floatNegativeInfinity:\n\t\treturn buf[1:], math.Inf(-1), nil\n\tcase floatZero:\n\t\treturn buf[1:], 0, nil\n\t}\n\ttmp = tmp[len(tmp):cap(tmp)]\n\tidx := bytes.IndexByte(buf, floatTerminator)\n\tif idx == -1 {\n\t\treturn nil, 0, util.Errorf(\"did not find terminator %#x in buffer %#x\", floatTerminator, buf)\n\t}\n\tswitch {\n\tcase buf[0] == floatNegLarge:\n\t\t// Negative large.\n\t\te, m, tmp2 := decodeLargeNumber(true, buf[:idx+1], tmp)\n\t\treturn buf[idx+1:], makeFloatFromMandE(true, e, m, tmp2), nil\n\tcase buf[0] > floatNegLarge && buf[0] <= floatNegMedium:\n\t\t// Negative medium.\n\t\te, m, tmp2 := decodeMediumNumber(true, buf[:idx+1], tmp)\n\t\treturn buf[idx+1:], makeFloatFromMandE(true, e, m, tmp2), nil\n\tcase buf[0] == floatNegSmall:\n\t\t// Negative small.\n\t\te, m, tmp2 := decodeSmallNumber(true, buf[:idx+1], tmp)\n\t\treturn buf[idx+1:], makeFloatFromMandE(true, e, m, tmp2), nil\n\tcase buf[0] == floatPosLarge:\n\t\t// Positive large.\n\t\te, m, tmp2 := decodeLargeNumber(false, buf[:idx+1], tmp)\n\t\treturn buf[idx+1:], makeFloatFromMandE(false, e, m, tmp2), nil\n\tcase buf[0] >= floatPosMedium && buf[0] < floatPosLarge:\n\t\t// Positive medium.\n\t\te, m, tmp2 := decodeMediumNumber(false, buf[:idx+1], tmp)\n\t\treturn buf[idx+1:], makeFloatFromMandE(false, e, m, tmp2), nil\n\tcase buf[0] == floatPosSmall:\n\t\t// Positive small.\n\t\te, m, tmp2 := decodeSmallNumber(false, buf[:idx+1], tmp)\n\t\treturn buf[idx+1:], makeFloatFromMandE(false, e, m, tmp2), nil\n\tdefault:\n\t\treturn nil, 0, util.Errorf(\"unknown prefix of the encoded byte slice: %q\", buf)\n\t}\n}", "func FloatToBytes(dst []byte, val interface{}) ([]byte, error) {\n\tvar f float64\n\tswitch val.(type) {\n\tcase float32:\n\t\tf = float64(val.(float32))\n\tcase *float32:\n\t\tf = float64(*val.(*float32))\n\tcase float64:\n\t\tf = val.(float64)\n\tcase *float64:\n\t\tf = *val.(*float64)\n\tdefault:\n\t\treturn dst, ErrUnknownType\n\t}\n\n\tdst = strconv.AppendFloat(dst, f, 'f', -1, 64)\n\treturn dst, nil\n}", "func (p RedisDsl) HASH_GET_FLOAT64(key, field string) (*float64, error) {\n\treturn ReplyToFloat64Ptr(p.HASH_GET(key, field))\n}", "func calcSingleHash(data, md5Hash string, out chan interface{}) {\n\twg := &sync.WaitGroup{}\n\n\tcrcChannelWithMd5 := make(chan string)\n\n\twg.Add(1)\n\tgo func(data string, channel chan string, wg *sync.WaitGroup) {\n\t\tresult := DataSignerCrc32(data)\n\t\tchannel <- result\n\t\tdefer wg.Done()\n\t\tdefer close(channel)\n\t}(md5Hash, crcChannelWithMd5, wg)\n\n\tresult := DataSignerCrc32(data) + \"~\" + <-crcChannelWithMd5\n\n\tout <- result\n\twg.Wait()\n}", "func fpToFloat32(t string, x gosmc.SMCBytes, size uint32) (float32, error) {\n\tif v, ok := AppleFPConv[t]; ok {\n\t\tres := binary.BigEndian.Uint16(x[:size])\n\t\tif v.Signed {\n\t\t\treturn float32(int16(res)) / 
v.Div, nil\n\t\t} else {\n\t\t\treturn float32(res) / v.Div, nil\n\t\t}\n\t}\n\n\treturn 0.0, fmt.Errorf(\"unable to convert to float32 type %q, bytes %v to float32\", t, x)\n}", "func crc(data []byte) uint16 {\n\tvar crc16 uint16 = 0xffff\n\tl := len(data)\n\tfor i := 0; i < l; i++ {\n\t\tcrc16 ^= uint16(data[i])\n\t\tfor j := 0; j < 8; j++ {\n\t\t\tif crc16&0x0001 > 0 {\n\t\t\t\tcrc16 = (crc16 >> 1) ^ 0xA001\n\t\t\t} else {\n\t\t\t\tcrc16 >>= 1\n\t\t\t}\n\t\t}\n\t}\n\treturn crc16\n}", "func Hash(b []byte) uint32 {\n\tconst (\n\t\tseed = 0xbc9f1d34\n\t\tm = 0xc6a4a793\n\t)\n\th := uint32(seed) ^ uint32(len(b))*m\n\tfor ; len(b) >= 4; b = b[4:] {\n\t\th += uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24\n\t\th *= m\n\t\th ^= h >> 16\n\t}\n\tswitch len(b) {\n\tcase 3:\n\t\th += uint32(b[2]) << 16\n\t\tfallthrough\n\tcase 2:\n\t\th += uint32(b[1]) << 8\n\t\tfallthrough\n\tcase 1:\n\t\th += uint32(b[0])\n\t\th *= m\n\t\th ^= h >> 24\n\t}\n\treturn h\n}", "func GetFloatData(response *bcsmonitor.QueryResponse) float64 {\n\tif len(response.Data.Result) == 0 {\n\t\treturn 0\n\t}\n\tvalueStr, ok := response.Data.Result[0].Value[1].(string)\n\tif !ok {\n\t\treturn 0\n\t}\n\tvalue, err := strconv.ParseFloat(valueStr, 64)\n\tif err != nil {\n\t\treturn 0\n\t}\n\treturn value\n}", "func CtoF(t float32) float32 {\n\treturn (t*9/5 + 32)\n}", "func TestCheckBinaryExprFloatRhlFloat(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectConst(t, `2.0 >> 2.0`, env, NewConstInt64(2.0 >> 2.0), ConstInt)\n}", "func (p pngChunk) CalculateCRC() uint32 {\n\tcrcTable := crc32.MakeTable(crc32.IEEE)\n\n\treturn crc32.Checksum(p.BytesForCRC(), crcTable)\n}", "func HexadecAxpy(c, b []float64, s float64, ci, bi int)", "func hash(f []byte) (string, error) {\n\tsha := sha256.New()\n\t_, err := sha.Write(f)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn fmt.Sprintf(\"%x\", sha.Sum(nil)), nil\n}", "func UnLexFloat(b uint64) float64 {\n\tif b>>63 == 1 {\n\t\tb = b ^ (1 << 63)\n\t} else {\n\t\tb = ^b\n\t}\n\treturn math.Float64frombits(b)\n}", "func CalcHash(r io.Reader) (b []byte, err error) {\n\thash := sha512.New()\n\t_, err = io.Copy(hash, r)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tsum := hash.Sum(b)\n\tb = make([]byte, hex.EncodedLen(len(sum)))\n\thex.Encode(b, sum)\n\n\treturn\n}", "func Checksum(data []byte, tab *Table) uint64 {}", "func FNVHash64(value uint64) uint64 {\n\thash := FNVOffsetBasis64\n\tfor i := 0; i < 8; i++ {\n\t\toctet := value & 0x00FF\n\t\tvalue >>= 8\n\n\t\thash ^= octet\n\t\thash *= FNVPrime64\n\t}\n\treturn hash\n}", "func hexDec(hash []byte) []byte {\n var r = make([]byte, len(hash))\n carry := true\n for i := 0; i < len(hash); i++ {\n val := hash[i]\n if (val == 48) {\n r[i] = val\n continue\n }\n // a->f\n if val > 96 {\n val -= 96-9\n } else {\n val -= 48\n }\n if carry {\n val -=1\n carry = false\n }\n if (val+1) == 0 {\n val = 15\n carry = true\n }\n if val >= 10 {\n r[i] = val+96-9\n } else {\n r[i] = val+48\n }\n }\n return r\n}", "func (b *BloomFilter) hash2(value []byte) uint32 {\n\tf := crc32.NewIEEE()\n\tf.Write(value)\n\thash := f.Sum32()\n\treturn hash\n}", "func KDFe(hashAlg Algorithm, z []byte, use string, partyUInfo, partyVInfo []byte, bits int) ([]byte, error) {\n\th, err := hashAlg.Hash()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn KDFeHash(h, z, use, partyUInfo, partyVInfo, bits), nil\n}", "func M128ToFloat(src []M128) []float32 {\n\theader := *(*reflect.SliceHeader)(unsafe.Pointer(&src))\n\n\t// The length and capacity of the slice 
are different.\n\theader.Len *= 4\n\theader.Cap *= 4\n\n\t// Convert slice header to an []int32\n\tdst := *(*[]float32)(unsafe.Pointer(&header))\n\n\treturn dst\n}", "func (f *FactoidTransaction) ComputeFullHash() (*Bytes32, error) {\n\tdata, err := f.MarshalBinary()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttxid := Bytes32(sha256.Sum256(data))\n\treturn &txid, nil\n}", "func (p RedisDsl) HASH_DECRBYFLOAT(key, field string, amount float64) (float64, error) {\n\treturn p.HASH_INCRBYFLOAT(key, field, -1*amount)\n}", "func (in *InBuffer) ReadFloat32BE() float32 {\n\treturn math.Float32frombits(in.ReadUint32BE())\n}", "func (in *InBuffer) ReadFloat32BE() float32 {\n\treturn math.Float32frombits(in.ReadUint32BE())\n}", "func ByteHash(data ...[]byte) []byte {\n\n\thw := sha3.NewKeccak256()\n\tfor _, d := range data {\n\t\thw.Write(d)\n\t}\n\thash := hw.Sum(nil)\n\treturn hash\n}", "func Float32s(reply interface{}, err error) ([]float32, error) {\n\tvar result []float32\n\terr = sliceHelper(reply, err, \"Float32s\", func(n int) { result = make([]float32, n) }, func(i int, v interface{}) error {\n\t\tp, ok := v.([]byte)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"redisai-go: unexpected element type for Float32s, got type %T\", v)\n\t\t}\n\t\tvar f, err = strconv.ParseFloat(string(p), 64)\n\t\tresult[i] = float32(f)\n\t\treturn err\n\t})\n\treturn result, err\n}", "func (c *Cell) Float() (float64, error) {\n\tif !c.Is(Numeric) {\n\t\treturn 0, fmt.Errorf(\"cell data is not numeric\")\n\t}\n\n\tf, err := strconv.ParseFloat(c.data, 64)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"failed to convert cell data to number\")\n\t}\n\treturn f, nil\n}", "func hash_to_field(hash int,hlen int ,DST []byte,M []byte,ctr int) []*FP {\n\tq := NewBIGints(Modulus)\n\tL := ceil(q.nbits()+AESKEY*8,8)\n\tvar u []*FP\n\tvar fd =make([]byte,L)\n\tOKM:=core.XMD_Expand(hash,hlen,L*ctr,DST,M)\n\t\n\tfor i:=0;i<ctr;i++ {\n\t\tfor j:=0;j<L;j++ {\n\t\t\tfd[j]=OKM[i*L+j];\n\t\t}\n\t\tu = append(u,NewFPbig(DBIG_fromBytes(fd).Mod(q)))\n\t}\n\treturn u\n}", "func (in *InBuffer) ReadFloat64BE() float64 {\n\treturn math.Float64frombits(in.ReadUint64BE())\n}", "func (in *InBuffer) ReadFloat64BE() float64 {\n\treturn math.Float64frombits(in.ReadUint64BE())\n}", "func (p *BinlogFile) ReadFloatVector() ([]float32, int, error) {\n\tif p.reader == nil {\n\t\tlog.Warn(\"Binlog file: binlog reader not yet initialized\")\n\t\treturn nil, 0, errors.New(\"binlog reader not yet initialized\")\n\t}\n\n\tdim := 0\n\tresult := make([]float32, 0)\n\tfor {\n\t\tevent, err := p.reader.NextEventReader()\n\t\tif err != nil {\n\t\t\tlog.Warn(\"Binlog file: failed to iterate events reader\", zap.Error(err))\n\t\t\treturn nil, 0, fmt.Errorf(\"failed to iterate events reader, error: %w\", err)\n\t\t}\n\n\t\t// end of the file\n\t\tif event == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tif event.TypeCode != storage.InsertEventType {\n\t\t\tlog.Warn(\"Binlog file: binlog file is not insert log\")\n\t\t\treturn nil, 0, errors.New(\"binlog file is not insert log\")\n\t\t}\n\n\t\tif p.DataType() != schemapb.DataType_FloatVector {\n\t\t\tlog.Warn(\"Binlog file: binlog data type is not float vector\")\n\t\t\treturn nil, 0, errors.New(\"binlog data type is not float vector\")\n\t\t}\n\n\t\tdata, dimension, err := event.PayloadReaderInterface.GetFloatVectorFromPayload()\n\t\tif err != nil {\n\t\t\tlog.Warn(\"Binlog file: failed to read float vector data\", zap.Error(err))\n\t\t\treturn nil, 0, fmt.Errorf(\"failed to read float vector data, error: %w\", 
err)\n\t\t}\n\n\t\tdim = dimension\n\t\tresult = append(result, data...)\n\t}\n\n\treturn result, dim, nil\n}", "func (d LEByteDecoder) DecodeFloat32Array(buf *bytes.Reader, size int) ([]JsonFloat32, error) {\n\tif err := CheckSize(buf, size*4); err != nil {\n\t\treturn nil, errors.Wrap(err, \"decoding f32 array\")\n\t}\n\n\tvar arr [4]byte\n\tvar value float32\n\tslice := make([]JsonFloat32, size)\n\tfor i := 0; i < size; i++ {\n\t\tif n, err := buf.Read(arr[:]); n != 4 || err != nil {\n\t\t\treturn slice, errors.New(\"Could not read 4 bytes from buffer\")\n\t\t}\n\t\tvalue = math.Float32frombits(binary.LittleEndian.Uint32(arr[:]))\n\t\tslice[i] = JsonFloat32{F: value}\n\t}\n\n\treturn slice, nil\n}", "func computeHash(rdr io.Reader) string {\n\tsha256 := sha256.New()\n\tio.Copy(sha256, rdr)\n\thash := sha256.Sum(make([]byte, 0))\n\treturn hex.EncodeToString(hash)\n}", "func hashInt(s string) uint32 {\n\tb := []byte(s)\n\th := crc32.ChecksumIEEE(b)\n\treturn h\n}", "func (bf *BloomFilter) hashBytes(b []byte) uint64 {\n\tbf.hashLock.Lock()\n\tdefer bf.hashLock.Unlock()\n\n\tbf.hash.Write(b)\n\tdefer bf.hash.Reset()\n\n\treturn bf.hash.Sum64()\n}", "func CalcHash32(data []byte) Hash32 {\n\treturn hash.Sum(data)\n}", "func hexInc(hash []byte) []byte {\n for i := 0; i < len(hash) -1; i++ {\n val := hash[i]\n if (val == 48) { // this value is a zero\n continue\n } else {\n carry := true\n var start int\n if (val == 102) { // leave it alone if it's an f\n start = i - 1\n } else {\n start = i\n }\n for j := start; j >= 0; j-- {\n val2 := hash[j]\n // a->f\n if val2 > 96 {\n val2 -= 96-9\n } else {\n val2 -= 48\n }\n if carry {\n val2 +=1\n carry = false\n }\n if val2 == 16 {\n val2 = 0\n carry = true\n }\n if val2 >= 10 {\n hash[j] = val2+96-9\n } else {\n hash[j] = val2+48\n }\n }\n break\n }\n }\n return hash\n}" ]
[ "0.6334815", "0.6051349", "0.59986734", "0.59832597", "0.5759242", "0.572957", "0.5727986", "0.56612784", "0.56001383", "0.55469215", "0.5530653", "0.55262834", "0.55157834", "0.5508986", "0.54603994", "0.54603994", "0.542981", "0.5387967", "0.5368816", "0.5361708", "0.534282", "0.5323975", "0.5306723", "0.5304277", "0.5297634", "0.52769303", "0.52768016", "0.5255247", "0.52537817", "0.52502435", "0.52334434", "0.5192707", "0.51685786", "0.5162666", "0.51585835", "0.5153054", "0.515065", "0.5146293", "0.5125848", "0.5123368", "0.51228935", "0.51061267", "0.50938576", "0.5092573", "0.5084787", "0.5069745", "0.5047803", "0.5047571", "0.5040898", "0.5039398", "0.5036139", "0.5028478", "0.50258195", "0.50211775", "0.5020752", "0.5015917", "0.5015661", "0.50141263", "0.501193", "0.49966654", "0.49826184", "0.497068", "0.4953631", "0.4951988", "0.4950371", "0.49413943", "0.49358502", "0.49327025", "0.4931174", "0.49294806", "0.4926509", "0.49181768", "0.4911291", "0.49108154", "0.4902997", "0.4901697", "0.4900037", "0.49000055", "0.48941982", "0.48915905", "0.48900908", "0.48900118", "0.48894256", "0.48882255", "0.48879507", "0.48848912", "0.48848912", "0.48845983", "0.48791647", "0.4875846", "0.4873963", "0.48722908", "0.48722908", "0.4871646", "0.4871645", "0.4870445", "0.4870083", "0.4860463", "0.48595884", "0.48595724" ]
0.8096048
0
SanitizeName cleans a stat or datapoint name to be representable in a wide range of data collection software.
func SanitizeName(name string) string { rname := []byte(name) for i, r := range rname { switch { case r >= 'A' && r <= 'Z': case r >= 'a' && r <= 'z': case r >= '0' && r <= '9': default: switch r { case '_', '.', '-': case '/': rname[i] = '.' default: rname[i] = '_' } } } return string(rname) }
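A minimal, self-contained usage sketch for the function above (the copy below is inlined verbatim so it compiles standalone; the sample names are made up for illustration). Note the loop ranges over bytes, not runes, so each byte of a multi-byte UTF-8 character is replaced by its own underscore:

package main

import "fmt"

// sanitizeName is a local copy of the SanitizeName shown in the record
// above, inlined so this sketch builds on its own.
func sanitizeName(name string) string {
	rname := []byte(name)
	for i, r := range rname {
		switch {
		case r >= 'A' && r <= 'Z':
		case r >= 'a' && r <= 'z':
		case r >= '0' && r <= '9':
		default:
			switch r {
			case '_', '.', '-': // already safe, keep as-is
			case '/':
				rname[i] = '.' // path separators become dots
			default:
				rname[i] = '_' // everything else becomes an underscore
			}
		}
	}
	return string(rname)
}

func main() {
	for _, s := range []string{
		"cpu/usage%total", // '/' becomes '.', '%' becomes '_'
		"disk io (ms)",    // spaces and parens become '_'
		"mem.free-bytes",  // already valid, passes through unchanged
	} {
		fmt.Printf("%q -> %q\n", s, sanitizeName(s))
	}
	// Output:
	// "cpu/usage%total" -> "cpu.usage_total"
	// "disk io (ms)" -> "disk_io__ms_"
	// "mem.free-bytes" -> "mem.free-bytes"
}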
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func SanitizeName(name string, escape bool) string {\n\tif escape {\n\t\t// Escape underscore\n\t\tname = strings.ReplaceAll(name, \"_\", \"\\\\_\")\n\t}\n\treturn name\n}", "func sanitizeName(name string) string {\n\toutput := strings.ToLower(illegalChars.ReplaceAllString(name, \"_\"))\n\n\tif legalLabel.MatchString(output) {\n\t\treturn output\n\t}\n\t// Prefix name with _ if it begins with a number\n\treturn \"_\" + output\n}", "func SanitizeName(name string, removeSlash bool) (string, error) {\n\twithoutDiacritics, _, err := transform.String(transformer, strings.ToLower(name))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\twithoutSpaces := strings.Replace(withoutDiacritics, \" \", \"_\", -1)\n\twithoutSpecials := specialChars.ReplaceAllString(withoutSpaces, \"\")\n\n\tsanitized := withoutSpecials\n\tif removeSlash {\n\t\tsanitized = strings.Replace(sanitized, \"/\", \"_\", -1)\n\t}\n\n\treturn sanitized, nil\n}", "func SanitizeName(name string) string {\n\tresult := strings.Builder{}\n\tblackList := []rune{'$', '\\\\', '/', '.', ';', ':'}\n\n\tfor _, ch := range name {\n\t\tif ch < 26 {\n\t\t\tch = '.'\n\t\t} else {\n\t\t\tfor _, badCh := range blackList {\n\t\t\t\tif ch == badCh {\n\t\t\t\t\tch = '.'\n\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tresult.WriteRune(ch)\n\t}\n\n\treturn result.String()\n}", "func Sanitize(s string) string {\n\ts = CamelCase(s)\n\tif slices.Contains(ReservedNames, s) {\n\t\treturn s + \"_\"\n\t}\n\n\treturn s\n}", "func SanitizeLabelName(name string) string {\n\treturn invalidLabelCharRE.ReplaceAllString(name, \"_\")\n}", "func SanitizeName(filename string) string {\n\tif len(filename) > 1 && filename[1] == ':' &&\n\t\truntime.GOOS == \"windows\" {\n\t\tfilename = filename[2:]\n\t}\n\tfilename = strings.Replace(filename, `\\`, `/`, -1)\n\tfilename = strings.TrimLeft(filename, \"/.\")\n\treturn strings.Replace(filename, \"../\", \"\", -1)\n}", "func (ig *InstanceGroup) NameSanitized() string {\n\treturn names.Sanitize(ig.Name)\n}", "func SanitizePrometheusNames(name string) string {\n\tnonPromoChars := regexp.MustCompile(\"[^a-zA-Z\\\\d_]\")\n\tnameBytes := []byte(name)\n\treplaceChar := []byte(\"_\")\n\treturn string(nonPromoChars.ReplaceAll(nameBytes, replaceChar))\n}", "func sanitizeMetricName(namespace string, v *view.View) string {\n\tif namespace != \"\" {\n\t\tnamespace = strings.Replace(namespace, \" \", \"\", -1)\n\t\treturn sanitizeString(namespace) + \".\" + sanitizeString(v.Name)\n\t}\n\treturn sanitizeString(v.Name)\n}", "func SanitizeACName(s string) (string, error) {\n\ts = strings.ToLower(s)\n\ts = invalidACNameChars.ReplaceAllString(s, \"-\")\n\ts = invalidACNameEdges.ReplaceAllString(s, \"\")\n\n\tif s == \"\" {\n\t\treturn \"\", errors.New(\"must contain at least one valid character\")\n\t}\n\n\treturn s, nil\n}", "func SanitizeMetricName(name string) (string, bool) {\n\tif model.IsValidMetricName(model.LabelValue(name)) {\n\t\treturn name, true\n\t}\n\treturn sanitize(name, MetricNameTable)\n}", "func sanitizeName(field string) string {\n\tif len(field) == 0 {\n\t\treturn \"\"\n\t}\n\n\tfieldSlice := strings.Split(field, \"\")\n\tfield = \"\"\n\n\tfieldSlice[0] = strings.ToUpper(fieldSlice[0])\n\tfor _, f := range fieldSlice {\n\t\tfield += f\n\t}\n\n\treturn field\n}", "func cleanName(s string) string {\n\ts = strings.Replace(s, \" \", \"_\", -1) // Remove spaces\n\ts = strings.Replace(s, \"(\", \"\", -1) // Remove open parenthesis\n\ts = strings.Replace(s, \")\", \"\", -1) // Remove close parenthesis\n\ts = 
strings.Replace(s, \"/\", \"\", -1) // Remove forward slashes\n\ts = strings.ToLower(s)\n\treturn s\n}", "func cleanName(name string) string {\n\tname = strings.TrimSpace(strings.ToLower(name))\n\n\tfor {\n\t\tif i := nameStripRE.FindStringIndex(name); i != nil {\n\t\t\tname = name[i[1]:]\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tname = strings.Trim(name, \"-\")\n\t// Remove dots at the beginning of names\n\tif len(name) > 1 && name[0] == '.' {\n\t\tname = name[1:]\n\t}\n\treturn name\n}", "func SanitizeJobName(name string) string {\n\n\tname = strings.ToLower(name)\n\tre := regexp.MustCompile(`[^a-z0-9-]`)\n\treturn re.ReplaceAllString(name, \"-\")\n}", "func sanitizedName(filename string) string {\n\tif len(filename) > 1 && filename[1] == ':' &&\n\t\truntime.GOOS == \"windows\" {\n\t\tfilename = filename[2:]\n\t}\n\tfilename = filepath.ToSlash(filename)\n\tfilename = strings.TrimLeft(filename, \"/.\")\n\treturn strings.Replace(filename, \"../\", \"\", -1)\n}", "func sanitiseName(key string) string {\n\treturn strings.ReplaceAll(key, \".\", \"_\")\n}", "func sanitizeVolumeName(name string) string {\n\tname = strings.ToLower(name)\n\tname = invalidDNS1123Characters.ReplaceAllString(name, \"-\")\n\tif len(name) > validation.DNS1123LabelMaxLength {\n\t\tname = name[0:validation.DNS1123LabelMaxLength]\n\t}\n\treturn strings.Trim(name, \"-\")\n}", "func CleanName(name string) string {\n\tname = strings.Replace(name, \"-\", \"_\", -1)\n\tname = strings.Replace(name, \" \", \"_\", -1)\n\tname = strings.Replace(name, \"/\", \"_\", -1)\n\treturn name\n}", "func promSanitizeMetricName(name string) string {\n\tres := make([]rune, 0, len(name))\n\tfor _, rVal := range name {\n\t\tif unicode.IsDigit(rVal) || unicode.IsLetter(rVal) || rVal == ':' {\n\t\t\tres = append(res, rVal)\n\t\t} else {\n\t\t\tres = append(res, '_')\n\t\t}\n\t}\n\treturn string(res)\n}", "func cleanName(s string) string {\n\ts = strings.Replace(s, \" \", \"_\", -1) // Remove spaces\n\ts = strings.Replace(s, \"(\", \"_\", -1) // Remove open parenthesis\n\ts = strings.Replace(s, \":\", \"_\", -1) // Remove open parenthesis\n\ts = strings.Replace(s, \")\", \"_\", -1) // Remove close parenthesis\n\ts = strings.Replace(s, \"\\\\\", \"_\", -1) // Remove backward slashes\n\ts = strings.ToLower(s)\n\treturn s\n}", "func NormalizeWikiName(name string) string {\n\treturn strings.Replace(name, \"-\", \" \", -1)\n}", "func sanitizeArtifactName(name string) string {\n\treturn disallowedArtifactNameChar.ReplaceAllString(name, \"-\")\n}", "func SanitizeDatabaseName(schema string) string {\n\treturn pgx.Identifier{schema}.Sanitize()\n}", "func sanitizedBranchName(input string) string {\n\treturn strings.Replace(input, \" \", \"-\", -1)\n}", "func ValidName(name string) string {\n\tre := regexp.MustCompile(`[^a-zA-Z0-9\\-\\.]`)\n\tname = re.ReplaceAllLiteralString(name, \"-\")\n\tname = strings.ToLower(name)\n\treturn name\n}", "func (sink *influxdbSink) checkSanitizedMetricName(name string) error {\n\tif !metricAllowedChars.MatchString(name) {\n\t\treturn fmt.Errorf(\"Invalid metric name %q\", name)\n\t}\n\n\treturn nil\n}", "func SanitizeLabelName(name string) (string, bool) {\n\tif model.LabelName(name).IsValid() {\n\t\treturn name, true\n\t}\n\treturn sanitize(name, LabelNameTable)\n}", "func sanitizeAndUniquify(name string) string {\n\tif strings.ContainsAny(name, \"_.\") {\n\t\tname = strings.Replace(name, \"_\", \"-\", -1)\n\t\tname = strings.Replace(name, \".\", \"-\", -1)\n\t\tname = fmt.Sprintf(\"%s-%s\", name, 
utilrand.String(5))\n\t}\n\treturn name\n}", "func sanitizeAndUniquify(name string) string {\n\tif strings.ContainsAny(name, \"_.\") {\n\t\tname = strings.Replace(name, \"_\", \"-\", -1)\n\t\tname = strings.Replace(name, \".\", \"-\", -1)\n\t\tname = fmt.Sprintf(\"%s-%s\", name, utilrand.String(5))\n\t}\n\treturn name\n}", "func sanitize(name string, table Table) (string, bool) {\n\tvar b strings.Builder\n\n\tfor i, r := range name {\n\t\tswitch {\n\t\tcase i == 0:\n\t\t\tif unicode.In(r, table.First) {\n\t\t\t\tb.WriteRune(r)\n\t\t\t}\n\t\tdefault:\n\t\t\tif unicode.In(r, table.Rest) {\n\t\t\t\tb.WriteRune(r)\n\t\t\t} else {\n\t\t\t\tb.WriteString(\"_\")\n\t\t\t}\n\t\t}\n\t}\n\n\tname = strings.Trim(b.String(), \"_\")\n\tif name == \"\" {\n\t\treturn \"\", false\n\t}\n\treturn name, true\n}", "func SanitizeFileName(name string) string {\n\tname = strings.TrimSpace(name)\n\tif strings.IndexAny(name, \"%+\") != -1 {\n\t\tn, err := url.QueryUnescape(name)\n\t\tif err == nil && n != \"\" {\n\t\t\tname = n\n\t\t}\n\t}\n\text := path.Ext(name)\n\tif ext != \"\" {\n\t\tname = name[:len(name)-len(ext)]\n\t\text = NormalizeExt(ext)\n\t}\n\n\tb := strings.Builder{}\n\tb.Grow(len(name))\n\tlastWasPlaceholder := false\n\tfor i, r := range name {\n\t\tlastWasPlaceholder = writeSafeFileNameRune(&b, i, r, lastWasPlaceholder)\n\t}\n\n\treturn b.String() + ext\n}", "func Sanitize(s string) string {\n\treg := regexp.MustCompile(\"[^A-Za-z0-9]\")\n\treturn strings.ToLower(reg.ReplaceAllString(s, \"\"))\n}", "func NormalizeName(s string) string {\n\treturn strings.ToLower(strings.TrimSpace(s))\n}", "func Sanitize(filename string) (string, error) {\n\treg, err := regexp.Compile(\"[^a-zA-Z0-9.]+\")\n\tif err != nil {\n\t\treturn filename, err\n\t}\n\treturn reg.ReplaceAllString(filename, \"\"), nil\n}", "func normalizeMetricName(s string) string {\n\tr1 := regWhitespace.ReplaceAllLiteral([]byte(s), []byte{'_'})\n\tr2 := bytes.Replace(r1, []byte{'/'}, []byte{'-'}, -1)\n\treturn string(regNonAlphaNum.ReplaceAllLiteral(r2, nil))\n}", "func sanitizeLabel(value string) string {\n\n\t// Valid label values must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z])\n\t// with dashes (-), underscores (_), dots (.), and alphanumerics between.\n\n\t// replace all invalid characters with a hyphen\n\treg := regexp.MustCompile(`[^a-zA-Z0-9-_.]+`)\n\tvalue = reg.ReplaceAllString(value, \"-\")\n\n\t// replace double hyphens with a single one\n\tvalue = strings.Replace(value, \"--\", \"-\", -1)\n\n\t// ensure it starts with an alphanumeric character\n\treg = regexp.MustCompile(`^[-_.]+`)\n\tvalue = reg.ReplaceAllString(value, \"\")\n\n\t// maximize length at 63 characters\n\tif len(value) > 63 {\n\t\tvalue = value[:63]\n\t}\n\n\t// ensure it ends with an alphanumeric character\n\treg = regexp.MustCompile(`[-_.]+$`)\n\tvalue = reg.ReplaceAllString(value, \"\")\n\n\treturn value\n}", "func NormalizeUsername(name string) (string, error) {\n\torigName := name\n\tif i := strings.Index(name, \"@\"); i != -1 && i == strings.LastIndex(name, \"@\") {\n\t\tname = name[:i]\n\t}\n\tname = disallowedCharacter.ReplaceAllString(name, \"-\")\n\tif strings.HasPrefix(name, \"-\") || strings.HasSuffix(name, \"-\") || strings.Contains(name, \"--\") {\n\t\treturn \"\", fmt.Errorf(\"username %q could not be normalized to acceptable format\", origName)\n\t}\n\tif err := suspiciousnames.CheckNameAllowedForUserOrOrganization(name); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn name, nil\n}", "func 
sanitize(ident string) string {\n\t// TODO: use regex to replace all other characters other than [A-Za-z0-9_]\n\treturn strings.Replace(strings.Trim(ident, \" \"), \" \", \"_\", -1)\n}", "func FilterMetricName(name string) string {\n\treturn reg.ReplaceAllString(name, \"_\")\n}", "func NormalizeName(name string) string {\n\tname = strings.TrimLeft(name, \"_\")\n\treturn strings.ToUpper(name[:1]) + name[1:]\n}", "func NormalizeName(title string) (string, error) {\n\n\tnormalizedName, err := GetRequest(config.NormalizeServer + \"?searchText=\" + url.QueryEscape(title))\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tvar response Response\n\tif json.Unmarshal([]byte(normalizedName), &response); err != nil {\n\t\treturn \"\", err\n\t}\n\tif response.Status != 200 {\n\t\treturn \"\", revel.NewErrorFromPanic(\"Server responded with\" + strconv.Itoa(response.Status))\n\t}\n\tlog.Println(\"normalized\", title, \"to\", response.Content)\n\treturn response.Content, nil\n}", "func normMetricNameParse(name string) (string, bool) {\n\tif name == \"\" || len(name) > MaxNameLen {\n\t\treturn name, false\n\t}\n\n\tvar i, ptr int\n\tres := make([]byte, 0, len(name))\n\n\t// skip non-alphabetic characters\n\tfor ; i < len(name) && !isAlpha(name[i]); i++ {\n\t}\n\n\t// if there were no alphabetic characters it wasn't valid\n\tif i == len(name) {\n\t\treturn \"\", false\n\t}\n\n\tfor ; i < len(name); i++ {\n\t\tswitch {\n\t\tcase isAlphaNum(name[i]):\n\t\t\tres = append(res, name[i])\n\t\t\tptr++\n\t\tcase name[i] == '.':\n\t\t\t// we skipped all non-alpha chars up front so we have seen at least one\n\t\t\tswitch res[ptr-1] {\n\t\t\t// overwrite underscores that happen before periods\n\t\t\tcase '_':\n\t\t\t\tres[ptr-1] = '.'\n\t\t\tdefault:\n\t\t\t\tres = append(res, '.')\n\t\t\t\tptr++\n\t\t\t}\n\t\tdefault:\n\t\t\t// we skipped all non-alpha chars up front so we have seen at least one\n\t\t\tswitch res[ptr-1] {\n\t\t\t// no double underscores, no underscores after periods\n\t\t\tcase '.', '_':\n\t\t\tdefault:\n\t\t\t\tres = append(res, '_')\n\t\t\t\tptr++\n\t\t\t}\n\t\t}\n\t}\n\n\tif res[ptr-1] == '_' {\n\t\tres = res[:ptr-1]\n\t}\n\n\treturn string(res), true\n}", "func cleanName(name string) string {\n\tvar builder strings.Builder\n\tfor i, c := range name {\n\t\tif !isLegalIdentifierPart(c) {\n\t\t\tbuilder.WriteRune('_')\n\t\t} else {\n\t\t\tif i == 0 && !isLegalIdentifierStart(c) {\n\t\t\t\tbuilder.WriteRune('_')\n\t\t\t}\n\t\t\tbuilder.WriteRune(c)\n\t\t}\n\t}\n\treturn builder.String()\n}", "func StripName(str string) string {\n\tnotAlpha := regexp.MustCompile(`[^a-zA-Z\\- ]`)\n\treturn notAlpha.ReplaceAllString(str, \"\")\n}", "func canonicalizeExperimentName(name string) string {\n\tswitch name = strcase.ToSnake(name); name {\n\tcase \"ndt_7\":\n\t\tname = \"ndt\" // since 2020-03-18, we use ndt7 to implement ndt by default\n\tdefault:\n\t}\n\treturn name\n}", "func sanitiseBINDFileInput(s string) string {\n\t// Remove SOA records.\n\tsoaRe := regexp.MustCompile(`(?m)[\\r\\n]+^.*IN\\s+SOA.*$`)\n\ts = soaRe.ReplaceAllString(s, \"\")\n\n\t// Remove all comments.\n\tcommentRe := regexp.MustCompile(`(?m)[\\r\\n]+^.*;;.*$`)\n\ts = commentRe.ReplaceAllString(s, \"\")\n\n\t// Swap all the tabs to spaces.\n\tr := strings.NewReplacer(\n\t\t\"\\t\", \" \",\n\t\t\"\\n\\n\", \"\\n\",\n\t)\n\ts = r.Replace(s)\n\ts = strings.TrimSpace(s)\n\n\treturn s\n}", "func NormalizedName(s string) string {\n\treturn strings.Map(normalizedChar, s)\n}", "func cleanUserName(userName string) string {\n\tuserName = 
strings.Replace(userName, \"-\", \"\", -1)\n\tuserName = strings.Replace(userName, \".\", \"\", -1)\n\tuserName = strings.Replace(userName, \"_\", \"\", -1)\n\treturn strings.ToLower(userName)\n}", "func Name(name string) (string, error) {\n\tvar validName string\n\terrorInvalid := errors.New(\"invalid name format\")\n\tif len(name) > 50 || len(name) < 2 {\n\t\treturn validName, errorInvalid\n\t}\n\tnameRegexp := regexp.MustCompile(\"^[\\\\p{L}\\\\s'.-]+$\")\n\tif !nameRegexp.MatchString(name) {\n\t\treturn validName, errorInvalid\n\t}\n\tvalidName = strings.TrimSpace(name)\n\tvalidName = strings.ToUpper(validName)\n\treturn validName, nil\n}", "func Normalize(name string) string {\n\tfargs := func(c rune) bool {\n\t\treturn !unicode.IsLetter(c) && !unicode.IsNumber(c)\n\t}\n\t// get function\n\treturn strings.Join(strings.FieldsFunc(name, fargs), \"-\")\n}", "func (self *FCSInfo) cleanName(s *string, isFile bool) {\n\n\tif isFile == true {\n\t\t*s = strings.Replace(*s, \"/\", \"-\", -1)\n\t\t*s = strings.Replace(*s, \"\\\\\", \"-\", -1)\n\t}\n\n}", "func cleanStackName(input string) string {\n\treg, _ := regexp.Compile(\"[^a-zA-Z0-9\\\\-]+\")\n\n\treturn reg.ReplaceAllString(strings.Replace(input, \"/\", \"-\", 0), \"\")\n}", "func sanitize(s string) string {\n\treturn strings.ReplaceAll(s, \"-\", \"\")\n}", "func cleanName(name string, drop int) string {\n\tvar b strings.Builder\n\tfor i := 0; i < len(name); i++ {\n\t\tif name[i] == '/' {\n\t\t\tif i < len(name)-1 && name[i+1] == '/' {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif drop > 0 {\n\t\t\t\tdrop--\n\t\t\t\tb.Reset()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tb.WriteByte(name[i])\n\t}\n\treturn b.String()\n}", "func sanitizePackageName(input string) string {\n\tbuilder := make([]rune, 0, len(input))\n\n\tfor _, r := range input {\n\t\tif unicode.IsLetter(r) || unicode.IsNumber(r) {\n\t\t\tbuilder = append(builder, unicode.ToLower(r))\n\t\t}\n\t}\n\n\treturn string(builder)\n}", "func (c *hostNameFormatConfig) FormatName(name string) (string, error) {\n\tif !c.useFullName {\n\t\t//only split if string is not an IP address\n\t\tip := net.ParseIP(name)\n\t\tif ip == nil {\n\t\t\tname = strings.SplitN(name, \".\", 2)[0]\n\t\t}\n\t}\n\n\tif !c.IsValid(name) {\n\t\treturn \"\", errors.New(fmt.Sprintf(\"Invalid name of '%s'\", name))\n\t}\n\treturn strings.ToLower(name), nil\n}", "func (f *FortiWebClient) SafeName(url string) string {\n\n\treturn strings.Replace(url, \"/\", \"_\", -1)\n}", "func (ma MetricAlert) QuerySafeName() string {\n\ttmp := strings.Replace(ma.Name, \"[\", \"\\\\\\\\[\", -1)\n\ttmp = strings.Replace(tmp, \"]\", \"\\\\\\\\]\", -1)\n\ttmp = strings.Replace(tmp, \"(\", \"\\\\\\\\(\", -1)\n\ttmp = strings.Replace(tmp, \")\", \"\\\\\\\\)\", -1)\n\ttmp = strings.Replace(tmp, \"-\", \"\\\\\\\\-\", -1)\n\ttmp = strings.Replace(tmp, \".\", \"\\\\\\\\.\", -1)\n\ttmp = strings.Replace(tmp, \":\", \"\\\\\\\\:\", -1)\n\treturn tmp\n}", "func sanitizePackageName(input string) string {\n\tvar builder []rune\n\n\tfor _, r := range input {\n\t\tif unicode.IsLetter(r) || unicode.IsNumber(r) {\n\t\t\tbuilder = append(builder, unicode.ToLower(rune(r)))\n\t\t}\n\t}\n\n\treturn string(builder)\n}", "func Sanitize(s string) string {\n\tNewSanitizer()\n\treturn sanitizer.defaultPolicy.Sanitize(s)\n}", "func mangleName(name string) string {\n\tr, _ := regexp.Compile(\"[^0-9a-zA-Z]+\")\n\treturn r.ReplaceAllString(name, \"\")\n}", "func (ident Identifier) Sanitize() string {\n\tparts := make([]string, len(ident))\n\tfor i := range ident {\n\t\ts := 
strings.ReplaceAll(ident[i], string([]byte{0}), \"\")\n\t\tparts[i] = `\"` + strings.ReplaceAll(s, `\"`, `\"\"`) + `\"`\n\t}\n\treturn strings.Join(parts, \".\")\n}", "func SanitizeInput(input string) string {\n\tinput = strings.TrimSpace(input)\n\tif input == \"\" {\n\t\treturn input\n\t}\n\tinput = strings.TrimPrefix(input, \"\\\"\")\n\tinput = strings.TrimSuffix(input, \"\\\"\")\n\treturn strings.NewReplacer(\" \", \" \", \"\\n\", \" \", \"\\t\", \" \", \"\\r\", \" \").Replace(input)\n}", "func UnicodeSanitize(s string) string {\n\tsource := []rune(s)\n\ttarget := make([]rune, 0, len(source))\n\n\tfor i, r := range source {\n\t\tif r == '%' && i+2 < len(source) && ishex(source[i+1]) && ishex(source[i+2]) {\n\t\t\ttarget = append(target, r)\n\t\t} else if unicode.IsLetter(r) || unicode.IsDigit(r) || unicode.IsMark(r) || r == '.' || r == '/' || r == '\\\\' || r == '_' || r == '-' || r == '#' || r == '+' || r == '~' {\n\t\t\ttarget = append(target, r)\n\t\t}\n\t}\n\n\tvar result string\n\n\tif true {\n\t\t// remove accents - see https://blog.golang.org/normalization\n\t\tt := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC)\n\t\tresult, _, _ = transform.String(t, string(target))\n\t} else {\n\t\tresult = string(target)\n\t}\n\n\treturn result\n}", "func sanitize(s string) string {\n\tif len(s) == 0 {\n\t\treturn s\n\t}\n\n\t// Note: No length limit for label keys because Prometheus doesn't\n\t// define a length limit, thus we should NOT be truncating label keys.\n\t// See https://github.com/orijtech/prometheus-go-metrics-exporter/issues/4.\n\ts = strings.Map(sanitizeRune, s)\n\tif unicode.IsDigit(rune(s[0])) {\n\t\ts = keyStr + delimeter + s\n\t}\n\tif s[0] == '_' {\n\t\ts = keyStr + s\n\t}\n\treturn s\n}", "func removeExtraNames(input string) string {\n\tinput = strings.Replace(input, \"-\", \" \", -1)\n\tsplitInput := strings.Split(input, \" \")\n\treturn splitInput[0]\n}", "func ProjectNameSafe(name string) string {\n\treturn strings.Replace(strings.ToLower(name), \" \", \"\", -1)\n}", "func NormalizeName(name string) (string, error) {\n\tif name == \"\" {\n\t\treturn DefaultSpanName, ErrEmpty\n\t}\n\tvar err error\n\tif len(name) > MaxNameLen {\n\t\tname = TruncateUTF8(name, MaxNameLen)\n\t\terr = ErrTooLong\n\t}\n\tname, ok := normMetricNameParse(name)\n\tif !ok {\n\t\treturn DefaultSpanName, ErrInvalid\n\t}\n\treturn name, err\n}", "func sanitize(s string) string {\n\tif len(s) >= lenStartCommand {\n\t\tif s[:lenStartCommand] == startCommand {\n\t\t\ts = s[lenStartCommand:]\n\t\t}\n\t}\n\n\tif len(s) >= lenPunchCommand {\n\t\tif s[:lenPunchCommand] == punchCommand {\n\t\t\ts = s[lenPunchCommand:]\n\t\t}\n\t}\n\tif len(s) >= lenBotTag {\n\t\tif s[:lenBotTag] == botTag {\n\t\t\ts = s[lenBotTag:]\n\t\t}\n\t}\n\treturn s\n}", "func SanitizeFieldName(name string) string {\n\tname = codegen.Goify(name, true)\n\tif strings.HasSuffix(name, \"Id\") {\n\t\tname = strings.TrimSuffix(name, \"Id\")\n\t\tname = name + \"ID\"\n\t}\n\n\treturn name\n}", "func canonicalizeName(q string) string {\n\tq = strings.Replace(q, \"'\", \"’\", -1)\n\treturn q\n}", "func NormalizeVersionName(version string) string {\n\tfor _, char := range TrimChars {\n\t\tversion = strings.ReplaceAll(version, char, \"\")\n\t}\n\treturn version\n}", "func (manager *ComposeStackManager) NormalizeStackName(name string) string {\n\treturn stackNameNormalizeRegex.ReplaceAllString(strings.ToLower(name), \"\")\n}", "func SafeComputerName(name string) string {\n\tif len(name) <= 0 {\n\t\treturn 
\"computername\"\n\t}\n\tinvalidChars := []string{\"\\\\\", \"/\", \":\", \"*\", \"?\", \"\\\"\", \"<\", \">\", \"|\"}\n\tfor _, s := range invalidChars {\n\t\tname = strings.Replace(name, s, \"\", -1)\n\t}\n\ti := len(name)\n\tif i > 15 {\n\t\ti = 15\n\t}\n\treturn name[0:i]\n}", "func TestSanitizedName(t *testing.T) {\n\ttables := []struct {\n\t\tbadPath string\n\t\tgoodPath string\n\t}{\n\t\t{\"\\\\..\\\\1.txt\", \"1.txt\"},\n\t\t{\"///foo/bar\", \"foo/bar\"},\n\t\t{\"C:/loo/bar2\", \"loo/bar2\"},\n\t\t{\"C:\\\\loo\\\\bar2\", \"loo\\\\bar2\"},\n\t\t{\"../../foo../../ba..r\", \"foo../ba..r\"},\n\t\t{\"../my.file\", \"my.file\"},\n\t}\n\tfor _, table := range tables {\n\t\tfixedPath := sanitizedName(table.badPath)\n\t\tif fixedPath != table.goodPath {\n\t\t\tt.Errorf(\"expected and fixedPath didn't match - %s : %s\", table.goodPath, fixedPath)\n\t\t}\n\t}\n}", "func sanitizeFileName(v string) string {\n\treturn path.Clean(strings.ReplaceAll(v, \"../\", \"\"))\n}", "func UnicodeSanitize(s string) string {\n\tsource := []rune(s)\n\ttarget := make([]rune, 0, len(source))\n\n\tfor _, r := range source {\n\t\tif unicode.IsLetter(r) ||\n\t\t\tunicode.IsDigit(r) ||\n\t\t\tunicode.IsMark(r) ||\n\t\t\tr == '.' ||\n\t\t\tr == '/' ||\n\t\t\tr == '\\\\' ||\n\t\t\tr == '_' ||\n\t\t\tr == '-' ||\n\t\t\tr == '%' ||\n\t\t\tr == ' ' ||\n\t\t\tr == '#' {\n\t\t\ttarget = append(target, r)\n\t\t}\n\t}\n\n\treturn string(target)\n}", "func fixname(s string) string {\n\tr := strings.Replace(s, \"<\", `\"`, -1)\n\tr = strings.Replace(s, \">\", `\"`, -1)\n\treturn r\n}", "func ValidName(s string) error {\n\n\tif len(s) < 3 {\n\t\treturn errors.New(\"name is less than 3 characters long\")\n\t}\n\tfor i, x := range s {\n\t\tif !unicode.IsLetter(x) {\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"element %d, '%v' of name is not a letter\", i, x)\n\t\t}\n\t}\n\treturn nil\n}", "func FixNameSymbol(s string) string {\n\tacceptSpecialChars := \"$-+\"\n\n\ttmp, err := stringz.ReplaceWithTags(s, \"(\", \")\", \"\", 10)\n\tif err == nil {\n\t\ts = tmp\n\t}\n\ttmp, err = stringz.ReplaceWithTags(s, \"[\", \"]\", \"\", 10)\n\tif err == nil {\n\t\ts = tmp\n\t}\n\n\t// letter/digit/space/acceptSpecialChars accepted\n\tvar rName []rune\n\tfor _, v := range s {\n\t\tif unicode.IsLetter(v) || unicode.IsDigit(v) || unicode.IsSpace(v) || strings.Contains(acceptSpecialChars, string(v)) {\n\t\t\trName = append(rName, v)\n\t\t} else {\n\t\t\trName = append(rName, '-')\n\t\t}\n\t}\n\ts = string(rName)\n\n\t// use '-' instead of space\n\tfor {\n\t\tif strings.Contains(s, \" \") {\n\t\t\ts = strings.Replace(s, \" \", \"-\", -1)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tfor {\n\t\tif strings.Contains(s, \"--\") {\n\t\t\ts = strings.Replace(s, \"--\", \"-\", -1)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\ts = stringz.TrimLeftAll(s, \" \")\n\ts = stringz.TrimRightAll(s, \" \")\n\ts = stringz.TrimLeftAll(s, \"-\")\n\ts = stringz.TrimRightAll(s, \"-\")\n\treturn s\n}", "func SafeFileName(str string) string {\n\tname := strings.ToLower(str)\n\tname = path.Clean(path.Base(name))\n\tname = strings.Trim(name, \" \")\n\tseparators, err := regexp.Compile(`[ &_=+:]`)\n\tif err == nil {\n\t\tname = separators.ReplaceAllString(name, \"-\")\n\t}\n\tlegal, err := regexp.Compile(`[^[:alnum:]-.]`)\n\tif err == nil {\n\t\tname = legal.ReplaceAllString(name, \"\")\n\t}\n\tfor strings.Contains(name, \"--\") {\n\t\tname = strings.Replace(name, \"--\", \"-\", -1)\n\t}\n\treturn name\n}", "func NormalizeName(name string) (reference.Named, error) {\n\t// NOTE: this code is 
in symmetrie with containers/image/pkg/shortnames.\n\tref, err := reference.Parse(name)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"normalizing name %q: %w\", name, err)\n\t}\n\n\tnamed, ok := ref.(reference.Named)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"%q is not a named reference\", name)\n\t}\n\n\t// Enforce \"localhost\" if needed.\n\tregistry := reference.Domain(named)\n\tif !(strings.ContainsAny(registry, \".:\") || registry == \"localhost\") {\n\t\tname = toLocalImageName(ref.String())\n\t}\n\n\t// Another parse which also makes sure that docker.io references are\n\t// correctly normalized (e.g., docker.io/alpine to\n\t// docker.io/library/alpine).\n\tnamed, err = reference.ParseNormalizedNamed(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif _, hasTag := named.(reference.NamedTagged); hasTag {\n\t\t// Strip off the tag of a tagged and digested reference.\n\t\tnamed, err = normalizeTaggedDigestedNamed(named)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn named, nil\n\t}\n\tif _, hasDigest := named.(reference.Digested); hasDigest {\n\t\treturn named, nil\n\t}\n\n\t// Make sure to tag \"latest\".\n\treturn reference.TagNameOnly(named), nil\n}", "func (n *UnresolvedName) NormalizeVarName() (VarName, error) {\n\treturn classifyColumnItem(n)\n}", "func SanitizeLine(s string) string {\n\ts = strings.ToLower(s)\n\ts = strings.Replace(s, \"⁄\", \"/\", -1)\n\ts = strings.Replace(s, \" / \", \"/\", -1)\n\n\t// special cases\n\ts = strings.Replace(s, \"butter milk\", \"buttermilk\", -1)\n\ts = strings.Replace(s, \"bicarbonate of soda\", \"baking soda\", -1)\n\ts = strings.Replace(s, \"soda bicarbonate\", \"baking soda\", -1)\n\n\t// remove parentheses\n\tre := regexp.MustCompile(`(?s)\\((.*)\\)`)\n\tfor _, m := range re.FindAllStringSubmatch(s, -1) {\n\t\ts = strings.Replace(s, m[0], \" \", 1)\n\t}\n\n\ts = \" \" + strings.TrimSpace(s) + \" \"\n\n\t// replace unicode fractions with fractions\n\tfor v := range corpusFractionNumberMap {\n\t\ts = strings.Replace(s, v, \" \"+corpusFractionNumberMap[v].fractionString+\" \", -1)\n\t}\n\n\t// remove non-alphanumeric\n\treg, _ := regexp.Compile(\"[^a-zA-Z0-9/.]+\")\n\ts = reg.ReplaceAllString(s, \" \")\n\n\t// replace fractions with unicode fractions\n\tfor v := range corpusFractionNumberMap {\n\t\ts = strings.Replace(s, corpusFractionNumberMap[v].fractionString, \" \"+v+\" \", -1)\n\t}\n\n\ts = strings.Replace(s, \" one \", \" 1 \", -1)\n\n\treturn s\n}", "func filterCharsAndNormalize(strData string) string {\n\tpattern := regexp.MustCompile(`[\\W_]+`)\n\treturn strings.ToLower(pattern.ReplaceAllString(strData, ` `))\n}", "func normalizeAssertionOrName(s string) (string, error) {\n\tif libkb.CheckUsername.F(s) {\n\t\treturn libkb.NewNormalizedUsername(s).String(), nil\n\t}\n\n\t// TODO: this fails for http and https right now (see CORE-2968).\n\tsocialAssertion, isSocialAssertion := libkb.NormalizeSocialAssertion(s)\n\tif isSocialAssertion {\n\t\treturn socialAssertion.String(), nil\n\t}\n\n\tif expr, err := libkb.AssertionParseAndOnly(s); err == nil {\n\t\t// If the expression only contains a single url, make sure\n\t\t// it's not a just considered a single keybase username. If\n\t\t// it is, then some non-username slipped into the default\n\t\t// \"keybase\" case and should be considered an error.\n\t\turls := expr.CollectUrls(nil)\n\t\tif len(urls) == 1 && urls[0].IsKeybase() {\n\t\t\treturn \"\", NoSuchUserError{s}\n\t\t}\n\n\t\t// Normalize and return. 
Ideally `AssertionParseAndOnly`\n\t\t// would normalize for us, but that doesn't work yet, so for\n\t\t// now we'll just lower-case. This will incorrectly lower\n\t\t// case http/https/web assertions, as well as case-sensitive\n\t\t// social assertions in AND expressions. TODO: see CORE-2967.\n\t\treturn strings.ToLower(s), nil\n\t}\n\n\treturn \"\", BadTLFNameError{s}\n}", "func (u UnqualifiedStar) NormalizeVarName() (VarName, error) { return u, nil }", "func isValidName(name string) bool { return name != \"_\" && name != \"this\" }", "func validateName(name string) bool {\n\treturn nameFilter.MatchString(name)\n}", "func ServiceName(instanceGroupName string) string {\n\treturn names.Sanitize(instanceGroupName)\n}", "func ValidName(str string) bool {\n\tvar nameRegex = regexp.MustCompile(`^[a-zA-Z0-9\\-._]{0,80}$`)\n\treturn nameRegex.MatchString(str)\n}", "func cleanPackageName(name string) GoPackageName {\n\treturn GoPackageName(strs.GoSanitized(name))\n}", "func shortenName(name, origin string) string {\n\tif name == origin {\n\t\treturn \"@\"\n\t}\n\treturn strings.TrimSuffix(name, \".\"+origin)\n}", "func (data *Data) Sanitize() []string {\n\tvar warnings []string\n\n\treturn warnings\n}", "func NameEscape(name string) string {\n\tname = strings.Replace(name, \"@\", \"-\", -1)\n\tname = strings.Replace(name, \".\", \"-\", -1)\n\tname = strings.Replace(name, \"_\", \"-\", -1)\n\treturn name\n}", "func toValidImageRefName(name string) string {\n\tif len(name) == 0 {\n\t\treturn name\n\t}\n\n\t// in some certain situations, ref name might contain slashes or other symbols\n\t// k8s naming refers to DNS1123, which states that only alphanumeric characters\n\t// dashes are allowed in a subdomain name\n\t// bug example: https://bugzilla.redhat.com/show_bug.cgi?id=1970805\n\tresult := dnsSubdomainAllowedCharacters.ReplaceAllString(name, \"-\")\n\n\t// Also should check allowed subdomain name length and trim accordingly\n\tif len(result) > kvalidation.DNS1123SubdomainMaxLength {\n\t\tresult = result[:kvalidation.DNS1123SubdomainMaxLength]\n\t}\n\n\t// if after the trim name ends with a dash, trim it, too, must end with an alphanumeric\n\tresult = strings.Trim(result, \"-\")\n\n\treturn result\n}", "func formatName(n string) string {\n\tn = strings.TrimSpace(n)\n\tinput := []rune(n)\n\tvar output []rune\n\tupperit := true\n\tfor _, ch := range input {\n\t\tif unicode.IsSpace(ch) {\n\t\t\tupperit = true\n\t\t\tcontinue\n\t\t}\n\t\tif upperit {\n\t\t\tch = unicode.ToUpper(ch)\n\t\t\tupperit = false\n\t\t}\n\t\toutput = append(output, ch)\n\t}\n\treturn string(output)\n}", "func ClearName(key string) string {\n\tif strings.Contains(key, \"(\") {\n\t\tkey = strings.Split(key, \"(\")[0]\n\t}\n\n\tkey = strings.Replace(key, \"-\", \" \", -1)\n\tkey = strings.Replace(key, \"_\", \" \", -1)\n\tkey = strings.Replace(key, \"/\", \" \", -1)\n\tkey = strings.Replace(key, \"\\\\\", \" \", -1)\n\tkey = strings.Replace(key, \"'\", \" \", -1)\n\tkey = strings.Replace(key, \".\", \" \", -1)\n\n\tkey = strings.TrimPrefix(key, \"Registry \")\n\tkey = strings.TrimPrefix(key, \"Sponsoring \")\n\n\tkey = strings.TrimSpace(key)\n\tkey = strings.ToLower(key)\n\n\treturn key\n}" ]
[ "0.7389727", "0.72810036", "0.7142888", "0.70128125", "0.69416517", "0.6806906", "0.6788584", "0.6731622", "0.66658103", "0.66246176", "0.6588962", "0.6581411", "0.6501689", "0.6480518", "0.64712274", "0.64551795", "0.6452825", "0.6396267", "0.63956964", "0.6363661", "0.6355872", "0.63526", "0.63011116", "0.62948763", "0.6279739", "0.62455255", "0.6224606", "0.6192481", "0.6164385", "0.6122291", "0.6122291", "0.6120577", "0.6082971", "0.6082587", "0.60650545", "0.6050886", "0.6041889", "0.59149516", "0.5909055", "0.59082824", "0.5836518", "0.5829554", "0.5822239", "0.5792064", "0.575026", "0.5744798", "0.5698411", "0.569732", "0.5693887", "0.5683626", "0.56668586", "0.5661056", "0.5652414", "0.56470346", "0.56411207", "0.5633678", "0.56320244", "0.56294835", "0.5623874", "0.5622538", "0.56113434", "0.5570827", "0.5570721", "0.55663323", "0.5565096", "0.5548434", "0.5532812", "0.5525509", "0.5499028", "0.54945475", "0.5486654", "0.5474161", "0.54697466", "0.5467706", "0.5461002", "0.5440939", "0.54210603", "0.54179484", "0.54144365", "0.540723", "0.5368404", "0.5357338", "0.5354845", "0.5333352", "0.53171784", "0.530566", "0.5290001", "0.52688336", "0.52427864", "0.5241986", "0.5241571", "0.52389663", "0.5233505", "0.5232828", "0.52275646", "0.5222069", "0.5221342", "0.52109486", "0.5200414", "0.51953024" ]
0.7296913
1
StoreOfferInDB stores the hotel offer into the DB. TODO: add error handling for each Create operation; perform all the operations as a single transaction so they can be rolled back?
func (db *DbCtxt) StoreOfferInDB(offers *HotelOffers) { var ( hotels []*Hotel rooms []*Room ratePlans []*RatePlan ) for _, offer := range offers.Offers { var hotel *Hotel err := json.Unmarshal(offer.Hotel, &hotel) if err != nil { log.Errorf("error in unmarshalling hotel data[%s] err[%s]", offer.Hotel, err.Error()) return } hotel.Object = string(offer.Hotel) hotels = append(hotels, hotel) rooms = append(rooms, &Room{HotelID: hotel.ID, Object: string(offer.Room)}) ratePlans = append(ratePlans, &RatePlan{HotelID: hotel.ID, Object: string(offer.RatePlan)}) } db.client.Table("hotels").CreateInBatches(hotels, len(hotels)) db.client.Table("rooms").CreateInBatches(rooms, len(rooms)) db.client.Table("rate_plans").CreateInBatches(ratePlans, len(ratePlans)) return }
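Regarding the TODO in this record: a hedged sketch of what the single-transaction variant could look like with GORM v2, where a non-nil error returned from the callback rolls back every insert and nil commits. The stand-in model fields and the storeOfferTx name are assumptions for illustration, not the project's actual API:

package offers

import "gorm.io/gorm"

// Minimal stand-ins mirroring the record above; the real field types are assumptions.
type (
	Hotel    struct{ ID int; Object string }
	Room     struct{ HotelID int; Object string }
	RatePlan struct{ HotelID int; Object string }
	DbCtxt   struct{ client *gorm.DB }
)

// storeOfferTx wraps the three batch inserts in one transaction: if any
// CreateInBatches fails, returning its error rolls back all rows at once.
// Batch sizes mirror the original len(...) calls.
func (db *DbCtxt) storeOfferTx(hotels []*Hotel, rooms []*Room, ratePlans []*RatePlan) error {
	return db.client.Transaction(func(tx *gorm.DB) error {
		if err := tx.Table("hotels").CreateInBatches(hotels, len(hotels)).Error; err != nil {
			return err // triggers rollback
		}
		if err := tx.Table("rooms").CreateInBatches(rooms, len(rooms)).Error; err != nil {
			return err
		}
		if err := tx.Table("rate_plans").CreateInBatches(ratePlans, len(ratePlans)).Error; err != nil {
			return err
		}
		return nil // nil commits the transaction
	})
}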
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func CreateOffer(c *fiber.Ctx) {\n\tUserID := userIDF(c.Get(\"token\"))\n\n\tvar Query QueryParamsOffer\n\n\tif errorParse := c.BodyParser(&Query); errorParse != nil {\n\t\tfmt.Println(\"Error parsing data\", errorParse)\n\t\tc.JSON(ErrorResponse{MESSAGE: \"Error al parsear información\"})\n\t\tc.Status(400)\n\t\treturn\n\t}\n\n\tvar IsOwner IsOwnerShop\n\tvar Position LocationSQL\n\n\tfmt.Println(UserID, Query.ShopID, Query.Title, \"?\")\n\n\tErrorOwner := sq.Select(\n\t\t\"shop_id\",\n\t).\n\t\tFrom(\"shop\").\n\t\tWhere(\n\t\t\t\"user_id = ? AND shop_id = ? AND status = true\",\n\t\t\tUserID,\n\t\t\tQuery.ShopID,\n\t\t).\n\t\tRunWith(database).\n\t\tQueryRow().\n\t\tScan(\n\t\t\t&IsOwner.ShopID,\n\t\t)\n\n\tif ErrorOwner != nil {\n\t\tfmt.Println(\"Not is owner or active shop\", ErrorOwner)\n\t\tc.JSON(ErrorResponse{MESSAGE: \"Not is owner or active shop\"})\n\t\tc.SendStatus(400)\n\t\treturn\n\t}\n\n\tErrorShop := sq.Select(\n\t\t\"lat\",\n\t\t\"lon\",\n\t).\n\t\tFrom(\"shop\").\n\t\tWhere(\"shop_id = ? AND user_id = ?\", Query.ShopID, UserID).\n\t\tRunWith(database).\n\t\tQueryRow().\n\t\tScan(\n\t\t\t&Position.Lat,\n\t\t\t&Position.Lon,\n\t\t)\n\n\tif ErrorShop != nil {\n\t\tfmt.Println(\"Not found shop\")\n\t\tc.JSON(ErrorResponse{MESSAGE: \"Not found shop\"})\n\t\tc.SendStatus(400)\n\t\treturn\n\t}\n\n\tid, errorInsert := sq.Insert(\"offers\").\n\t\tColumns(\n\t\t\t\"user_id\",\n\t\t\t\"shop_id\",\n\t\t\t\"title\",\n\t\t\t\"description\",\n\t\t\t\"date_init\",\n\t\t\t\"date_end\",\n\t\t\t\"image_url\",\n\t\t\t\"lat\",\n\t\t\t\"lon\",\n\t\t\t\"active\",\n\t\t).\n\t\tValues(\n\t\t\tUserID,\n\t\t\tQuery.ShopID,\n\t\t\tQuery.Title,\n\t\t\tQuery.Description,\n\t\t\tQuery.DateInit,\n\t\t\tQuery.DateEnd,\n\t\t\tQuery.ImageURL,\n\t\t\t&Position.Lat,\n\t\t\t&Position.Lon,\n\t\t\t0,\n\t\t).\n\t\tRunWith(database).\n\t\tExec()\n\n\tIDLast, _ := id.LastInsertId()\n\tfmt.Println(\"This offer id\", IDLast)\n\n\tif errorInsert != nil {\n\t\tfmt.Println(\"Error to save shop\", errorInsert)\n\t}\n\n\tPosLat := Position.Lat.String\n\tPosLon := Position.Lon.String\n\n\tLat, errLat := strconv.ParseFloat(PosLat, 64)\n\tif errLat != nil {\n\t\tfmt.Println(\"Error to conver lat\", errLat)\n\t}\n\n\tLon, errLon := strconv.ParseFloat(PosLon, 64)\n\tif errLon != nil {\n\t\tfmt.Println(\"Error to conver lon\", errLon)\n\t}\n\n\t// IDString := fmt.Sprintf(\"%s\", IDLast)\n\tIDString := strconv.FormatInt(IDLast, 10)\n\n\tresInsertMongo, errInsertMongo := mongodb.Collection(\"offers\").InsertOne(context.TODO(), bson.M{\n\t\t\"offer_id\": IDString,\n\t\t\"shop_id\": Query.ShopID,\n\t\t\"title\": Query.Title,\n\t\t\"location\": bson.M{\n\t\t\t\"type\": \"Point\",\n\t\t\t\"coordinates\": []float64{Lon, Lat},\n\t\t},\n\t\t\"date_init\": Query.DateInit,\n\t\t\"date_end\": Query.DateEnd,\n\t\t\"active\": false,\n\t})\n\n\tif errInsertMongo != nil {\n\t\tfmt.Println(errInsertMongo, \"Error to Insert mongo\")\n\t}\n\n\tIDMongo := resInsertMongo.InsertedID\n\n\tfmt.Println(IDMongo, \"Id of offer in mongodb\")\n\n\tc.JSON(SuccessResponseOffer{MESSAGE: \"Created offers\", OfferID: IDString, Status: 200})\n\n}", "func storeOffer(offer *Offer) error {\n\n\tvar err error\n\n\tkeyId := fmt.Sprintf(\"%s:%d\", OFFERS, offer.Id)\n\tkey, err := as.NewKey(NAMESPACE, OFFERS, keyId)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trec := as.BinMap{\n\t\t\"offer_id\": offer.Id,\n\t\t\"broker_id\": offer.BrokerId,\n\t\t\"ttl\": offer.TTL,\n\t\t\"ticker\": offer.Ticker,\n\t\t\"quantity\": offer.Quantity,\n\t\t\"price\": 
offer.Price,\n\t}\n\n\t// TODO: use the Put Operation to store the record.\n\n\treturn nil\n}", "func (h *Hotel) Create(a *config.AppContext) error {\n\treturn a.Db.Create(h).Error\n}", "func (c *Controller) CreateOffer(session session.Manager) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvar creation offer.Creation\n\t\terr := json.NewDecoder(r.Body).Decode(&creation)\n\n\t\tif err != nil {\n\t\t\trespond(w, r, http.StatusUnprocessableEntity, ErrInvalidFormData.Error())\n\t\t\treturn\n\t\t}\n\n\t\tuid, ok := session.Get(r.Context(), \"uid\").(uint64)\n\t\tif !ok {\n\t\t\trespond(w, r, http.StatusForbidden, ErrForbiddenAction.Error())\n\t\t\treturn\n\t\t}\n\n\t\terr = c.offerService.Create(&creation, uid)\n\n\t\tif err != nil && (err == offer.ErrUserNotFound || err == offer.ErrRestaurantNotFound) {\n\t\t\trespond(w, r, http.StatusNotFound, err.Error())\n\t\t\treturn\n\t\t} else if err != nil {\n\t\t\trespond(w, r, http.StatusInternalServerError, ErrProcessingFailed.Error())\n\t\t\treturn\n\t\t}\n\n\t\trespond(w, r, http.StatusOK, true)\n\t\treturn\n\t}\n}", "func StoreRatePlan(input RatePlanInput) error {\n\tvar cancellationPolicyStr = \"\"\n\n\tvar otherConditionsStr = \"\"\n\tfor index, val := range input.OtherConditions {\n\t\tif index > 0 {\n\t\t\totherConditionsStr += \" , \"\n\t\t}\n\t\totherConditionsStr += val\n\t}\n\n\tfor index, val := range input.CancellationPolicy {\n\t\tif index > 0 {\n\t\t\tcancellationPolicyStr += \" ; \"\n\t\t}\n\t\tvar capacityStr = \"type:\" + val.Type + \", expires_days_before:\" + strconv.Itoa(val.ExpiresDaysBefore)\n\t\tcancellationPolicyStr += capacityStr\n\t}\n\tdata := RatePlan{\n\t\tHotelID: input.HotelID,\n\t\tRatePlanID: input.RatePlanID,\n\t\tCancellationPolicy: cancellationPolicyStr,\n\t\tName: input.Name,\n\t\tOtherConditions: otherConditionsStr,\n\t\tMealPlan: input.MealPlan,\n\t}\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"HotelID\": input.HotelID,\n\t\t\"RatePlanID\": input.RatePlanID,\n\t\t\"CancellationPolicy\": cancellationPolicyStr,\n\t\t\"Name\": input.Name,\n\t\t\"OtherConditions\": otherConditionsStr,\n\t\t\"MealPlan\": input.MealPlan,\n\t}).Info(\"inserting a row in mysql table - rate_plans\")\n\tif err := config.DB.Create(data).Error; err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func PutOffers(offers db.Offers, users db.Users, restaurants db.Restaurants, sessionManager session.Manager,\n\timageStorage storage.Images, facebookPost facebook.Post, regions db.Regions) router.HandlerWithParams {\n\thandler := func(w http.ResponseWriter, r *http.Request, user *model.User, restaurant *model.Restaurant, currentOffer *model.Offer) *router.HandlerError {\n\t\tofferPOST, err := parseOffer(r, restaurant)\n\t\tif err != nil {\n\t\t\treturn router.NewHandlerError(err, \"Failed to parse the offer\", http.StatusBadRequest)\n\t\t}\n\t\t// If the image_data field isn't set, the image field of offer also doesn't get set and\n\t\t// therefore the update won't affect the stored image. 
This in turn means, that currently\n\t\t// there's no way to update the offer to remove an image.\n\t\toffer, err := model.MapOfferPOSTToOffer(offerPOST, getImageDataToChecksumMapper(imageStorage))\n\t\tif err != nil {\n\t\t\treturn router.NewHandlerError(err, \"Failed to map the offer to the internal representation\", http.StatusInternalServerError)\n\t\t}\n\t\terr = offers.UpdateID(currentOffer.ID, offer)\n\t\tif err != nil {\n\t\t\treturn router.NewHandlerError(err, \"Failed to update the offer in DB\", http.StatusInternalServerError)\n\t\t}\n\t\toffer.ID = currentOffer.ID\n\n\t\tlocation, handlerErr := getLocationForRestaurant(restaurant, regions)\n\t\tif handlerErr != nil {\n\t\t\treturn handlerErr\n\t\t}\n\t\tdate := model.DateFromTime(offer.FromTime, location)\n\t\thandlerErr = facebookPost.Update(date, user, restaurant)\n\t\tif handlerErr != nil {\n\t\t\treturn handlerErr\n\t\t}\n\t\tpreviousDate := model.DateFromTime(currentOffer.FromTime, location)\n\t\tif previousDate != date {\n\t\t\thandlerErr = facebookPost.Update(previousDate, user, restaurant)\n\t\t\tif handlerErr != nil {\n\t\t\t\treturn handlerErr\n\t\t\t}\n\t\t}\n\n\t\tofferJSON, handlerError := mapOfferToJSON(offer, imageStorage)\n\t\tif handlerError != nil {\n\t\t\treturn handlerError\n\t\t}\n\t\treturn writeJSON(w, offerJSON)\n\t}\n\n\treturn forRestaurantWithParams(sessionManager, users, restaurants, forOffer(offers, handler))\n}", "func (t *Transaction) Store() error {\n\terr := database.DB.Create(&t).Error\n\treturn err\n}", "func (suite *ModelSuite) Test_CreateShipmentOffer() {\n\tt := suite.T()\n\tnow := time.Now()\n\ttdl, _ := testdatagen.MakeTDL(suite.db, \"california\", \"90210\", \"2\")\n\ttsp, _ := testdatagen.MakeTSP(suite.db, testdatagen.RandomSCAC())\n\tmarket := \"dHHG\"\n\tshipment, _ := testdatagen.MakeShipment(suite.db, now, now, now.AddDate(0, 0, 1), tdl, \"OHAI\", &market)\n\tshipmentOffer, err := CreateShipmentOffer(suite.db, shipment.ID, tsp.ID, false)\n\n\tif err != nil {\n\t\tt.Errorf(\"Failed to create Shipment Offer: %v\", err)\n\t}\n\texpectedShipmentOffer := ShipmentOffer{}\n\tif err := suite.db.Find(&expectedShipmentOffer, shipmentOffer.ID); err != nil {\n\t\tt.Fatalf(\"could not find shipmentOffer: %v\", err)\n\t}\n}", "func (db *Database) AddOfferings(offerings []TripOffering) error {\n for _, offer := range offerings {\n stmt, err := db.Prepare(fmt.Sprintf(\"INSERT INTO TripOffering (TripNumber, Date, ScheduledStartTime, ScheduledArrivalTime, DriverName, BusID) VALUES (%d, %q, %q, %q, %q, %d)\", offer.TripNumber, offer.Date, offer.ScheduledStartTime, offer.ScheduledArrivalTime, offer.DriverName, offer.BusID))\n if err != nil {\n return err\n }\n _, err = stmt.Exec()\n if err != nil {\n return err\n }\n }\n return nil\n}", "func (b *Backend) InsertPoller(poller common.Poller) (err error) {\n\n\t// exit if there is no database connection\n\terr = b.checkDB()\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// attempt to encrypt the provided password before storing to database\n\tif poller.EncryptedPassword == \"\" && poller.PlainTextPassword != \"\" {\n\t\tlog.Debug(\"encrypting password before database insert/update\")\n\t\tpoller.EncryptedPassword, err = crypto.Encrypt(poller.PlainTextPassword)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Create an Id\n\tpoller.Id = common.ComputeId(poller.VcenterHost)\n\n\t// begin a transaction, set all related objects to absent\n\ttx := b.db.MustBegin()\n\n\t// Store the user record in the DB\n\t_, err = tx.NamedExec(insertPoller, &poller)\n\tif err 
!= nil {\n\t\treturn\n\t}\n\n\t// commit transaction\n\terr = tx.Commit()\n\tif err != nil {\n\t\tlog.Errorf(\"failed to commit transaction to database: %s\", err)\n\t\treturn\n\t}\n\n\t// update poll date if it's an external poller\n\tif !poller.Internal {\n\t\terr = b.UpdateLastPollDate(poller)\n\t}\n\n\treturn\n}", "func (store *dbStore) CreateRestaurant(restaurant *structs.Restaurant) error {\r\n\tsqlStatement := fmt.Sprint(\"INSERT INTO choice(type,picture_url) VALUES ('\", restaurant.Choice.Type, \"','\", restaurant.Choice.PictureURL, \"')\")\r\n\r\n\tfmt.Println(sqlStatement)\r\n\r\n\tres, err := store.db.Exec(sqlStatement)\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\r\n\tchoiceID, err := res.LastInsertId()\r\n\r\n\tsqlStatement = fmt.Sprint(\"INSERT INTO restaurant(choice_id,name,category,location,price) VALUES ('\", choiceID, \"','\", restaurant.Name, \"','\", restaurant.Category, \"','\", restaurant.Location, \"','\", restaurant.Price, \"')\")\r\n\r\n\tfmt.Println(sqlStatement)\r\n\r\n\t_, err = store.db.Query(sqlStatement)\r\n\tif err != nil {\r\n\t\tfmt.Printf(\"failed to execute create restaurant query on the database: %v\", err)\r\n\t\treturn err\r\n\t}\r\n\r\n\treturn nil\r\n}", "func UpdateOffer(c *fiber.Ctx) {\n\tUserID := userIDF(c.Get(\"token\"))\n\tOfferID := c.Params(\"offer_id\")\n\n\tvar Query QueryParamsOfferUpdate\n\tvar IsOwner IsOwnerShop\n\tvar OffersMongo bson.D\n\n\tif errorParse := c.BodyParser(&Query); errorParse != nil {\n\t\tfmt.Println(\"Error parsing data\", errorParse)\n\t\tc.JSON(ErrorResponse{MESSAGE: \"Error parsing data\"})\n\t\tc.Status(400)\n\t\treturn\n\t}\n\n\tErrorOwner := sq.Select(\n\t\t\"shop_id\",\n\t).\n\t\tFrom(\"shop\").\n\t\tWhere(\n\t\t\t\"user_id = ? AND shop_id = ? AND status = true\",\n\t\t\tUserID,\n\t\t\tQuery.ShopID,\n\t\t).\n\t\tRunWith(database).\n\t\tQueryRow().\n\t\tScan(\n\t\t\t&IsOwner.ShopID,\n\t\t)\n\n\tif ErrorOwner != nil {\n\t\tfmt.Println(\"Not the owner, or the shop is not active\", ErrorOwner)\n\t\tc.JSON(ErrorResponse{MESSAGE: \"Not the owner, or the shop is not active\"})\n\t\tc.SendStatus(400)\n\t\treturn\n\t}\n\n\tqueryUpdateValue := sq.Update(\"offers\")\n\n\tif len(Query.Title) > 0 {\n\t\tqueryUpdateValue = queryUpdateValue.Set(\"title\", Query.Title)\n\t\tOffersMongo = append(OffersMongo, bson.E{\"title\", Query.Title})\n\t}\n\n\tif len(Query.Description) > 0 {\n\t\tqueryUpdateValue = queryUpdateValue.Set(\"description\", Query.Description)\n\t\tOffersMongo = append(OffersMongo, bson.E{\"description\", Query.Description})\n\t}\n\n\tif len(Query.DateInit) > 0 {\n\t\tqueryUpdateValue = queryUpdateValue.Set(\"date_init\", Query.DateInit)\n\t\tOffersMongo = append(OffersMongo, bson.E{\"date_init\", Query.DateInit})\n\t}\n\n\tif len(Query.DateEnd) > 0 {\n\t\tqueryUpdateValue = queryUpdateValue.Set(\"date_end\", Query.DateEnd)\n\t\tOffersMongo = append(OffersMongo, bson.E{\"date_end\", Query.DateEnd})\n\t}\n\n\tif len(Query.ImageURL) > 0 {\n\t\tqueryUpdateValue = queryUpdateValue.Set(\"image_url\", Query.ImageURL)\n\t\tOffersMongo = append(OffersMongo, bson.E{\"image_url\", Query.ImageURL})\n\t}\n\n\tif Query.Active >= 0 && Query.Active <= 1 {\n\t\tqueryUpdateValue = queryUpdateValue.Set(\"active\", Query.Active)\n\n\t\t// normalize the 0/1 flag to a boolean for mongo\n\t\tActive := Query.Active == 1\n\n\t\tOffersMongo = append(OffersMongo, bson.E{\"active\", Active})\n\t}\n\n\t_, ErrorUpdateOffer := queryUpdateValue.\n\t\tWhere(\"offers_id = ? AND user_id = ? 
AND shop_id = ?\", OfferID, UserID, Query.ShopID).\n\t\tRunWith(database).\n\t\tExec()\n\n\tif ErrorUpdateOffer != nil {\n\t\tfmt.Println(ErrorUpdateOffer, \"Problem with update offer\")\n\t\tc.JSON(ErrorResponse{MESSAGE: \"Problem with update offer\"})\n\t\tc.SendStatus(500)\n\t\treturn\n\t}\n\n\topts := options.Update().SetUpsert(true)\n\tfilter := bson.D{{\"offer_id\", OfferID}}\n\tupdate := bson.D{\n\t\t{\"$set\", OffersMongo},\n\t}\n\n\tresultMongoOffers, errOfferMongo := mongodb.Collection(\"offers\").UpdateOne(context.TODO(), filter, update, opts)\n\n\tif errOfferMongo != nil {\n\t\tfmt.Println(\"promblem with update offer in mongodb\")\n\t}\n\n\tif resultMongoOffers.MatchedCount != 0 {\n\t\tfmt.Println(\"matched and replaced an existing document\")\n\t\treturn\n\t}\n\tif resultMongoOffers.UpsertedCount != 0 {\n\t\tfmt.Printf(\"inserted a new document with ID %v\\n\", resultMongoOffers.UpsertedID)\n\t}\n\n\tfmt.Println(OffersMongo, queryUpdateValue)\n\n\tc.JSON(SuccessResponseOfferStatus{MESSAGE: \"Success update\", Status: 200})\n}", "func StoreSupplier(db *sqlx.DB, supplier *Supplier) (int64, error) {\n\n\tinsertSupplier := `INSERT INTO suppliers (supplier_code, supplier_description, supplier_contact_person, supplier_contact, supplier_email) VALUES ($1, $2, $3, $4, $5) RETURNING id`\n\t_, err := db.Exec(insertSupplier, supplier.SupplierCode, supplier.SupplierDesc, supplier.SupplierPerson, supplier.SupplierContact, supplier.SupplierEmail)\n\n\tif err != nil {\n\t\treturn 404, err\n\t}\n\n\treturn 200, nil\n}", "func (o *Offer) Insert(exec boil.Executor, columns boil.Columns) error {\n\tif o == nil {\n\t\treturn errors.New(\"stellarcore: no offers provided for insertion\")\n\t}\n\n\tvar err error\n\n\tif err := o.doBeforeInsertHooks(exec); err != nil {\n\t\treturn err\n\t}\n\n\tnzDefaults := queries.NonZeroDefaultSet(offerColumnsWithDefault, o)\n\n\tkey := makeCacheKey(columns, nzDefaults)\n\tofferInsertCacheMut.RLock()\n\tcache, cached := offerInsertCache[key]\n\tofferInsertCacheMut.RUnlock()\n\n\tif !cached {\n\t\twl, returnColumns := columns.InsertColumnSet(\n\t\t\tofferColumns,\n\t\t\tofferColumnsWithDefault,\n\t\t\tofferColumnsWithoutDefault,\n\t\t\tnzDefaults,\n\t\t)\n\n\t\tcache.valueMapping, err = queries.BindMapping(offerType, offerMapping, wl)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcache.retMapping, err = queries.BindMapping(offerType, offerMapping, returnColumns)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(wl) != 0 {\n\t\t\tcache.query = fmt.Sprintf(\"INSERT INTO \\\"offers\\\" (\\\"%s\\\") %%sVALUES (%s)%%s\", strings.Join(wl, \"\\\",\\\"\"), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1))\n\t\t} else {\n\t\t\tcache.query = \"INSERT INTO \\\"offers\\\" %sDEFAULT VALUES%s\"\n\t\t}\n\n\t\tvar queryOutput, queryReturning string\n\n\t\tif len(cache.retMapping) != 0 {\n\t\t\tqueryReturning = fmt.Sprintf(\" RETURNING \\\"%s\\\"\", strings.Join(returnColumns, \"\\\",\\\"\"))\n\t\t}\n\n\t\tcache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning)\n\t}\n\n\tvalue := reflect.Indirect(reflect.ValueOf(o))\n\tvals := queries.ValuesFromMapping(value, cache.valueMapping)\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, cache.query)\n\t\tfmt.Fprintln(boil.DebugWriter, vals)\n\t}\n\n\tif len(cache.retMapping) != 0 {\n\t\terr = exec.QueryRow(cache.query, vals...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...)\n\t} else {\n\t\t_, err = exec.Exec(cache.query, vals...)\n\t}\n\n\tif err != nil {\n\t\treturn 
errors.Wrap(err, \"stellarcore: unable to insert into offers\")\n\t}\n\n\tif !cached {\n\t\tofferInsertCacheMut.Lock()\n\t\tofferInsertCache[key] = cache\n\t\tofferInsertCacheMut.Unlock()\n\t}\n\n\treturn o.doAfterInsertHooks(exec)\n}", "func storeWinningBid(bid *Bid) error {\n\n\tvar err error\n\n\t// The current time is used a couple places\n\tts := time.Now().Unix()\n\n\t// Read the offer, to get the ticker_id\n\n\tofferKeyId := fmt.Sprintf(\"%s:%d\", OFFERS, bid.OfferId)\n\tofferKey, err := as.NewKey(NAMESPACE, OFFERS, offerKeyId)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tofferRec, err := db.Get(readPolicy, offerKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif offerRec == nil {\n\t\treturn fmt.Errorf(\"Record not found %#v\", offerKey)\n\t}\n\n\tticker := offerRec.Bins[\"ticker\"].(string)\n\tquantity := offerRec.Bins[\"quantity\"].(int)\n\tsellerId := offerRec.Bins[\"broker_id\"].(int)\n\n\t// Update the current ticker price\n\n\ttickerKeyId := fmt.Sprintf(\"%s:%d\", TICKERS, ticker)\n\ttickerKey, err := as.NewKey(NAMESPACE, TICKERS, tickerKeyId)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttickerBins := as.BinMap{\n\t\t\"ticker\": ticker,\n\t\t\"price\": bid.Price,\n\t\t\"time\": ts,\n\t}\n\n\t// TODO: use the Put operation to update the ticker latest price\n\n\t// Store the ticker price for historical prices\n\t// There is an index on ticker\n\n\tpriceKeyId := fmt.Sprintf(\"%s:%d:%d\", PRICES, ticker, ts)\n\tpriceKey, err := as.NewKey(NAMESPACE, PRICES, priceKeyId)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpriceBins := as.BinMap{\n\t\t\"ticker\": ticker,\n\t\t\"price\": bid.Price,\n\t\t\"time\": ts,\n\t}\n\n\t// TODO: Use the Put operation to store Record the price change\n\n\t// Update Porfolio\n\n\tsellerKeyId := fmt.Sprintf(\"%s:%d\", PORTFOLIOS, sellerId)\n\tsellerKey, err := as.NewKey(NAMESPACE, PORTFOLIOS, sellerKeyId)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbuyerKeyId := fmt.Sprintf(\"%s:%d\", PORTFOLIOS, bid.BrokerId)\n\tbuyerKey, err := as.NewKey(NAMESPACE, PORTFOLIOS, buyerKeyId)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsellerBins := as.BinMap{\n\t\tticker: quantity,\n\t}\n\n\tbuyerBins := as.BinMap{\n\t\tticker: quantity,\n\t}\n\n\t// TODO: To do, use the Add operationg to increment the portfolios\n\t// of the buyer and seller\n\n\treturn nil\n}", "func (main *Main) Create(e echo.Context) (err error) {\n\n\t// get request and validate\n\treq := new(request.Create)\n\te.Bind(req)\n\tif err = e.Validate(req); err != nil {\n\t\treturn rest.ConstructErrorResponse(e, exception.NewInputValidationFailed(err.Error()))\n\t}\n\t// map req to input data\n\treqData := input.NewNewTransactionCreate(\n\t\tmap[string]interface{}{\n\t\t\t\"Name\": req.Name,\n\t\t\t\"Qty\": req.Qty,\n\t\t\t\"Price\": req.Price,\n\t\t\t\"Weight\": req.Weight,\n\t\t\t\"Images\": req.Images,\n\t\t\t\"Description\": req.Description,\n\t\t},\n\t)\n\t//insert data to db\n\ttransaction, exc := TransactionModel.Create(reqData)\n\tif exc != nil {\n\t\treturn rest.ConstructErrorResponse(e, exc)\n\t}\n\tdata := map[string]contract.Model{\n\t\t\"created_transaction\": transaction,\n\t}\n\treturn rest.ConstructSuccessResponse(e, data)\n}", "func (s *Store) Create(c *gin.Context) {\n\n}", "func (r *StoreRepository) CreateStore(store *model.Store) error {\n\t_id := bson.NewObjectId()\n\tstore.Id = _id\n\terr := r.C.Insert(&store)\n\n\treturn err\n\n}", "func (c *Client) Store(agmi models.AgencyMonthlyInfo) error {\n\tif err := c.Db.Store(agmi); err != nil {\n\t\treturn fmt.Errorf(\"Store() error: 
%q\", err)\n\t}\n\treturn nil\n}", "func make_offer(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\n\tvar err error\n\tfmt.Println(\"starting mark_for_sale\")\n\n\t// this is quirky\n\t// todo - get the \"company that authed the transfer\" from the certificate instead of an argument\n\t// should be possible since we can now add attributes to the enrollment cert\n\t// as is.. this is a bit broken (security wise), but it's much much easier to demo! holding off for demos sake\n\n\tif len(args) != 5 {\n\t\treturn shim.Error(\"Incorrect number of arguments. Expecting 3\")\n\t}\n\n\t// input sanitation\n\terr = sanitize_arguments(args)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\tvar marble_id = args[0]\n\tvar buyer_id = args[1]\n\tvar authed_by_company = args[2]\n\toffer_price, err2 := strconv.Atoi(args[3])\n\tvar offer_id = args[4]\n\n\tif err2 != nil {\n\t\treturn shim.Error(\"4th argument must be a numeric string\")\n\t}\n\tfmt.Println(marble_id + \"->\" + buyer_id + \"->\" + offer_id + \"->\" + strconv.Itoa(offer_price) + \" - |\" + authed_by_company)\n\n\t// check if user already exists\n\tbuyer, err := get_owner(stub, buyer_id)\n\tif err != nil {\n\t\treturn shim.Error(\"This buyer does not exist - \" + buyer_id)\n\t}\n\n\tmarble, err := get_marble(stub, marble_id)\n\tif err != nil {\n\t\treturn shim.Error(\"This marble does not exist -\" + marble_id)\n\t}\n\n\tvar offer Offer\n\toffer.Id = offer_id\n\toffer.Buyer = buyer\n\toffer.Marble = marble\n\toffer.OfferPrice = offer_price\n\toffer.Status = \"PROPOSED\"\n\n\t//store user\n\tofferAsBytes, _ := json.Marshal(offer) //convert to array of bytes\n\terr = stub.PutState(offer.Id, offerAsBytes) //store owner by its Id\n\tif err != nil {\n\t\tfmt.Println(\"Could not store offer\")\n\t\treturn shim.Error(err.Error())\n\t}\n\n\tfmt.Println(\"- end make_offer\")\n\treturn shim.Success(nil)\n\n}", "func(db *Persistence) CreateNewApplication(appName, description, redirect string,\n trimName bool) error {\n\n log.Debug(\"adding new application to datbase...\")\n appId := uuid.New()\n\n query := `INSERT INTO applications(application_id,application_name,description,\n redirect_url,trim_app_name) VALUES($1,$2,$3,$4,$5)`\n _, err := db.Session.Exec(context.Background(), query, appId, appName, description,\n redirect, trimName)\n return err\n}", "func (explenation *Explenation) Save() error {\n\t_, err := govalidator.ValidateStruct(explenation)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdataStore, dataStoreErr := CreateDataStore()\n\tif err != nil {\n\t\treturn dataStoreErr\n\t}\n\texplenationsCol := dataStore.GetCollection(CollectionExplenationsProperty)\n\tdefer dataStore.Close()\n\n\texplenation.SetID()\n\texplenation.UpdatedAt = time.Now()\n\n\t_, err = explenationsCol.UpsertId(explenation.ID, bson.M{\"$set\": explenation})\n\treturn err\n}", "func Store(c *gin.Context) {\r\n\tvar post Post\r\n\tif err := c.ShouldBindJSON(&post); err != nil {\r\n\t\tc.JSON(http.StatusBadRequest, gin.H{\r\n\t\t\t\"messege\": err.Error(),\r\n\t\t\t\"data\": \"\",\r\n\t\t})\r\n\t\treturn\r\n\t}\r\n\tpost.Status = \"Active\"\r\n\tdb.Create(&post)\r\n\tc.JSON(http.StatusOK, gin.H{\r\n\t\t\"messege\": \"\",\r\n\t\t\"data\": post,\r\n\t})\r\n}", "func SupplierAddPokemonInDatabase(c echo.Context) error {\n\t// auth := AuthorizedSupplier(c)\n\t// if !auth {\n\t// \treturn echo.NewHTTPError(http.StatusUnauthorized, \"This account does not have access to this route\")\n\t// }\n\tpokemon_id, err := 
strconv.Atoi(c.Param(\"pokemon_id\"))\n\tif err != nil {\n\t\treturn c.JSON(http.StatusBadRequest, map[string]interface{}{\n\t\t\t\"message\": \"invalid pokemon id\",\n\t\t})\n\t}\n\t//Consuming Pokedex API\n\tpokedex_url := fmt.Sprintf(\"https://pokeapi.co/api/v2/pokemon/%d\", pokemon_id)\n\tresponse, err := http.Get(pokedex_url)\n\tif err != nil {\n\t\treturn c.JSON(http.StatusBadGateway, err)\n\t}\n\tresponse_data, _ := ioutil.ReadAll(response.Body)\n\tdefer response.Body.Close()\n\tvar pokedex models.Pokedexs\n\tjson.Unmarshal(response_data, &pokedex)\n\tpokemon := models.Pokemons{\n\t\tID: pokedex.ID,\n\t\tName: pokedex.Name,\n\t}\n\tc.Bind(&pokemon)\n\taddPokemonDb, err := database.AddPokemon(pokemon) //Add Pokemon\n\tif err != nil {\n\t\treturn c.JSON(http.StatusInternalServerError, err)\n\t}\n\treturn c.JSON(http.StatusOK, addPokemonDb)\n}", "func (repo *Repository) Create(ctx context.Context, claims auth.Claims, req CreateRequest, now time.Time) (*Expenditure, error) {\n\tspan, ctx := tracer.StartSpanFromContext(ctx, \"internal.expenditure.Create\")\n\tdefer span.Finish()\n\tif claims.Audience == \"\" {\n\t\treturn nil, errors.WithStack(ErrForbidden)\n\t}\n\n\t// Admin users can update branch they have access to.\n\tif !claims.HasRole(auth.RoleAdmin) {\n\t\treturn nil, errors.WithStack(ErrForbidden)\n\t}\n\n\t// Validate the request.\n\tv := webcontext.Validator()\n\terr := v.StructCtx(ctx, req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// If now empty set it to the current time.\n\tif now.IsZero() {\n\t\tnow = time.Now()\n\t}\n\n\t// Always store the time as UTC.\n\tnow = now.UTC()\n\t// Postgres truncates times to milliseconds when storing. We and do the same\n\t// here so the value we return is consistent with what we store.\n\tnow = now.Truncate(time.Millisecond)\n\n\tsalesRep, err := models.Users(models.UserWhere.PhoneNumber.EQ(req.SalesRepPhoneNumber)).One(ctx, repo.DbConn)\n\tif err != nil {\n\t\tif err.Error() == sql.ErrNoRows.Error() {\n\t\t\treturn nil, errors.New(\"Invalid phone number\")\n\t\t}\n\t\treturn nil, err\n\t}\n\tm := models.RepsExpense{\n\t\tID: uuid.NewRandom().String(),\n\t\tSalesRepID: salesRep.ID,\n\t\tDate: now.Unix(),\n\t\tAmount: req.Amount,\n\t\tReason: req.Reason,\n\t}\n\n\tif err := m.Insert(ctx, repo.DbConn, boil.Infer()); err != nil {\n\t\treturn nil, errors.WithMessage(err, \"Insert expenditure failed\")\n\t}\n\n\treturn &Expenditure{\n\t\tID: m.ID,\n\t\tSalesRepID: req.SalesRepPhoneNumber,\n\t\tDate: now,\n\t\tAmount: req.Amount,\n\t\tReason: req.Reason,\n\t}, nil\n}", "func (db *PoetDb) ValidateAndStore(proofMessage *types.PoetProofMessage) error {\n\tif err := db.Validate(proofMessage.PoetProof, proofMessage.PoetServiceID,\n\t\tproofMessage.RoundID, proofMessage.Signature); err != nil {\n\n\t\treturn err\n\t}\n\n\terr := db.storeProof(proofMessage)\n\treturn err\n}", "func (s *CreateUserEndpoint) saveToDB(user *User) (int, error) {\n\t// implementation removed\n\treturn 0, nil\n}", "func (v *Vessel) Save() error {\n\tdb := adaptors.DBConnector()\n\tdefer db.Close()\n\n\terr := db.Table(\"vessels\").Create(&Vessel{\n\t\tv.ID,\n\t\tv.Name,\n\t\tv.Beam,\n\t\tv.LOA,\n\t\tv.Draft,\n\t\tv.Status,\n\t}).Error\n\n\treturn err\n}", "func (client MongoClient) Store(ctx context.Context, app apps.App) error {\n\n\tdata, err := bson.Marshal(app)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcoll := client.conn.Database(nameDB).Collection(nameColl)\n\t_, err = coll.InsertOne(ctx, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func 
PostOffers(offers db.Offers, users db.Users, restaurants db.Restaurants, sessionManager session.Manager,\n\timageStorage storage.Images, facebookPost facebook.Post, regions db.Regions) router.HandlerWithParams {\n\thandler := func(w http.ResponseWriter, r *http.Request, user *model.User, restaurant *model.Restaurant) *router.HandlerError {\n\t\tofferPOST, err := parseOffer(r, restaurant)\n\t\tif err != nil {\n\t\t\treturn router.NewHandlerError(err, \"Failed to parse the offer\", http.StatusBadRequest)\n\t\t}\n\t\toffer, err := model.MapOfferPOSTToOffer(offerPOST, getImageDataToChecksumMapper(imageStorage))\n\t\tif err != nil {\n\t\t\treturn router.NewHandlerError(err, \"Failed to map the offer to the internal representation\", http.StatusInternalServerError)\n\t\t}\n\t\toffers, err := offers.Insert(offer)\n\t\tif err != nil {\n\t\t\treturn router.NewHandlerError(err, \"Failed to store the offer in the DB\", http.StatusInternalServerError)\n\t\t}\n\n\t\tlocation, handlerErr := getLocationForRestaurant(restaurant, regions)\n\t\tif handlerErr != nil {\n\t\t\treturn handlerErr\n\t\t}\n\t\tdate := model.DateFromTime(offer.FromTime, location)\n\t\thandlerErr = facebookPost.Update(date, user, restaurant)\n\t\tif handlerErr != nil {\n\t\t\treturn handlerErr\n\t\t}\n\n\t\tofferJSON, handlerError := mapOfferToJSON(offers[0], imageStorage)\n\t\tif handlerError != nil {\n\t\t\treturn handlerError\n\t\t}\n\t\treturn writeJSON(w, offerJSON)\n\t}\n\treturn forRestaurant(sessionManager, users, restaurants, handler)\n}", "func (f *freeClientPool) saveToDb() {\n\tnow := f.clock.Now()\n\tstorage := freeClientPoolStorage{\n\t\tLogOffset: uint64(f.logOffset(now)),\n\t\tList: make([]*freeClientPoolEntry, len(f.addressMap)),\n\t}\n\ti := 0\n\tfor _, e := range f.addressMap {\n\t\tif e.connected {\n\t\t\tf.calcLogUsage(e, now)\n\t\t}\n\t\tstorage.List[i] = e\n\t\ti++\n\t}\n\tenc, err := rlp.EncodeToBytes(storage)\n\tif err != nil {\n\t\tlog.Error(\"Failed to encode client list\", \"err\", err)\n\t} else {\n\t\tf.db.Put([]byte(\"freeClientPool\"), enc)\n\t}\n}", "func Create(db *db.DB) *echo.Echo {\n\n\te := echo.New()\n\te.Validator = &CustomValidator{validator: validator.New()}\n\te.Logger.SetLevel(log.DEBUG)\n\te.Pre(middleware.RemoveTrailingSlash())\n\te.Use(middleware.Logger())\n\te.Use(middleware.CORSWithConfig(middleware.CORSConfig{\n\t\tAllowOrigins: []string{\"*\"},\n\t\tAllowHeaders: []string{echo.HeaderOrigin, echo.HeaderContentType, echo.HeaderAccept, echo.HeaderAuthorization},\n\t\tAllowMethods: []string{echo.GET, echo.HEAD, echo.PUT, echo.PATCH, echo.POST, echo.DELETE},\n\t}))\n\n\t// ticketStore := store.NewTicketStore(db)\n\t// ticketHandler := handler.NewTicketHandler(ticketStore)\n\n\te.POST(\"/ticket/buy\", controllers.BuyTicket)\n\n\treturn e\n}", "func (ms *mysqlstore) Create(record store.Record) (err error) {\n\tdefer func(t0 time.Time) {\n\t\terrStr := \"\"\n\t\tif err != nil {\n\t\t\terrStr = err.Error()\n\t\t}\n\t\tms.logger.Printf(\"%v\",\n\t\t\tlogrec{\n\t\t\t\tService: \"mysql\",\n\t\t\t\tOperation: \"create\",\n\t\t\t\tID: record.ID(),\n\t\t\t\tError: errStr,\n\t\t\t\tDuration: fmt.Sprintf(\"%v\", time.Since(t0)),\n\t\t\t},\n\t\t)\n\t}(time.Now())\n\n\t// run create op\n\n\treturn\n}", "func (t *trip) Store(ctx context.Context, trip *model.Trip) (*model.Trip, error) {\n\tvar tripId int32\n\t//todo check the origin and destination relation are exist or not\n\terr := t.conn.QueryRow(ctx, `INSERT INTO trips \n\t\t\t\t\t\t(origin_id, destination_id, dates, price)\n\t\t\t\t\t\tVALUES ($1, 
$2, $3, $4)\n\t\t\t\t\t\tRETURNING id`,\n\t\ttrip.Origin.ID, trip.Destination.ID, trip.Dates, trip.Price).Scan(&tripId)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttrip.ID = tripId\n\n\treturn trip, nil\n}", "func PostEmployee(ctx *lambda.Context, evt *lambda.Event, res *lambda.ProxyResponse, params url.Values) {\r\n\r\n // Get parameters from URL request\r\n storeId := params.Get(\"store\")\r\n \r\n // Read body from request\r\n var bodyByte []byte\r\n if tBody, err := apiutils.GetBodyFromEvent(evt); err != nil {\r\n res.Headers[\"Content-Type\"] = \"charset=UTF-8\"\r\n res.StatusCode = StatusInternalServerError\r\n res.Body = err.Error()\r\n return\r\n } else {\r\n bodyByte = tBody\r\n }\r\n\r\n // Struct to unmarshal body of request\r\n var e Employee\r\n\r\n \r\n // Unmarshal body into Cake struct defined above\r\n if err := json.Unmarshal(bodyByte, &e); err != nil {\r\n \r\n logrus.WithFields(logrus.Fields{\r\n \"err\": err,\r\n }).Warn(\"Error marshaling JSON to Cake struct\") \r\n \r\n res.Headers[\"Content-Type\"] = \"charset=UTF-8\"\r\n res.StatusCode = StatusUnprocessableEntity\r\n res.Body = \"Error marshaling JSON to EmployeeRole struct\"\r\n return\r\n }\r\n \r\n \r\n \r\n // Query to insert cake\r\n query := fmt.Sprintf(\r\n \"INSERT INTO `%s`.`employee` \" + \r\n \"(`first_name`,\" +\r\n \"`middle_name`,\" +\r\n \"`last_name`,\" +\r\n \"`second_last_name`,\" +\r\n \"`display_name`,\" +\r\n \"`phone_number`,\" +\r\n \"`employee_role_id`,\" +\r\n \"`active`) \" +\r\n \"VALUES (?,?,?,?,?,?,?,?)\", \r\n \r\n storeId)\r\n \r\n parameters := []interface{}{\r\n e.FirstName,\r\n e.MiddleName,\r\n e.LastName,\r\n e.SecondLastName,\r\n e.DisplayName,\r\n e.PhoneNumber,\r\n e.EmployeeRoleId,\r\n 1,\r\n }\r\n \r\n \r\n // Build query to run in MySQL\r\n upsertQueries := []apiutils.UpsertQuery{\r\n {\r\n Query: query,\r\n Parameters: parameters,\r\n },\r\n\r\n }\r\n \r\n // Run queries\r\n getLastInsertId := true\r\n lastInsertId, _, errStr, httpResponse := apiutils.RunUpsertQueries(upsertQueries, getLastInsertId)\r\n if httpResponse != 0 {\r\n res.Headers[\"Content-Type\"] = \"charset=UTF-8\"\r\n res.StatusCode = strconv.Itoa(httpResponse)\r\n res.Body = errStr\r\n return\r\n }\r\n res.Body = strconv.Itoa(lastInsertId)\r\n res.StatusCode = StatusOK\r\n}", "func addTransaction(db meddler.DB, lender *User, debtor *User, amount int, expense *Expense) (*Transaction, error) {\n trans := new(Transaction)\n trans.LenderId = lender.Id\n trans.DebtorId = debtor.Id\n trans.Amount = amount\n trans.Date = expense.Date\n trans.ExpenseId = expense.Id\n\n err := meddler.Insert(db, \"transactions\", trans)\n if err != nil {\n return nil, err\n }\n\n lender.UpdateBalance(db, amount)\n debtor.UpdateBalance(db, -amount)\n\n return trans, nil\n}", "func (couchDbDatabase CouchDBDriver) Store(checkoff models.Checkoff) {\n\tdb := connectToCouchDB()\n\n\t_, err := db.Save(checkoff, checkoff.GetID(), \"\")\n\n\tif err != nil {\n\t\tlog.Panicln(err)\n\t}\n}", "func (db *Datastore) Create(txn transaction.Transaction) error {\n\tfmt.Println(`Creating txn with data:`, txn)\n\t// your DB operations to transactions ...\n\treturn nil\n}", "func (tp *TxPool) AddTransactionToDatabaseMempool(txHash *common.Hash, txDesc TxDesc) error {\n\ttx := txDesc.Desc.Tx\n\ttempDesc := TempDesc{\n\t\tStartTime: txDesc.StartTime,\n\t\tIsPushMessage: txDesc.IsFowardMessage,\n\t\tHeight: txDesc.Desc.Height,\n\t\tFee: txDesc.Desc.Fee,\n\t\tFeePerKB: txDesc.Desc.FeePerKB,\n\t}\n\tswitch tx.GetType() {\n\t//==================For 
PRV Transfer Only\n\tcase common.TxNormalType:\n\t\t{\n\t\t\tnormalTx := tx.(*transaction.Tx)\n\t\t\tvalueTx, err := json.Marshal(normalTx)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tvalueDesc, err := json.Marshal(tempDesc)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = tp.config.DataBaseMempool.AddTransaction(txHash, common.TxNormalType, valueTx, valueDesc)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t//==================For PRV & TxCustomToken Transfer\n\tcase common.TxCustomTokenType:\n\t\t{\n\t\t\tcustomTokenTx := tx.(*transaction.TxCustomToken)\n\t\t\tvalueTx, err := json.Marshal(customTokenTx)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tvalueDesc, err := json.Marshal(tempDesc)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = tp.config.DataBaseMempool.AddTransaction(txHash, common.TxCustomTokenType, valueTx, valueDesc)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\tcase common.TxCustomTokenPrivacyType:\n\t\t{\n\t\t\tcustomTokenPrivacyTx := tx.(*transaction.TxCustomTokenPrivacy)\n\t\t\tvalueTx, err := json.Marshal(customTokenPrivacyTx)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tvalueDesc, err := json.Marshal(tempDesc)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = tp.config.DataBaseMempool.AddTransaction(txHash, common.TxCustomTokenPrivacyType, valueTx, valueDesc)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func Insert(ctx context.Context, req *proto.CreateRequest) error {\n\tlog.Printf(\"Inside Insert()\\n\")\n\n\ttxn, err := GetDbConn().Begin()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer txn.Rollback()\n\tlog.Printf(\"Transaction started \\n\")\n\n\tfmt.Printf(\"Creating prepared statement\\n\")\n\tstmtStr, err := txn.Prepare(\"INSERT INTO restaurant_scores(business_id, business_name, business_address, business_city, business_state, business_postal_code, business_latitude, business_longitude, business_location, business_phone_number, inspection_id, inspection_date, inspection_score, inspection_type, violation_id, violation_description, risk_category, neighborhoods_old, police_districts, supervisor_districts, fire_prevention_districts, zip_codes, analysis_neighborhoods) VALUES( $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23 )\")\n\t//TODO clean\n\t// stmtStr, err := txn.Prepare(\"INSERT INTO restaurant_scores(business_id, business_name, business_address, business_city, business_state, business_postal_code, business_latitude, business_longitude, business_location, business_phone_number, inspection_id, inspection_date, inspection_score, inspection_type, violation_id, violation_description, risk_category, neighborhoods_old, police_districts, supervisor_districts, fire_prevention_districts, zip_codes, analysis_neighborhoods) VALUES( 1, 2, 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test' )\")\n\tif err != nil {\n\t\tfmt.Printf(\"Error in creating statement %v\", err)\n\t\treturn err\n\t}\n\tdefer stmtStr.Close()\n\tlog.Printf(\"Statement Insertd \\n\")\n\n\tlog.Printf(\"Executing the statement for business_id %v \\n\", req.GetRecord().GetBusinessId())\n\t//Keeping long statement as punch cards time has gone\n\tres, err := stmtStr.Exec(req.GetRecord().GetBusinessId(), req.GetRecord().GetBusinessName(), req.GetRecord().GetBusinessAddress(), 
req.GetRecord().GetBusinessCity(), req.GetRecord().GetBusinessState(), req.GetRecord().GetBusinessPostalCode(), req.GetRecord().GetBusinessLatitude(), req.GetRecord().GetBusinessLongitude(), req.GetRecord().GetBusinessLocation(), req.GetRecord().GetBusinessPhoneNumber(), req.GetRecord().GetInspectionId(), req.GetRecord().GetInspectionDate(), req.GetRecord().GetInspectionScore(), req.GetRecord().GetInspectionType(), req.GetRecord().GetViolationId(), req.GetRecord().GetViolationDescription(), req.GetRecord().GetRiskCategory(), req.GetRecord().GetNeighborhoodsOld(), req.GetRecord().GetPoliceDistricts(), req.GetRecord().GetSupervisorDistricts(), req.GetRecord().GetFirePreventionDistricts(), req.GetRecord().GetZipCodes(), req.GetRecord().GetAnalysisNeighborhoods())\n\tif err != nil {\n\t\tlog.Printf(\"Error while inserting rows %v\", err)\n\t}\n\tlog.Printf(\"INSERT done with Result = %v\\n doing commit now \\n\", res)\n\n\terr = txn.Commit()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Printf(\"Exiting Insert()\\n\")\n\treturn nil\n}", "func (o *Offer) createOffer(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {\n\tif len(args) != 4 {\n\t\treturn shim.Error(\"Incorrect number of arguments. Expecting 4\")\n\t}\n\n\tvehicleListingAsBytes, _ := APIstub.GetState(args[2])\n\tvehicleListing := VehicleListing{}\n\tjson.Unmarshal(vehicleListingAsBytes, &vehicleListing)\n\tif vehicleListing.ID == \"\" {\n\t\treturn shim.Error(\"VehicleListing is not found.\")\n\t}\n\n\tif vehicleListing.State != 0 {\n\t\treturn shim.Error(\"Listing is not FOR SALE.\")\n\t}\n\n\tmemberAsBytes, _ := APIstub.GetState(args[3])\n\tmember := Member{}\n\tjson.Unmarshal(memberAsBytes, &member)\n\tif member == (Member{}) {\n\t\treturn shim.Error(\"Member is not found.\")\n\t}\n\n\tbidPrice, _ := strconv.ParseFloat(args[1], 64)\n\toffer := Offer{ID: args[0], BidPrice: bidPrice, Listing: vehicleListing, Member: member}\n\n\tvehicleListing.Offers = append(vehicleListing.Offers, offer)\n\n\tvehicleListingAsBytes, _ = json.Marshal(vehicleListing)\n\tAPIstub.PutState(args[2], vehicleListingAsBytes)\n\n\tofferAsBytes, _ := json.Marshal(offer)\n\tAPIstub.PutState(args[0], offerAsBytes)\n\treturn shim.Success(nil)\n}", "func (p *psqlExchangeRepository) Create(data *models.Exchange) (int64, error) {\n\tvar lastId int64\n\tquery := \"INSERT INTO exchange_rate (from_cur, to_cur) VALUES ($1, $2) RETURNING id\"\n\t_ = p.Conn.QueryRow(query, data.From, data.To).Scan(&lastId)\n\treturn lastId, nil\n}", "func (i *Invite) SaveNew(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\tconn, err := db.Connect()\n\tdefer conn.Close()\n\tif err != nil {\n\t\treturn common.APIError(http.StatusInternalServerError, err)\n\t}\n\n\tsession := conn.NewSession(nil)\n\tdefer session.Close()\n\n\ttokenUser := common.GetTokenUser(request)\n\tif !tokenUser.IsAdmin() {\n\t\tfilter := dbr.And(\n\t\t\tdbr.Eq(\"trip_id\", request.PathParameters[\"id\"]),\n\t\t\tdbr.Eq(\"user_id\", tokenUser.UserID),\n\t\t\tdbr.Or(\n\t\t\t\tdbr.Eq(\"role\", ParticipantOwnerRole),\n\t\t\t\tdbr.Eq(\"role\", ParticipantAdminRole),\n\t\t\t),\n\t\t)\n\t\ttotal, err := db.Validate(session, []string{\"count(id) total\"}, db.TableTripParticipant, filter)\n\t\tif err != nil {\n\t\t\treturn common.APIError(http.StatusInternalServerError, err)\n\t\t}\n\t\tif total <= 0 {\n\t\t\treturn common.APIError(http.StatusForbidden, errors.New(\"only trip owner or admins can create invites\"))\n\t\t}\n\t}\n\n\terr = 
json.Unmarshal([]byte(request.Body), i)\n\tif err != nil {\n\t\treturn common.APIError(http.StatusBadRequest, err)\n\t}\n\n\tre := regexp.MustCompile(`^[a-z0-9._%+\-]+@[a-z0-9.\-]+\.[a-z]{2,4}$`)\n\n\tif i.Email == \"\" || !re.MatchString(i.Email) {\n\t\treturn common.APIError(http.StatusBadRequest, errors.New(\"invalid email\"))\n\t}\n\n\ti.ID = uuid.New().String()\n\ti.TripID = request.PathParameters[\"id\"]\n\ti.CreatedBy = tokenUser.UserID\n\ti.CreatedDate = time.Now()\n\n\ttx, err := session.Begin()\n\tif err != nil {\n\t\treturn common.APIError(http.StatusInternalServerError, err)\n\t}\n\tdefer tx.RollbackUnlessCommitted()\n\n\terr = db.Insert(tx, db.TableTripInvite, *i)\n\tif err != nil {\n\t\treturn common.APIError(http.StatusInternalServerError, err)\n\t}\n\n\ttx.Commit()\n\n\t//TODO: send invite to email\n\n\treturn common.APIResponse(i, http.StatusCreated)\n}", "func (ts *TicketStore) Create() *Ticket {\n\n\tnow := time.Now()\n\n\tnewTicket := &Ticket{\n\t\tTimeIn: now.Add(time.Duration(-350) * time.Minute),\n\t}\n\n\terr := ts.db.Update(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket(ts.bucket)\n\n\t\tid, _ := b.NextSequence()\n\t\tnewTicket.ID = int64(id)\n\n\t\tbuf, err := json.Marshal(newTicket)\n\t\tcheckError(err)\n\n\t\treturn b.Put(itob(newTicket.ID), buf)\n\t})\n\n\tcheckError(err)\n\n\treturn newTicket\n}", "func (r *repositorySupplier) EntityCreate(input *schemas.SchemaSupplier) (*models.ModelSupplier, schemas.SchemaDatabaseError) {\n\tvar supplier models.ModelSupplier\n\tphone, _ := strconv.ParseUint(input.Phone, 10, 64)\n\tsupplier.Name = input.Name\n\tsupplier.Phone = phone\n\tsupplier.Address = input.Address\n\n\terr := make(chan schemas.SchemaDatabaseError, 1)\n\n\tdb := r.db.Model(&supplier)\n\n\tcheckSupplierName := db.Debug().First(&supplier, \"name = ?\", supplier.Name)\n\n\tif checkSupplierName.RowsAffected > 0 {\n\t\terr <- schemas.SchemaDatabaseError{\n\t\t\tCode: http.StatusConflict,\n\t\t\tType: \"error_create_01\",\n\t\t}\n\t\treturn &supplier, <-err\n\t}\n\n\tcheckSupplierPhone := db.Debug().First(&supplier, \"phone = ?\", supplier.Phone)\n\n\tif checkSupplierPhone.RowsAffected > 0 {\n\t\terr <- schemas.SchemaDatabaseError{\n\t\t\tCode: http.StatusConflict,\n\t\t\tType: \"error_create_02\",\n\t\t}\n\t\treturn &supplier, <-err\n\t}\n\n\taddSupplier := db.Debug().Create(&supplier).Commit()\n\n\tif addSupplier.RowsAffected < 1 {\n\t\terr <- schemas.SchemaDatabaseError{\n\t\t\tCode: http.StatusForbidden,\n\t\t\tType: \"error_create_03\",\n\t\t}\n\t\treturn &supplier, <-err\n\t}\n\n\terr <- schemas.SchemaDatabaseError{}\n\treturn &supplier, <-err\n}", "func insertPollDB(poll *Poll) error {\n\t// create new MongoDB session\n\tcollection, session := initMongoDB(\"poll\")\n\tdefer session.Close()\n\n\ttimeEST := time.Now().Add(-4 * time.Hour)\n\tpoll.Time = timeEST.Format(\"20060102150405\")\n\ts1 := rand.NewSource(time.Now().UnixNano())\n\tr1 := rand.New(s1)\n\n\tpoll.User = user\n\tpoll.ID = bson.NewObjectId().String()\n\tpoll.ID = poll.ID[13 : len(poll.ID)-2]\n\tpoll.Code = 1000 + r1.Intn(9000)\n\tpoll.Options = make([]string, 1)\n\n\treturn collection.Insert(poll)\n}", "func (p *PgDb) CreateEmp(r *model.Emp) (*model.Emp, error) {\n\tcnt := \"postgres (p *PgDb) CreateEmp\" // Name of the current method, for logging\n\t//mylog.PrintfDebug(\"[DEBUG] %v - START, param: '%+v'\", cnt, r)\n\n\t// Begin a transaction\n\ttx, err := p.Db.Beginx()\n\tif err != nil {\n\t\terrM := fmt.Sprintf(\"[ERROR] %v - ERROR - p.Db.Beginx()\", cnt)\n\t\tlog.Printf(errM)\n\t\treturn 
nil, errors.Wrap(err, errM)\n\t}\n\n\t// =====================================================================\n\t// Variable part of the code\n\t// =====================================================================\n\t// Call in transactional mode\n\tv, err := p.CreateEmpTx(r, tx, true)\n\t// =====================================================================\n\tif err != nil {\n\t\tdefer tx.Rollback()\n\t\terrM := fmt.Sprintf(\"[ERROR] %v - ERROR - p.CreateEmpTx(id, tx)\", cnt)\n\t\tlog.Printf(errM)\n\t\treturn nil, errors.Wrap(err, errM)\n\t}\n\n\t// finish the transaction\n\tif err := tx.Commit(); err != nil {\n\t\terrM := fmt.Sprintf(\"[ERROR] %v - ERROR - tx.Commit()\", cnt)\n\t\tlog.Printf(errM)\n\t\treturn nil, errors.Wrap(err, errM)\n\t}\n\n\t//mylog.PrintfDebug(\"[DEBUG] %v - SUCCESS - tx.Commit()\", cnt)\n\treturn v, nil\n}", "func (m *Manager) Create(vote models.Vote) error {\n\treturn m.Store.Database.Create(&vote).Error\n}", "func ArticlePost(c *fiber.Ctx) {\n\tShopID := c.Params(\"shop_id\")\n\tHallwaysID := c.Params(\"hallways_id\")\n\tUserID := userIDF(c.Get(\"token\"))\n\n\tvar Article ArticlePostStruct\n\n\tif errorParse := c.BodyParser(&Article); errorParse != nil {\n\t\tfmt.Println(\"Error parsing data\", errorParse)\n\t\tc.JSON(ErrorResponse{MESSAGE: \"Error parsing data\"})\n\t\tc.Status(400)\n\t\treturn\n\t}\n\n\tvar IsOwner IsOwnerShop\n\n\tErrorOwner := sq.Select(\n\t\t\"shop_id\",\n\t).\n\t\tFrom(\"shop\").\n\t\tWhere(\n\t\t\t\"user_id = ? AND shop_id = ?\",\n\t\t\tUserID,\n\t\t\tShopID,\n\t\t).\n\t\tRunWith(database).\n\t\tQueryRow().\n\t\tScan(\n\t\t\t&IsOwner.ShopID,\n\t\t)\n\n\tif ErrorOwner != nil {\n\t\tfmt.Println(\"Not the owner, or the shop is not active\", ErrorOwner)\n\t\tc.JSON(ErrorResponse{MESSAGE: \"Not the owner, or the shop is not active\"})\n\t\tc.SendStatus(400)\n\t\treturn\n\t}\n\n\tid, errorInsert := sq.Insert(\"articles\").\n\t\tColumns(\n\t\t\t\"hallways_id\",\n\t\t\t\"name\",\n\t\t\t\"description\",\n\t\t\t\"price\",\n\t\t\t\"count_article\",\n\t\t).\n\t\tValues(\n\t\t\tHallwaysID,\n\t\t\tArticle.Name,\n\t\t\tArticle.Description,\n\t\t\tArticle.Price,\n\t\t\tArticle.CountArticle,\n\t\t).\n\t\tRunWith(database).\n\t\tExec()\n\n\t// check the insert error before touching the result, otherwise id may be nil\n\tif errorInsert != nil {\n\t\tfmt.Println(\"Error saving article\", errorInsert)\n\t\tc.JSON(ErrorResponse{MESSAGE: \"Error saving article\"})\n\t\tc.SendStatus(500)\n\t\treturn\n\t}\n\n\tIDLast, _ := id.LastInsertId()\n\tIDS := strconv.FormatInt(IDLast, 10)\n\n\tif len(Article.URL) > 0 {\n\t\t_, errorInsert := sq.Insert(\"article_images\").\n\t\t\tColumns(\n\t\t\t\t\"article_id\",\n\t\t\t\t\"url\",\n\t\t\t).\n\t\t\tValues(\n\t\t\t\tIDS,\n\t\t\t\tArticle.URL,\n\t\t\t).\n\t\t\tRunWith(database).\n\t\t\tExec()\n\n\t\tif errorInsert != nil {\n\t\t\tfmt.Println(\"Error saving article image\", errorInsert)\n\t\t}\n\t}\n\n\tc.JSON(SuccessResponse{MESSAGE: IDS})\n}", "func (p *PgDb) CreateEmpTx(r *model.Emp, tx *sqlx.Tx, isValidate bool) (*model.Emp, error) {\n\tcnt := \"postgres (p *PgDb) CreateEmpTx\" // Name of the current method, for logging\n\t//mylog.PrintfDebug(\"[DEBUG] %v - START, param: '%+v'\", cnt, r)\n\n\t// Check that the transaction context is defined\n\tif tx == nil {\n\t\terrM := fmt.Sprintf(\"[ERROR] %v - ERROR - tx *sqlx.Tx is NULL\", cnt)\n\t\tlog.Printf(errM)\n\t\treturn nil, errors.New(errM)\n\t}\n\n\t//=====================================================================\n\t// Add validation of the input structure\n\t//=====================================================================\n\n\t// =====================================================================\n\t// Variable part of the code\n\t// 
=====================================================================\n\t// If running with validation checks\n\tif isValidate {\n\t\t{ //If Dept is NULL or does NOT exist, it is an error\n\t\t\tif !r.Deptno.Valid {\n\t\t\t\terrM := fmt.Sprintf(\"[ERROR] %v - ERROR - r.Deptno is NULL\", cnt)\n\t\t\t\tlog.Printf(errM)\n\t\t\t\treturn nil, errors.New(errM)\n\t\t\t}\n\t\t\tdeptno := int(r.Deptno.Int64)\n\t\t\t// Query within the transaction\n\t\t\tdeptExists, err := p.DeptExistsTx(deptno, tx)\n\t\t\tif err != nil {\n\t\t\t\terrM := fmt.Sprintf(\"[ERROR] %v - ERROR - p.DeptExistsTx(deptno, tx), args = '%v'\", cnt, deptno)\n\t\t\t\tlog.Printf(errM)\n\t\t\t\treturn nil, errors.WithMessage(err, errM)\n\t\t\t}\n\t\t\tif !deptExists {\n\t\t\t\terrM := fmt.Sprintf(\"[ERROR] %v - ERROR - dept '%v' does not exist\", cnt, deptno)\n\t\t\t\tlog.Printf(errM)\n\t\t\t\treturn nil, errors.New(errM)\n\t\t\t}\n\t\t\t//mylog.PrintfDebug(\"[DEBUG] %v - dept %v exists\", cnt, deptno)\n\t\t}\n\t\t{ //If the Emp already exists, ignore it\n\t\t\texists, err := p.EmpExistsTx(r.Empno, tx)\n\t\t\tif err != nil {\n\t\t\t\terrM := fmt.Sprintf(\"[ERROR] %v - ERROR - p.EmpExistsTx(r.Empno, tx), args = '%v'\", cnt, r.Empno)\n\t\t\t\tlog.Printf(errM)\n\t\t\t\treturn nil, errors.WithMessage(err, errM)\n\t\t\t}\n\t\t\t// If the record already exists, do nothing and return what came in\n\t\t\tif exists {\n\t\t\t\terrM := fmt.Sprintf(\"[WARN] %v - WARN - emp '%v' already exists - nothing to do\", cnt, r.Empno)\n\t\t\t\tlog.Printf(errM)\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t\t//mylog.PrintfDebug(\"[DEBUG] %v - emp '%v' does not exist - can be created\", cnt, r.Empno)\n\t\t}\n\t}\n\t// =====================================================================\n\n\t// =====================================================================\n\t// Variable part of the code\n\t// =====================================================================\n\tstmText := sqlInsertEmpText\n\t// =====================================================================\n\n\t//Execute the statement\n\tres, err := tx.NamedExec(stmText, r)\n\tif err != nil {\n\t\terrM := fmt.Sprintf(\"[ERROR] %v - ERROR - tx.NamedExec(stmText, r), args = '%+v'\", cnt, r)\n\t\tlog.Printf(errM)\n\t\treturn nil, errors.Wrap(err, errM)\n\t}\n\n\t{ // Optional part - can be removed later\n\t\t// Check the number of rows processed\n\t\trowCount, err := res.RowsAffected()\n\t\t_ = rowCount\n\t\t//mylog.PrintfDebug(\"[DEBUG] %v -- process %v rows\", cnt, rowCount)\n\t\tif err != nil {\n\t\t\terrM := fmt.Sprintf(\"[ERROR] %v - ERROR - res.RowsAffected()\", cnt)\n\t\t\tlog.Printf(errM)\n\t\t\treturn nil, errors.Wrap(err, errM)\n\t\t}\n\t}\n\n\t// =====================================================================\n\t// Variable part of the code\n\t// =====================================================================\n\t// re-read the updated data - triggers in the DB could have changed it\n\t// if running without checks, the result need not be returned - the level above will fetch it\n\tif isValidate {\n\t\tv, err := p.GetEmpTx(r.Empno, tx)\n\t\tif err != nil {\n\t\t\terrM := fmt.Sprintf(\"[ERROR] %v - ERROR - p.GetEmpTx(r.Empno, tx), args = '%v'\", cnt, r.Empno)\n\t\t\tlog.Printf(errM)\n\t\t\treturn nil, errors.WithMessage(err, errM)\n\t\t}\n\t\tr = v\n\t}\n\t// =====================================================================\n\n\t//mylog.PrintfDebug(\"[DEBUG] %v - SUCCESS\", cnt)\n\n\treturn r, nil\n}", "func SaveStoreInTransaction(store *Store, db *gorm.DB) 
(err error) {\n\tif err := db.Where(Store{Key: store.Key}).FirstOrCreate(store).Error; err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (csi ChannelStoreImpl) Save(channel *models.Channel, db *gorm.DB) *u.AppError {\n\ttransaction := db.Begin()\n\tchannel.PreSave()\n\tif appError := channel.IsValid(false); appError != nil {\n\t\ttransaction.Rollback()\n\t\treturn u.NewLocAppError(\"channelStoreImpl.Save.channel.PreSave\", appError.ID, nil, appError.DetailedError)\n\t}\n\tif !transaction.NewRecord(channel) {\n\t\ttransaction.Rollback()\n\t\treturn u.NewLocAppError(\"channelStoreImpl.Save\", \"save.transaction.create.already_exist\", nil, \"Channel Name: \"+channel.ChannelName)\n\t}\n\tif err := transaction.Create(&channel).Error; err != nil {\n\t\ttransaction.Rollback()\n\t\treturn u.NewLocAppError(\"channelStoreImpl.Save\", \"save.transaction.create.encounterError :\"+err.Error(), nil, \"\")\n\t}\n\ttransaction.Commit()\n\treturn nil\n}", "func (db Database) SaveExchange() (bool, error) {\n\treturn true, makeError()\n}", "func (repository *VesselRepository) Create(vessel *pb.Vessel) error {\n\t_, err := repository.collection.InsertOne(context.TODO(), vessel)\n\treturn err\n}", "func (s ArticleService) Create(ctx context.Context, i Article) error {\n\tstat := `INSERT INTO articles (title, description, content) VALUES(?,?,?);`\n\tif s.DB == nil {\n\t\tpanic(\"no existing database\")\n\t}\n\t_, err := s.DB.ExecContext(ctx, stat, i.Title, i.Desc, i.Content)\n\treturn err\n}", "func (a *Api) Create(db *system.DB) (err error) {\n\n\tif err = a.validateError(); err != nil {\n\t\treturn\n\t}\n\n\ttx, err := db.Begin()\n\n\tdefer func() {\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t\treturn\n\t\t}\n\n\t\tif err = tx.Commit(); err != nil {\n\t\t\ttx.Rollback()\n\t\t\treturn\n\t\t}\n\t}()\n\n\tif err != nil {\n\t\tlog.Println(\"Api.Create() Error -> \", err)\n\t\treturn\n\t}\n\n\ta.IsActive = true\n\ta.CreatedAt = time.Now()\n\ta.UpdatedAt = time.Now()\n\n\terr = tx.QueryRow(a.queryCreate(),\n\t\ta.UserID,\n\t\ta.Token,\n\t\ta.PushNotificationToken,\n\t\ta.PushNotificationService,\n\t\ta.ManufacturerName,\n\t\ta.ManufacturerModel,\n\t\ta.ManufacturerVersion,\n\t\ta.DeviceID,\n\t\ta.IsActive,\n\t\ta.CreatedAt,\n\t\ta.UpdatedAt).Scan(&a.ID)\n\n\tif err != nil {\n\t\tlog.Printf(\"Api.Create() QueryRow() -> %v Error -> %v\", a.queryCreate(), err)\n\t\treturn\n\t}\n\n\tlog.Println(\"Api.Create() create successful, id -> \", a.ID)\n\treturn\n}", "func storeBid(bid *Bid) error {\n\n\tvar err error\n\n\tkeyId := fmt.Sprintf(\"%s:%d\", BIDS, bid.Id)\n\tkey, err := as.NewKey(NAMESPACE, BIDS, keyId)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trec := as.BinMap{\n\t\t\"bid_id\": bid.Id,\n\t\t\"broker_id\": bid.BrokerId,\n\t\t\"offer_id\": bid.OfferId,\n\t\t\"price\": bid.Price,\n\t}\n\n\t// TODO: use the Put Operation to store the record.\n\n\treturn nil\n}", "func SaveBook(book Book) Book {\n\tutility.DB().Create(&book)\n\treturn book\n}", "func accept_offer(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tvar err error\n\tfmt.Println(\"starting mark_for_sale\")\n\n\t// this is quirky\n\t// todo - get the \"company that authed the transfer\" from the certificate instead of an argument\n\t// should be possible since we can now add attributes to the enrollment cert\n\t// as is.. this is a bit broken (security wise), but it's much much easier to demo! holding off for demos sake\n\n\tif len(args) != 2 {\n\t\treturn shim.Error(\"Incorrect number of arguments. 
Expecting 3\")\n\t}\n\n\t// input sanitation\n\terr = sanitize_arguments(args)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\tvar offer_id = args[0]\n\tvar authed_by_company = args[1]\n\n\t//check if offer exists and is authed by company to which owner of marble belongs.\n\n\tfmt.Println(offer_id + \" - |\" + authed_by_company)\n\n\tofferAsBytes, err := stub.GetState(offer_id)\n\tif err != nil {\n\t\treturn shim.Error(\"This offer does not exist\")\n\t}\n\n\toffer := Offer{}\n\tjson.Unmarshal(offerAsBytes, &offer)\n\n\tif offer.Marble.Owner.Company != authed_by_company {\n\t\treturn shim.Error(\"This user is not authorized to perform this operation\")\n\t}\n\n\toffer.Status = \"ACCEPTED\"\n\n\t//store user\n\tupdateOfferAsBytes, _ := json.Marshal(offer) //convert to array of bytes\n\terr = stub.PutState(offer.Id, updateOfferAsBytes) //store owner by its Id\n\tif err != nil {\n\t\tfmt.Println(\"Could not update offer\")\n\t\treturn shim.Error(err.Error())\n\t}\n\n\tfmt.Println(\"- end accept offer\")\n\treturn shim.Success(nil)\n\n}", "func (repo *Repository) Store(entity Entity) error {\n\tstmtIns, err := repo.Db.Prepare(entity.GenerateInsertSql())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer stmtIns.Close()\n\n\tres, err := stmtIns.Exec(entity.ToInsertArgs()...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trepo.LastInsertId, err = res.LastInsertId()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func Store(w http.ResponseWriter, r *http.Request) {\n\tc := flight.Context(w, r)\n\n\t//if !c.FormValid(\"name\") {\n\t//\tCreate(w, r)\n\t//\treturn\n\t//}\n\n\tif !u.IsPositiveInteger(r.FormValue(\"amount\")) {\n\t\tc.FlashNotice(\"Enter valid amount\")\n\t\tCreate(w, r)\n\t\treturn\n\t}\n\n\t_, err := code.Create(c.DB, r.FormValue(\"amount\"), r.FormValue(\"details\"))\n\tif err != nil {\n\t\tc.FlashErrorGeneric(err)\n\t\tCreate(w, r)\n\t\treturn\n\t}\n\n\tc.FlashSuccess(\"Item added.\")\n\tc.Redirect(uri)\n}", "func CreatePerson(db *sql.DB) {}", "func (ms *MicroStellar) CreateOffer(sourceSeed string, sellAsset *Asset, buyAsset *Asset, price string, sellAmount string, options ...*Options) error {\n\tofferType := OfferCreate\n\n\tif len(options) > 0 {\n\t\tif options[0].passiveOffer {\n\t\t\tofferType = OfferCreatePassive\n\t\t}\n\t}\n\n\treturn ms.ManageOffer(sourceSeed, &OfferParams{\n\t\tOfferType: offerType,\n\t\tSellAsset: sellAsset,\n\t\tSellAmount: sellAmount,\n\t\tBuyAsset: buyAsset,\n\t\tPrice: price,\n\t}, options...)\n}", "func (p *Poet) Create(db *sql.DB) error {\n\tvar (\n\t\terr error\n\t)\n\n\t// assume id is already assigned\n\n\t// set birthday\n\tp.BirthDate = time.Now().Truncate(time.Millisecond)\n\n\t// prepare statement if not already done so.\n\tif poetCreateStmt == nil {\n\t\t// create statement\n\t\tstmt := `INSERT INTO poets (\n id, designer, name, birthDate, deathDate, description, language, programFileName, parameterFileName, parameterFileIncluded, path\n ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)`\n\t\tpoetCreateStmt, err = db.Prepare(stmt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t_, err = poetCreateStmt.Exec(\n\t\tp.Id,\n\t\tp.Designer.Id,\n\t\tp.Name,\n\t\tp.BirthDate,\n\t\tp.DeathDate,\n\t\tp.Description,\n\t\tp.Language,\n\t\tp.ProgramFileName,\n\t\tp.ParameterFileName,\n\t\tp.ParameterFileIncluded,\n\t\tp.Path,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (repository *MongoRepository) Create(ctx context.Context, vessel *Vessel) error {\n\tlog.Printf(\"Create a new vessle 
: %v\", *vessel)\n\t_, err := repository.collection.InsertOne(ctx, vessel)\n\treturn err\n}", "func (ms *MicroStellar) ManageOffer(sourceSeed string, params *OfferParams, options ...*Options) error {\n\tif !ValidAddressOrSeed(sourceSeed) {\n\t\treturn errors.Errorf(\"invalid source address or seed: %s\", sourceSeed)\n\t}\n\n\tif err := params.BuyAsset.Validate(); err != nil {\n\t\treturn errors.Wrap(err, \"ManageOffer\")\n\t}\n\n\tif err := params.SellAsset.Validate(); err != nil {\n\t\treturn errors.Wrap(err, \"ManageOffer\")\n\t}\n\n\trate := build.Rate{\n\t\tSelling: params.SellAsset.ToStellarAsset(),\n\t\tBuying: params.BuyAsset.ToStellarAsset(),\n\t\tPrice: build.Price(params.Price),\n\t}\n\n\tvar offerID uint64\n\tif params.OfferID != \"\" {\n\t\tvar err error\n\t\tif offerID, err = strconv.ParseUint(params.OfferID, 10, 64); err != nil {\n\t\t\treturn errors.Wrapf(err, \"ManageOffer: bad OfferID: %v\", params.OfferID)\n\t\t}\n\t}\n\n\tvar builder build.ManageOfferBuilder\n\tswitch params.OfferType {\n\tcase OfferCreate:\n\t\tamount := build.Amount(params.SellAmount)\n\t\tbuilder = build.CreateOffer(rate, amount)\n\tcase OfferCreatePassive:\n\t\tamount := build.Amount(params.SellAmount)\n\t\tbuilder = build.CreatePassiveOffer(rate, amount)\n\tcase OfferUpdate:\n\t\tamount := build.Amount(params.SellAmount)\n\t\tbuilder = build.UpdateOffer(rate, amount, build.OfferID(offerID))\n\tcase OfferDelete:\n\t\tbuilder = build.DeleteOffer(rate, build.OfferID(offerID))\n\tdefault:\n\t\treturn errors.Errorf(\"ManageOffer: bad OfferType: %v\", params.OfferType)\n\t}\n\n\ttx := NewTx(ms.networkName, ms.params)\n\n\tif len(options) > 0 {\n\t\ttx.SetOptions(options[0])\n\t}\n\n\ttx.Build(sourceAccount(sourceSeed), builder)\n\ttx.Sign(sourceSeed)\n\ttx.Submit()\n\treturn tx.Err()\n}", "func (b *OGame) offerMarketplace(marketItemType int64, itemID any, quantity, priceType, price, priceRange int64, celestialID ogame.CelestialID) error {\n\tparams := url.Values{\"page\": {\"ingame\"}, \"component\": {\"marketplace\"}, \"tab\": {\"create_offer\"}, \"action\": {\"submitOffer\"}, \"asJson\": {\"1\"}}\n\tconst (\n\t\tshipsItemType = iota + 1\n\t\tresourcesItemType\n\t\titemItemType\n\t)\n\tvar itemIDPayload string\n\tvar itemType int64\n\tif itemIDStr, ok := itemID.(string); ok {\n\t\tif len(itemIDStr) == 40 {\n\t\t\titemType = itemItemType\n\t\t\titemIDPayload = itemIDStr\n\t\t} else {\n\t\t\treturn errors.New(\"invalid itemID string\")\n\t\t}\n\t} else if itemIDInt64, ok := itemID.(int64); ok {\n\t\tif itemIDInt64 >= 1 && itemIDInt64 <= 3 {\n\t\t\titemType = resourcesItemType\n\t\t\titemIDPayload = utils.FI64(itemIDInt64)\n\t\t} else if ogame.ID(itemIDInt64).IsShip() {\n\t\t\titemType = shipsItemType\n\t\t\titemIDPayload = utils.FI64(itemIDInt64)\n\t\t} else {\n\t\t\treturn errors.New(\"invalid itemID int64\")\n\t\t}\n\t} else if itemIDInt, ok := itemID.(int); ok {\n\t\tif itemIDInt >= 1 && itemIDInt <= 3 {\n\t\t\titemType = resourcesItemType\n\t\t\titemIDPayload = strconv.Itoa(itemIDInt)\n\t\t} else if ogame.ID(itemIDInt).IsShip() {\n\t\t\titemType = shipsItemType\n\t\t\titemIDPayload = strconv.Itoa(itemIDInt)\n\t\t} else {\n\t\t\treturn errors.New(\"invalid itemID int\")\n\t\t}\n\t} else if itemIDID, ok := itemID.(ogame.ID); ok {\n\t\tif itemIDID.IsShip() {\n\t\t\titemType = shipsItemType\n\t\t\titemIDPayload = utils.FI64(itemIDID)\n\t\t} else {\n\t\t\treturn errors.New(\"invalid itemID ID\")\n\t\t}\n\t} else {\n\t\treturn errors.New(\"invalid itemID type\")\n\t}\n\n\tvals := url.Values{\n\t\t\"page\": 
{\"ingame\"},\n\t\t\"component\": {\"marketplace\"},\n\t\t\"tab\": {\"create_offer\"},\n\t}\n\tpageHTML, err := b.getPageContent(vals)\n\tif err != nil {\n\t\treturn err\n\t}\n\tgetToken := func(pageHTML []byte) (string, error) {\n\t\tm := regexp.MustCompile(`var token = \"([^\"]+)\"`).FindSubmatch(pageHTML)\n\t\tif len(m) != 2 {\n\t\t\treturn \"\", errors.New(\"unable to find token\")\n\t\t}\n\t\treturn string(m[1]), nil\n\t}\n\ttoken, _ := getToken(pageHTML)\n\n\tpayload := url.Values{\n\t\t\"marketItemType\": {utils.FI64(marketItemType)},\n\t\t\"itemType\": {utils.FI64(itemType)},\n\t\t\"itemId\": {itemIDPayload},\n\t\t\"quantity\": {utils.FI64(quantity)},\n\t\t\"priceType\": {utils.FI64(priceType)},\n\t\t\"price\": {utils.FI64(price)},\n\t\t\"priceRange\": {utils.FI64(priceRange)},\n\t\t\"token\": {token},\n\t}\n\tvar res struct {\n\t\tStatus string `json:\"status\"`\n\t\tMessage string `json:\"message\"`\n\t\tErrors []struct {\n\t\t\tMessage string `json:\"message\"`\n\t\t\tError int64 `json:\"error\"`\n\t\t} `json:\"errors\"`\n\t}\n\tby, err := b.postPageContent(params, payload, ChangePlanet(celestialID))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := json.Unmarshal(by, &res); err != nil {\n\t\treturn err\n\t}\n\tif len(res.Errors) > 0 {\n\t\treturn errors.New(utils.FI64(res.Errors[0].Error) + \" : \" + res.Errors[0].Message)\n\t}\n\treturn err\n}", "func CreatePriceHistory(w http.ResponseWriter, r *http.Request) {\n\trequestBody, _ := ioutil.ReadAll(r.Body)\n\tvar price entity.PriceHistory\n\tjson.Unmarshal(requestBody, &price)\n\n\tdatabase.Connector.Create(&price)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusCreated)\n\tjson.NewEncoder(w).Encode(price)\n}", "func Store(category *models.Category) (*models.Category, error) {\n\tif err := db.DB.Create(&category).Error; err != nil {\n\t\treturn category, err\n\t}\n\treturn category, nil\n}", "func (db *DbAnimalRepo) Store(animal domain.Animal) error {\n\ts := fmt.Sprintf(\"INSERT INTO animals VALUES (%d, \\\"%s\\\", \\\"%s\\\")\",\n\t\tanimal.ID, animal.Specie, \"true\")\n\treturn db.dbHandler.Execute(s)\n}", "func (me *Model) Store(\n\tctx context.Context,\n\tcol *mongo.Collection,\n) (err error) {\n\tvar res *mongo.InsertOneResult\n\tres, err = col.InsertOne(ctx, me)\n\tif err != nil {\n return\n }\n\tid, ok := res.InsertedID.(primitive.ObjectID)\n\tif !ok {\n\t\terr = errors.New(\"The returned ID was not ObjectID\")\n\t\tlog.Println(\"[WARN]\" + err.Error())\n\t\treturn\n\t}\n\tme.ID = id\n\treturn\n}", "func (me *DB) Store(conf *pb.Configuration) error {\n\tconfb, err := proto.Marshal(conf)\n\tif err != nil {\n\t\treturn errors.Wrap(err, 500, errors.E_proto_marshal_error)\n\t}\n\n\terr = me.session.Query(\"INSERT INTO \"+tblConf+\"(cluster, conf) VALUES(?,?)\",\n\t\tconf.GetCluster(), confb).Exec()\n\tif err != nil {\n\t\treturn errors.Wrap(err, 500, errors.E_database_error)\n\t}\n\treturn nil\n}", "func CreateBooking(s *mgo.Session) func(w http.ResponseWriter, r *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t// Open bookings collection\n\t\tsession := s.Copy()\n\t\tdefer session.Close()\n\t\tdbc := session.DB(\"tickets\").C(\"bookings\")\n\n\t\t// UUID\n\t\tparams := mux.Vars(r)\n\t\tuuid := params[\"uuid\"]\n\n\t\t// Create a booking to store incoming JSON booking request\n\t\tvar booking Booking\n\n\t\t// Decode POST JSON file\n\t\tdecoder := json.NewDecoder(r.Body)\n\t\terr := decoder.Decode(&booking)\n\t\tif err != nil {\n\t\t\tErrorWithJSON(w, 
\"Incorrect body\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\t// Check if the URL uuid and the booking JSON UUID match\n\t\tif uuid != booking.UUID || booking.UUID == \"\" {\n\t\t\tErrorWithJSON(w, \"Database error, uuid of request doesn't match booking uuid\", http.StatusInternalServerError)\n\t\t\tlog.Println(\"Error booking uuids don't match: \", uuid, booking.UUID)\n\t\t\treturn\n\t\t}\n\n\t\t// Payment\n\t\tvar payment Payment\n\t\tpayment.Name = booking.Name\n\t\tpayment.OrderDescription = booking.UUID\n\t\tpayment.CardNumber = booking.CCNumber\n\t\tpayment.Cvc = booking.CVV\n\t\tpayment.Month = strconv.Itoa(booking.Month)\n\t\tpayment.Year = strconv.Itoa(booking.Year)\n\t\tpayment.Amount = booking.Total\n\n\t\t// Clear sensitivite data in booking\n\t\tbooking.CCNumber = \"0000-0000-0000-0000\"\n\t\tbooking.CVV = \"000\"\n\t\tbooking.Month = 99\n\t\tbooking.Year = 0000\n\n\t\t// Invoke payment\n\t\tresp := makePayment(&payment)\n\t\tlog.Println(\"response Status:\", resp.Status)\n\t\tlog.Println(\"response Headers:\", resp.Header)\n\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\tlog.Println(\"response Body:\", string(body))\n\n\t\tif resp.StatusCode == http.StatusOK {\n\t\t\t// Insert booking to database\n\t\t\terr = dbc.Insert(&booking)\n\t\t\tif err != nil {\n\t\t\t\t// Check if a booking with a same UUID exists\n\t\t\t\tif mgo.IsDup(err) {\n\t\t\t\t\tErrorWithJSON(w, \"Booking with uuid already exists\", http.StatusBadRequest)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tErrorWithJSON(w, \"Database error, failed to insert booking\", http.StatusInternalServerError)\n\t\t\t\tlog.Println(\"Failed to insert booking: \", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Create a QR code\n\t\t\terr := qrcode.WriteFile(\"https://store.kings.cam.ac.uk/bookings/\"+booking.UUID, qrcode.Medium, 256, booking.UUID+\".png\")\n\n\t\t\t// Send an email\n\t\t\tsendmail(&booking)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Failed to create a QR code: \", err)\n\t\t\t}\n\n\t\t\t// Write response\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.Header().Set(\"Location\", r.URL.Path)\n\t\t\tw.WriteHeader(http.StatusCreated)\n\t\t\treturn\n\t\t}\n\t}\n}", "func(b *Book) CreateBook() *Book{\n\tdb.NewRecord(b) //our orm\n\tdb.Create(&b)\n\treturn b\n}", "func (ch *ClickHouse) storeTable(adapter adapters.SQLAdapter, tableHelper *TableHelper, fdata *schema.ProcessedFile, table *adapters.Table) error {\n\tdbSchema, err := tableHelper.EnsureTableWithoutCaching(ch.ID(), table)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := adapter.BulkInsert(dbSchema, fdata.GetPayload()); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func CreateBook(d *dao.Dao, tx *sql3.Tx, ctx context.Context, name string) (result sql2.Result, err error) {\n\tresult, err = d.Db.ExecTx(ctx, tx, \"insert book(id,name) value(?,?)\", 1, name)\n\treturn\n}", "func AddBookToUser(db *gorm.DB) func(c echo.Context) error {\n return func(c echo.Context) error {\n userBook := &models.UserBook{}\n returnBook := models.ReturnBook{}\n // gets user\n if loggedUser, logErr := GetUser(c, db); logErr == nil {\n // gets book id\n if id, err := strconv.Atoi(c.Param(\"id\")); err == nil {\n userBook.BookID = id\n userBook.UserID = loggedUser.ID\n userBook.InBooks = true\n userBook.Status = models.NOT_READ\n // creates record in user_book\n db.Create(&userBook)\n // selects all neded information for FE\n db.Table(\"books\").Select(\n \"books.id, books.title, books.author, books.image_url, user_book.in_books, user_book.status, 
user_book.created_at\").\n Joins(\"JOIN user_book ON user_book.book_id = books.id\").Where(\"user_id = ? AND book_id = ?\", loggedUser.ID, id).Scan(&returnBook)\n return c.JSON(http.StatusOK, returnBook)\n } else {\n return c.JSON(http.StatusBadRequest, map[string]string{\"message\": \"FAIL\"})\n }\n } else {\n return c.JSON(http.StatusBadRequest, map[string]string{\"message\": logErr.Error()})\n }\n }\n}", "func (db *Db) CreateEquipement(ip, host, typ string) (*Equipement, error) {\n\tlog.Println(\"CreateEquipement : \", ip, host, typ)\n\tif ok, _ := regexp.MatchString(tools.ValidIPAddressRegex, ip); !ok {\n\t\treturn nil, errors.New(\"IP invalid !\")\n\t}\n\tif ok, _ := regexp.MatchString(tools.ValidHostAddressRegex, host); !ok {\n\t\treturn nil, errors.New(\"Host invalid !\")\n\t}\n\t//*\n\tlog.Printf(\"Type : %v\", typ)\n\tlog.Printf(\"AutorizedTypes : %v\", db.GetEquipementTypes())\n\n\tidType := -1\n\tif idType = posInSlice(db.GetEquipementTypes(), typ); idType == -1 {\n\t\treturn nil, errors.New(\"Type invalid !\")\n\t}\n\t//*/\n\t//TODo check if equipement exist (normaly done by the orm)\n\tequi := &Equipement{IP: ip, Hostname: host, Type: idType}\n\n\t_, err := (*db.Orm).Insert(equi)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.Printf(\"Equi %s created !\", equi.Hostname)\n\treturn equi, nil\n}", "func (server *LaptopServer) CreateLaptop(ctx context.Context, req *pb.CreateLaptopRequest) (*pb.CreateLaptopResponse, error) {\n\tlaptop := req.GetLaptop()\n\n\tlog.Printf(\"recieved a create laptop request for laptop with id: %v\", laptop.GetId())\n\n\tif len(laptop.GetId()) > 0 {\n\t\t_, err := uuid.Parse(laptop.GetId())\n\t\tif err != nil {\n\t\t\treturn nil, status.Errorf(codes.InvalidArgument, \"laptop id is invalid: %v\", err)\n\t\t}\n\t} else {\n\t\tid, err := uuid.NewRandom()\n\t\tif err != nil {\n\t\t\treturn nil, status.Error(codes.Internal, \"cannot generate new ID for laptop\")\n\t\t}\n\t\tlaptop.Id = id.String()\n\t}\n\n\t// Heavy processing\n\t// time.Sleep(6 * time.Second)\n\n\t// Don't save if request cancelled\n\tif ctx.Err() == context.Canceled {\n\t\tlog.Print(\"request cancelled\")\n\t\treturn nil, status.Errorf(codes.Canceled, \"request cancelled\")\n\t}\n\n\t// Don't save if deadline exceeded\n\tif ctx.Err() == context.DeadlineExceeded {\n\t\tlog.Print(\"deadline exceeded\")\n\t\treturn nil, status.Errorf(codes.DeadlineExceeded, \"deadline exeeded\")\n\t}\n\n\terr := server.LaptopStore.Save(laptop)\n\tif errors.Is(err, ErrAlreadyExists) {\n\t\treturn nil, status.Errorf(codes.AlreadyExists, \"unable to save data: %v\", err)\n\t} else if err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"unable to save data: %v\", err)\n\t}\n\n\tlog.Printf(\"saved laptop with id: %v\", laptop.GetId())\n\n\treturn &pb.CreateLaptopResponse{\n\t\tId: laptop.GetId(),\n\t}, nil\n}", "func CreatePoll(w http.ResponseWriter, r *http.Request) {\n\tbodyBytes, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tvar pollInfo model.Poll\n\tjson.Unmarshal(bodyBytes, &pollInfo)\n\n\tsession, err := model.Rdb.HGetAll(model.Ctx, r.Context().Value(\"SID\").(string)).Result()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tif _, err := model.DB.Exec(`INSERT INTO polls (title, option1, option2, option3, option4,\n\t\toption5, created_by, user_voted) VALUES (?, ?, ?, ?, ?, ?, ?, 
?)`,\n\t\tpollInfo.Title,\n\t\tpollInfo.Option1,\n\t\tpollInfo.Option2,\n\t\tpollInfo.Option3,\n\t\tpollInfo.Option4,\n\t\tpollInfo.Option5,\n\t\tsession[\"userId\"],\n\t\t\"{}\",\n\t); err != nil {\n\t\tpanic(err)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tjson.NewEncoder(w).Encode(Response{true, \"\"})\n}", "func CreateNew(dbFile, feeXPub string) error {\n\tlog.Infof(\"Initializing new database at %s\", dbFile)\n\n\tdb, err := bolt.Open(dbFile, 0600, &bolt.Options{Timeout: 1 * time.Second})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to open db file: %w\", err)\n\t}\n\n\tdefer db.Close()\n\n\t// Create all storage buckets of the VSP if they don't already exist.\n\terr = db.Update(func(tx *bolt.Tx) error {\n\t\t// Create parent bucket.\n\t\tvspBkt, err := tx.CreateBucket(vspBktK)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to create %s bucket: %w\", string(vspBktK), err)\n\t\t}\n\n\t\t// Initialize with initial database version (1).\n\t\terr = vspBkt.Put(versionK, uint32ToBytes(initialVersion))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tlog.Info(\"Generating ed25519 signing key\")\n\n\t\t// Generate ed25519 key\n\t\t_, signKey, err := ed25519.GenerateKey(rand.Reader)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to generate signing key: %w\", err)\n\t\t}\n\t\terr = vspBkt.Put(privateKeyK, signKey.Seed())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Generate a secret key for initializing the cookie store.\n\t\tlog.Info(\"Generating cookie secret\")\n\t\tsecret := make([]byte, 32)\n\t\t_, err = rand.Read(secret)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = vspBkt.Put(cookieSecretK, secret)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tlog.Info(\"Storing extended public key\")\n\t\t// Store fee xpub\n\t\terr = vspBkt.Put(feeXPubK, []byte(feeXPub))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Create ticket bucket.\n\t\t_, err = vspBkt.CreateBucket(ticketBktK)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to create %s bucket: %w\", string(ticketBktK), err)\n\t\t}\n\n\t\t// Create vote change bucket.\n\t\t_, err = vspBkt.CreateBucket(voteChangeBktK)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to create %s bucket: %w\", string(voteChangeBktK), err)\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Info(\"Database initialized\")\n\n\treturn nil\n}", "func (a *Admin) Store(c *gin.Context) {\n\n\tadmin := models.Admin{}\n\terr := c.ShouldBind(&admin)\n\tif old, err := json.Marshal(admin); err == nil {\n\t\thelper.SetFlash(c, string(old), \"old\")\n\t}\n\n\tif err != nil {\n\t\thelper.SetFlash(c, err.Error(), \"error\")\n\t\tc.Redirect(http.StatusFound, \"/admin/admin/create\")\n\t\treturn\n\t}\n\n\tif err := helper.ValidateStruct(admin); err != nil {\n\t\thelper.SetFlash(c, err.Error(), \"error\")\n\t\tc.Redirect(http.StatusFound, \"/admin/admin/create\")\n\t\treturn\n\t}\n\n\tif err := helper.ValidateVariable(admin.Password, \"gte=6,lte=18\"); err != nil {\n\t\thelper.SetFlash(c, \"密码长度为6-18个字符\", \"error\")\n\t\tc.Redirect(http.StatusFound, \"/admin/admin/create\")\n\t\treturn\n\t}\n\tadmin.GeneratePassword()\n\n\texisted := 0\n\tdb.Mysql.Model(&models.Admin{}).Where(\"name = ?\", admin.Name).Count(&existed)\n\tif existed > 0 {\n\t\thelper.SetFlash(c, \"姓名已经存在\", \"error\")\n\t\tc.Redirect(http.StatusFound, \"/admin/admin/create\")\n\t\treturn\n\t}\n\n\tdb.Mysql.Model(&models.Admin{}).Where(\"mobile = ?\", admin.Mobile).Count(&existed)\n\tif 
existed > 0 {\n\t\thelper.SetFlash(c, \"手机号码已经存在\", \"error\")\n\t\tc.Redirect(http.StatusFound, \"/admin/admin/create\")\n\t\treturn\n\t}\n\n\tdb.Mysql.Model(&models.Admin{}).Where(\"email = ?\", admin.Email).Count(&existed)\n\tif existed > 0 {\n\t\thelper.SetFlash(c, \"邮箱已经存在\", \"error\")\n\t\tc.Redirect(http.StatusFound, \"/admin/admin/create\")\n\t\treturn\n\t}\n\n\tif err := db.Mysql.Create(&admin).Error; err != nil {\n\t\thelper.SetFlash(c, err.Error(), \"error\")\n\t\tc.Redirect(http.StatusFound, \"/admin/admin/create\")\n\t\treturn\n\t}\n\n\thelper.SetFlash(c, \"创建管理员成功\", \"success\")\n\tc.Redirect(http.StatusFound, \"/admin/admin\")\n}", "func (c *MySQLClient) Insert(p *purchase.Purchase) error {\n\tif p.ID != 0 {\n\t\treturn fmt.Errorf(\"purchase cannot have a preexisting ID\")\n\t}\n\n\tvar err error\n\tvar buyBytes, sellBytes []byte\n\tif p.BuyOrder != nil {\n\t\tbuyBytes, err = json.Marshal(p.BuyOrder)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to marshal buy order: %v\", err)\n\t\t}\n\t}\n\tif p.SellOrder != nil {\n\t\tsellBytes, err = json.Marshal(p.SellOrder)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to marshal sell order: %v\", err)\n\t\t}\n\t}\n\n\tquery := `INSERT INTO trader_one(buy_order, sell_order) VALUES (?, ?)`\n\tctx, cancelFunc := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancelFunc()\n\tstmt, err := c.db.PrepareContext(ctx, query)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to prepare SQL statement: %v\", err)\n\t}\n\tdefer stmt.Close()\n\n\tres, err := stmt.ExecContext(ctx, jsonString(buyBytes), jsonString(sellBytes))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to insert row: %v\", err)\n\t}\n\tid, err := res.LastInsertId()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to find new ID: %v\", err)\n\t}\n\tp.ID = id\n\treturn nil\n}", "func (main *Main) Create(e echo.Context) (err error) {\n\n\t// get request and validate\n\treq := new(request.Create)\n\te.Bind(req)\n\tif err = e.Validate(req); err != nil {\n\t\treturn rest.ConstructErrorResponse(e, exception.NewInputValidationFailed(err.Error()))\n\t}\n\t// map req to input data\n\treqData := input.NewNewProductCreate(\n\t\tmap[string]interface{}{\n\t\t\t\"Name\": req.Name,\n\t\t\t\"Qty\": req.Qty,\n\t\t\t\"Price\": req.Price,\n\t\t\t\"Weight\": req.Weight,\n\t\t\t\"Images\": req.Images,\n\t\t\t\"Description\": req.Description,\n\t\t},\n\t)\n\t//insert data to db\n\tproduct, exc := ProductModel.Create(reqData)\n\tif exc != nil {\n\t\treturn rest.ConstructErrorResponse(e, exc)\n\t}\n\tdata := map[string]contract.Model{\n\t\t\"created_product\": product,\n\t}\n\treturn rest.ConstructSuccessResponse(e, data)\n}", "func (p *pollData) Insert(poll PollDeserializer) (err error) {\n\n\tpollTags := GetStructTags(\"full\", \"db\", Poll{})\n\ttx, err := rrsql.DB.Beginx()\n\tif err != nil {\n\t\tlog.Printf(\"Fail to get sql connection: %v\\n\", err)\n\t\treturn err\n\t}\n\t// Either rollback or commit transaction\n\tdefer func() {\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t\treturn\n\t\t}\n\t\terr = tx.Commit()\n\t}()\n\n\t// Insert poll first\n\t// Notice colon in 'VALUES (:%s)', because strings.Join will not create the first colon\n\tpollQ := fmt.Sprintf(`INSERT INTO polls (%s) VALUES (:%s)`,\n\t\tstrings.Join(pollTags, \",\"), strings.Join(pollTags, \",:\"))\n\tpollInserted, err := tx.NamedExec(pollQ, poll.Poll)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpollID, err := pollInserted.LastInsertId()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif 
len(poll.Choices) > 0 {\n\t\tchoiceTags := GetStructTags(\"full\", \"db\", Choice{}, []string{\"total_vote\", \"created_at\", \"updated_at\"})\n\n\t\tchoiceQ := fmt.Sprintf(`INSERT INTO polls_choices (%s) VALUES (:%s)`,\n\t\t\tstrings.Join(choiceTags, \",\"), strings.Join(choiceTags, \",:\"))\n\n\t\t// Change poll_id in options to the poll id we just inserted\n\t\t// Batch insert for tx is merged but not released yet, use for loop\n\t\t// Info: https://github.com/jmoiron/sqlx/pull/285\n\t\tfor _, choice := range poll.Choices {\n\t\t\tchoice.PollID.Int = pollID\n\t\t\tchoice.PollID.Valid = true\n\t\t\tif _, err := tx.NamedExec(choiceQ, choice); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (r Resolver) addToDB() {\n\tquery := fmt.Sprintf(\n\t\t`INSERT INTO noobles (title, category, description, audio, creator)\nVALUES ($1, $2, $3, $4, $5)\nRETURNING id`,\n\t)\n\n\terr := database.PGclient.QueryRow(context.TODO(), query,\n\t\tr.nooble.Title, r.nooble.Category, r.nooble.Description, r.nooble.Audio, r.nooble.Creator.Email).Scan(&r.nooble.ID)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n}", "func (i *AuthFlowInfo) Store(tx *sqlx.Tx) error {\n\tlog.Debug(\"Storing auth flow info\")\n\tstore := i.toAuthFlowInfo()\n\treturn db.RunWithinTransaction(tx, func(tx *sqlx.Tx) error {\n\t\tif i.PollingCode != nil {\n\t\t\tif err := i.PollingCode.Store(tx); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\t_, err := tx.NamedExec(\n\t\t\t`INSERT INTO AuthInfo (state_h, iss, restrictions, capabilities, subtoken_capabilities, name, \n expires_in, polling_code, rotation, response_type, max_token_len) \n VALUES(:state_h, :iss, :restrictions, :capabilities, :subtoken_capabilities, :name,\n :expires_in, :polling_code, :rotation, :response_type, :max_token_len)`,\n\t\t\tstore)\n\t\treturn errors.WithStack(err)\n\t})\n}", "func (book *Book) InsertBook() {\n\tdb := utils.OpenDB()\n\tdefer db.Close()\n\tdb.Create(book)\n}", "func Store(w http.ResponseWriter, r *http.Request) {\n\tc := flight.Context(w, r)\n\n\t//submit_value := r.FormValue(\"sb\")\n\n\tif !c.FormValid(\"tittle\",\"issian\") {\n\t\tCreate(w, r)\n\t\treturn\n\t}\n\n\t// var publish string\n\t// if submit_value==\"save\"{\n\t// \tpublish = \"0\"\n\t// }else {\n\t// \tpublish = \"1\"\n\t// }\n\t// fmt.Println(publish)\n\n\t_, err := article.Create(c.DB, r.FormValue(\"tittle\"), r.FormValue(\"issian\"), r.FormValue(\"sb\"), c.UserID)\n\tif err != nil {\n\t\tc.FlashErrorGeneric(err)\n\t\tCreate(w, r)\n\t\treturn\n\t}\n\n\tc.FlashSuccess(\"Article added.\")\n\tc.Redirect(uri)\n}", "func ArticlePut(c *fiber.Ctx) {\n\tShopID := c.Params(\"shop_id\")\n\tHallwaysID := c.Params(\"hallways_id\")\n\tArticleID := c.Params(\"article_id\")\n\tUserID := userIDF(c.Get(\"token\"))\n\n\tvar Article ArticlePostStruct\n\n\tif errorParse := c.BodyParser(&Article); errorParse != nil {\n\t\tfmt.Println(\"Error parsing data\", errorParse)\n\t\tc.JSON(ErrorResponse{MESSAGE: \"Error al parsear información\"})\n\t\tc.Status(400)\n\t\treturn\n\t}\n\n\tvar IsOwner IsOwnerShop\n\n\tErrorOwner := sq.Select(\n\t\t\"shop_id\",\n\t).\n\t\tFrom(\"shop\").\n\t\tWhere(\n\t\t\t\"user_id = ? 
AND shop_id = ?\",\n\t\t\tUserID,\n\t\t\tShopID,\n\t\t).\n\t\tRunWith(database).\n\t\tQueryRow().\n\t\tScan(\n\t\t\t&IsOwner.ShopID,\n\t\t)\n\n\tif ErrorOwner != nil {\n\t\tfmt.Println(\"Not is owner or active shop\", ErrorOwner)\n\t\tc.JSON(ErrorResponse{MESSAGE: \"Not is owner or active shop\"})\n\t\tc.SendStatus(400)\n\t\treturn\n\t}\n\n\tUpdateArticleSQL := sq.Update(\"articles\")\n\n\tif len(Article.Name) > 0 {\n\t\tUpdateArticleSQL = UpdateArticleSQL.Set(\"name\", Article.Name)\n\t}\n\n\tif len(Article.Description) > 0 {\n\t\tUpdateArticleSQL = UpdateArticleSQL.Set(\"description\", Article.Description)\n\t}\n\n\tif &Article.Price != nil {\n\t\tUpdateArticleSQL = UpdateArticleSQL.Set(\"price\", Article.Price)\n\t}\n\n\tif &Article.CountArticle != nil {\n\t\tUpdateArticleSQL = UpdateArticleSQL.Set(\"count_article\", Article.CountArticle)\n\t}\n\n\tif len(Article.URL) > 0 {\n\t\t_, ErrorUpdateImage := sq.Update(\"article_images\").\n\t\t\tSet(\"url\", Article.URL).\n\t\t\tWhere(\"article_id = ?\", ArticleID).\n\t\t\tRunWith(database).\n\t\t\tExec()\n\n\t\tif ErrorUpdateImage != nil {\n\t\t\tfmt.Println(\"Problem with update image for article\", ErrorUpdateImage)\n\t\t}\n\t}\n\n\t_, ErrorUpdateArticle := UpdateArticleSQL.\n\t\tWhere(\"article_id = ? and hallways_id = ?\", ArticleID, HallwaysID).\n\t\tRunWith(database).\n\t\tExec()\n\n\tif ErrorUpdateArticle != nil {\n\t\tfmt.Println(\"Problem with update article\", ErrorUpdateArticle)\n\t\tc.JSON(ErrorResponse{MESSAGE: \"Problem with update article\"})\n\t\tc.SendStatus(400)\n\t\treturn\n\t}\n\n\tc.JSON(SuccessResponse{MESSAGE: \"Update aricle\"})\n\n}", "func (l LiveAgent) Save() error {\n\n\tvar db = data.GetDB()\n\n\tquery := fmt.Sprintf(`\n\t\tINSERT INTO vicidial_live_agents \n\t\t\t(live_agent_id, user, server_ip, conf_exten, extension, status, lead_id, campaign_id, uniqueid, callerid, channel, random_id, \n\t\t\tlast_call_time, last_update_time, last_call_finish, closer_campaigns, call_server_ip, user_level, \n\t\t\tcomments, campaign_weight, calls_today, external_hangup, external_status, external_pause, external_dial, \n\t\t\texternal_ingroups, external_blended, external_igb_set_user, external_update_fields, external_update_fields_data, \n\t\t\texternal_timer_action, external_timer_action_message, external_timer_action_seconds, agent_log_id, last_state_change, \n\t\t\tagent_territories, outbound_autodial, manager_ingroup_set, ra_user, ra_extension, external_dtmf, external_transferconf, \n\t\t\texternal_park, external_timer_action_destination, on_hook_agent, on_hook_ring_time, ring_callerid, last_inbound_call_time, \n\t\t\tlast_inbound_call_finish, campaign_grade, external_recording, external_pause_code, pause_code, preview_lead_id, external_lead_id, \n\t\t\tlast_inbound_call_time_filtered, last_inbound_call_finish_filtered) \n\t\tVALUES \n\t\t\t(NULL, 'duser2', '172.16.10.209', '8600051', 'SIP/102', 'PAUSED', '0', 'DCAMP', '', '', '', '11036487', \n\t\t\t'2020-08-19 16:29:09', '2020-08-19 16:30:03', '2020-08-19 16:29:09', '-', NULL, '1', \n\t\t\tNULL, '0', '0', '', '', '', '', \n\t\t\tNULL, '0', '', '0', '', \n\t\t\t'', '', '-1', '196', '2020-08-19 16:29:14', NULL, 'N', 'N', '', '', '', '', '', '', 'N', '60', '', '2020-08-19 16:29:09', '2020-08-19 16:29:09', '10', '', '', 'LOGIN', '0', '0', '2020-08-19 16:29:09', '2020-08-19 16:29:09')\n\t`)\n\n\treturn db.Exec(query).Error\n}", "func Store(w http.ResponseWriter, r *http.Request) {\n\tc := flight.Context(w, r)\n\n\tif !c.FormValid(\"name\") {\n\t\tCreate(w, 
r)\n\t\treturn\n\t}\n\n\t_, err := summary.Create(c.DB, r.FormValue(\"name\"))\n\tif err != nil {\n\t\tc.FlashErrorGeneric(err)\n\t\tCreate(w, r)\n\t\treturn\n\t}\n\n\tc.FlashSuccess(\"Item added.\")\n\tc.Redirect(uri)\n}", "func Init_Bill() {\n\t// Enrollment\n\tif !State[\"Enrollment\"] {\n\t\tInit_Enrollment()\n\t}\n\n\t// Place\n\tdb.Model(&Place{}).Create(&place01)\n\tdb.Model(&Place{}).Create(&place02)\n\tdb.Model(&Place{}).Create(&place03)\n\n\t// PaymentType\n\tdb.Model(&PaymentType{}).Create(&paymentType_01)\n\tdb.Model(&PaymentType{}).Create(&paymentType_02)\n\tdb.Model(&PaymentType{}).Create(&paymentType_03)\n\n\t// Bill - DIDN'T SETUP\n\n\tState[\"Bill\"] = true\n}", "func (server *LaptopServer) CreateLaptop(ctx context.Context, req *pb.CreateLaptopRequest) (*pb.CreateLaptopResponse, error) {\r\n\tlaptop := req.GetLaptop()\r\n\tlog.Printf(\"receive a create-laptop request with id: %s\", laptop.Id)\r\n\tif len(laptop.Id) > 0 {\r\n\t\t// check valid UUID\r\n\t\t_, err := uuid.Parse(laptop.Id)\r\n\t\tif err != nil {\r\n\t\t\treturn nil, status.Errorf(codes.InvalidArgument, \"laptop ID is not a valid UUID: %v\", err)\r\n\t\t}\r\n\t} else {\r\n\t\t// if client hasn't sent laptop ID, we could generate it on server\r\n\t\tid, err := uuid.NewRandom()\r\n\t\tif err != nil {\r\n\t\t\treturn nil, status.Errorf(codes.Internal, \"cannot generate a new laptop id: %v\", err)\r\n\t\t}\r\n\t\tlaptop.Id = id.String()\r\n\t}\r\n\r\n\t// Simulate a heavy network\r\n\t// time.Sleep(6 * time.Second)\r\n\tif err := contextError(ctx); err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\t// save the laptop\r\n\terr := server.laptopStore.Save(laptop)\r\n\tif err != nil {\r\n\t\tcode := codes.Internal\r\n\t\tif errors.Is(err, ErrAlreadyExists) {\r\n\t\t\tcode = codes.AlreadyExists\r\n\t\t}\r\n\t\treturn nil, status.Errorf(code, \"cannot save laptop to the laptopStore: %v\", err)\r\n\t}\r\n\r\n\tlog.Printf(\"save laptop with id: %s\", laptop.Id)\r\n\r\n\tres := &pb.CreateLaptopResponse{\r\n\t\tId: laptop.Id,\r\n\t}\r\n\treturn res, nil\r\n}", "func saveBlockChainToDb(chain BlockChain) error {\r\n\tdb := gofiledb.GetClient()\r\n\treturn db.SetStruct(\"blockchain\", \"blockchain_v1\", chain)\r\n}", "func Db_create(db_name string) string{\n\t// Open the my.db data_backup file in your current directory.\n\t// It will be created if it doesn't exist.\n\tdb, err := bolt.Open(db_name, 0600, nil)\n\t//defer db.Close()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\ttx, err := db.Begin(true)\n\tif err != nil {\n\t\tfmt.Println(\"Error : \",err)\n\t\tdb.Close()\n\t\treturn \"ERROR,DB_COMMIT\"\n\t}\n\tfmt.Printf(\"Created DB with ID %d\", tx.ID())\n\tdb.Close()\n\treturn \"OK,DB_COMMIT\"\n}", "func (l *Listing) CreateListing(db *gorm.DB) error {\n\tnow := time.Now()\n\tl.CreatedAt = now\n\tretVal := db.Create(l).Table(\"listings\").Scan(&l)\n\treturn retVal.Error\n}", "func Store() *bolt.DB {\n\treturn store\n}", "func (dao *Wager) Create(tx *gorm.DB, mod *models.Wager) error {\n\terr := tx.Create(mod).Error\n\tif err != nil {\n\t\treturn errs.Wrap(errs.ErrMySQLCreate, err.Error(), \"tx.Create\")\n\t}\n\treturn nil\n}", "func (r *ItemsRepository) save(i *Item) error {\n\tif query := r.databaseHandler.DB().Create(&i); query.Error != nil {\n\t\treturn query.Error\n\t}\n\treturn nil\n}" ]
[ "0.6893652", "0.63845044", "0.6010612", "0.5868746", "0.5757256", "0.550926", "0.54512924", "0.5415306", "0.5386392", "0.5367614", "0.5367389", "0.5325681", "0.52694136", "0.5249932", "0.5226924", "0.5207923", "0.5168792", "0.5058868", "0.5021252", "0.50161624", "0.5011729", "0.50038856", "0.4981908", "0.49727514", "0.49596134", "0.4953358", "0.49489963", "0.49386287", "0.49288124", "0.49284235", "0.492187", "0.4901028", "0.48727632", "0.48697323", "0.4865786", "0.48627126", "0.48581114", "0.4849399", "0.48414484", "0.48352835", "0.48237473", "0.48112833", "0.48089457", "0.48010984", "0.4798834", "0.47924978", "0.4790542", "0.47899503", "0.47839403", "0.4783603", "0.47610363", "0.47567588", "0.47309908", "0.47276762", "0.47255537", "0.4724943", "0.47182342", "0.4713264", "0.47072303", "0.4697952", "0.46971977", "0.46954218", "0.4693879", "0.46804014", "0.46797672", "0.46738717", "0.46736014", "0.46651193", "0.46595672", "0.46468237", "0.46448413", "0.46434644", "0.46393025", "0.46357045", "0.46351263", "0.46342167", "0.46275946", "0.46250358", "0.46248293", "0.46242398", "0.46146056", "0.46014985", "0.4597881", "0.45900732", "0.45898166", "0.45886797", "0.4588447", "0.45833242", "0.45800647", "0.4574838", "0.45696518", "0.45695028", "0.45600316", "0.45538208", "0.45532668", "0.4552289", "0.45460433", "0.45440048", "0.45435688", "0.45424426" ]
0.76290154
0
Format formats an integer number with a thousands separator for display.
func Format(number int64, thousandSep string) string {
	str := strconv.FormatInt(number, 10)
	// Keep the sign out of the digit grouping so -123456 renders as
	// "-123,456" rather than "-,123,456".
	sign := ""
	if number < 0 {
		sign, str = "-", str[1:]
	}
	nl := len(str)
	tl := len(thousandSep)
	// Final length: one separator inserted per complete group of three digits.
	rl := nl + (nl-1)/3*tl
	if nl == rl {
		// Three digits or fewer: nothing to group.
		return sign + str
	}
	b := make([]byte, rl)
	count := 0
	// Copy digits right to left, splicing in the separator after every
	// third digit (never at the very front).
	for i, j := nl-1, rl-1; i >= 0; i, j = i-1, j-1 {
		b[j] = str[i]
		count++
		if count%3 == 0 && j >= 1 {
			copy(b[j-tl:j], thousandSep)
			j -= tl
		}
	}
	return sign + string(b)
}
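A short usage sketch of the Format document above (illustrative only; it assumes Format is compiled in the same package, and the sample values are not part of the dataset):

package main

import "fmt"

func main() {
	// Format is the document function defined above.
	fmt.Println(Format(1234567, ","))    // 1,234,567
	fmt.Println(Format(-987654321, ".")) // -987.654.321
	fmt.Println(Format(42, ","))         // 42 (too short to group)
}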
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NumberFormat(number float64, decimals uint, decPoint, thousandsSep string) string {\n\tneg := false\n\tif number < 0 {\n\t\tnumber = -number\n\t\tneg = true\n\t}\n\tdec := int(decimals)\n\t// Will round off\n\tstr := fmt.Sprintf(\"%.\"+strconv.Itoa(dec)+\"F\", number)\n\tprefix, suffix := \"\", \"\"\n\tif dec > 0 {\n\t\tprefix = str[:len(str)-(dec+1)]\n\t\tsuffix = str[len(str)-dec:]\n\t} else {\n\t\tprefix = str\n\t}\n\tsep := []byte(thousandsSep)\n\tn, l1, l2 := 0, len(prefix), len(sep)\n\t// thousands sep num\n\tc := (l1 - 1) / 3\n\ttmp := make([]byte, l2*c+l1)\n\tpos := len(tmp) - 1\n\tfor i := l1 - 1; i >= 0; i, n, pos = i-1, n+1, pos-1 {\n\t\tif l2 > 0 && n > 0 && n%3 == 0 {\n\t\t\tfor j := range sep {\n\t\t\t\ttmp[pos] = sep[l2-j-1]\n\t\t\t\tpos--\n\t\t\t}\n\t\t}\n\t\ttmp[pos] = prefix[i]\n\t}\n\ts := string(tmp)\n\tif dec > 0 {\n\t\ts += decPoint + suffix\n\t}\n\tif neg {\n\t\ts = \"-\" + s\n\t}\n\n\treturn s\n}", "func FormatNumberWithSeparators(n int64) string {\n\tin := strconv.FormatInt(n, 10)\n\tnumOfDigits := len(in)\n\tnumOfCommas := (numOfDigits - 1) / 3\n\n\tout := make([]byte, len(in)+numOfCommas)\n\n\tfor i, j, k := len(in)-1, len(out)-1, 0; ; i, j = i-1, j-1 {\n\t\tout[j] = in[i]\n\t\tif i == 0 {\n\t\t\treturn string(out)\n\t\t}\n\t\tif k++; k == 3 {\n\t\t\tj, k = j-1, 0\n\t\t\tout[j] = ','\n\t\t}\n\t}\n}", "func (d Digits) Format(in int32) string {\n\tf := fmt.Sprintf(\"%%0%dd\", d)\n\treturn fmt.Sprintf(f, in)\n}", "func FormatInt(n int64, groupSize int, grouping byte) string {\n\tif groupSize < 1 {\n\t\tgroupSize = 3\n\t}\n\n\tin := strconv.FormatInt(n, 10)\n\tnumOfDigits := len(in)\n\tif n < 0 {\n\t\tnumOfDigits-- // First character is the - sign (not a digit)\n\t}\n\tnumOfCommas := (numOfDigits - 1) / groupSize\n\n\tout := make([]byte, len(in)+numOfCommas)\n\tif n < 0 {\n\t\tin, out[0] = in[1:], '-'\n\t}\n\n\tfor i, j, k := len(in)-1, len(out)-1, 0; ; i, j = i-1, j-1 {\n\t\tout[j] = in[i]\n\t\tif i == 0 {\n\t\t\treturn string(out)\n\t\t}\n\t\tif k++; k == groupSize {\n\t\t\tj, k = j-1, 0\n\t\t\tout[j] = grouping\n\t\t}\n\t}\n}", "func prettyNumber(n int64) string {\n\tif n < 1000 {\n\t\treturn fmt.Sprintf(\"%d\", n)\n\t}\n\treturn fmt.Sprintf(\"%s,%03d\", prettyNumber(n/1000), n%1000)\n}", "func formatAmount(amount int, locale, currency string) string {\n\tisNegative := amount < 0\n\n\tif isNegative {\n\t\tamount *= -1\n\t}\n\n\tmajor, minor := amount/100, amount%100\n\tmajorString := strconv.Itoa(major)\n\n\tloc, _ := localisations[locale]\n\ttSep, dSep := loc.thousandsSeparator, loc.decimalSeparator\n\n\tvar out strings.Builder\n\n\tmajorLength := len(majorString)\n\tif majorLength%3 != 0 {\n\t\tout.WriteString(majorString[:majorLength%3])\n\t}\n\n\tfor i := len(majorString) % 3; i < majorLength; i += 3 {\n\t\tif i != 0 {\n\t\t\tout.WriteByte(tSep)\n\t\t}\n\t\tout.WriteString(majorString[i : i+3])\n\t}\n\n\tout.WriteByte(dSep)\n\tout.WriteString(fmt.Sprintf(\"%02d\", minor))\n\n\tconverted := out.String()\n\n\tif isNegative {\n\t\tconverted = fmt.Sprintf(loc.negativeFormatString, currencySymbols[currency], converted)\n\t} else {\n\t\tconverted = fmt.Sprintf(loc.positiveFormatString, currencySymbols[currency], converted)\n\t}\n\n\treturn converted\n}", "func Format(n int64) string {\n\tin := strconv.FormatInt(n, 10)\n\tnumOfDigits := len(in)\n\tif n < 0 {\n\t\tnumOfDigits-- // First character is the - sign (not a digit)\n\t}\n\tnumOfCommas := (numOfDigits - 1) / 3\n\n\tout := make([]byte, len(in)+numOfCommas)\n\tif n < 0 {\n\t\tin, out[0] = in[1:], 
'-'\n\t}\n\n\tfor i, j, k := len(in)-1, len(out)-1, 0; ; i, j = i-1, j-1 {\n\t\tout[j] = in[i]\n\t\tif i == 0 {\n\t\t\treturn string(out)\n\t\t}\n\t\tif k++; k == 3 {\n\t\t\tj, k = j-1, 0\n\t\t\tout[j] = ','\n\t\t}\n\t}\n}", "func Format(n int64) string {\n\tin := strconv.FormatInt(n, 10)\n\tnumOfDigits := len(in)\n\tif n < 0 {\n\t\tnumOfDigits-- // First character is the - sign (not a digit)\n\t}\n\tnumOfCommas := (numOfDigits - 1) / 3\n\n\tout := make([]byte, len(in)+numOfCommas)\n\tif n < 0 {\n\t\tin, out[0] = in[1:], '-'\n\t}\n\n\tfor i, j, k := len(in)-1, len(out)-1, 0; ; i, j = i-1, j-1 {\n\t\tout[j] = in[i]\n\t\tif i == 0 {\n\t\t\treturn string(out)\n\t\t}\n\t\tif k++; k == 3 {\n\t\t\tj, k = j-1, 0\n\t\t\tout[j] = ','\n\t\t}\n\t}\n}", "func IntComma(i int) string {\n\tif (i < 0) {\n\t\treturn \"-\" + IntComma(-i)\n\t}\n\tif (i < 1000) {\n\t\treturn fmt.Sprintf(\"%d\", i)\n\t}\n\treturn IntComma(i / 1000) + \",\" + fmt.Sprintf(\"%03d\", i % 1000)\n}", "func (l *Locale) FormatFloat(number float64, digits nbutils.Digits) string {\n\tnumber = nbutils.Round(number, digits.ToPrecision())\n\tformat := fmt.Sprintf(\"%%.%df\", digits.Scale)\n\tnumStr := fmt.Sprintf(format, number)\n\tparts := strings.Split(numStr, \".\")\n\tintPart := parts[0]\n\tvar decPart string\n\tif len(parts) > 1 {\n\t\tdecPart = parts[1]\n\t}\n\t// Add \"thousands\" separators\n\tvar (\n\t\tlastGrouping int\n\t\tkeepGrouping bool\n\t)\n\tgroups := []string{intPart}\n\t// Iterate on each group\n\tfor _, n := range l.Grouping {\n\t\tif n == 0 {\n\t\t\tkeepGrouping = true\n\t\t\tbreak\n\t\t}\n\t\tvar ok bool\n\t\tgroups, ok = groupDigits(groups, n)\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tlastGrouping = n\n\t}\n\t// Continue grouping if applicable\n\tif keepGrouping {\n\t\tok := true\n\t\tfor ok {\n\t\t\tgroups, ok = groupDigits(groups, lastGrouping)\n\t\t}\n\t}\n\tres := strings.Join(groups, l.ThousandsSep)\n\t// Add decimal part if any\n\tif decPart != \"\" {\n\t\tres += l.DecimalPoint + decPart\n\t}\n\treturn res\n}", "func (b *BookPrinter) FormatNumber(symbol string, amount *big.Rat) string {\n\tf, _ := amount.Float64()\n\tdec, ok := b.decs[symbol]\n\tif !ok {\n\t\tdec = 2\n\t}\n\treturn b.pr.Sprintf(\"%.*f\", dec, f)\n}", "func FormatInt(name string) string {\n\treturn formatIntFunction(name, true)\n}", "func FmtComma(number string) string {\n\tparts := strings.Split(number, \".\")\n\tpart0 := parts[0]\n\tszInt := len(part0)\n\tpart1 := \"\"\n\n\tif szInt == 0 || len(parts) > 2 {\n\t\treturn number\n\t} else if len(parts) == 2 {\n\t\tpart1 = \".\" + parts[1]\n\t}\n\n\tbound := szInt - 1\n\tbytes := make([]byte, szInt+bound/3)\n\n\tfor i, j := bound, len(bytes)-1; i >= 0 && j >= 0; i-- {\n\t\tisCommaPos := (bound-i)%3 == 0\n\t\tif i != bound && isCommaPos {\n\t\t\tbytes[j] = ','\n\t\t\tj--\n\t\t}\n\t\tbytes[j] = part0[i]\n\t\tj--\n\t}\n\n\treturn string(bytes) + part1\n}", "func FormatInt(value interface{}, _ map[string]string) string {\n\treturn strconv.FormatInt(int64(value.(int)), 10)\n}", "func FormatCommas(num int) string {\r\n\tnumString := strconv.Itoa(num)\r\n\tfor {\r\n\t\tformatted := commaRegEx.ReplaceAllString(numString, \"$1,$2\")\r\n\t\tif formatted == numString {\r\n\t\t\treturn formatted\r\n\t\t}\r\n\t\tnumString = formatted\r\n\t}\r\n}", "func (x *Int) Format(s fmt.State, ch rune) {}", "func formatAmount(amount string) string {\n\treturn strings.Replace(amount, \",\", \".\", -1)\n}", "func MoneyFormat(param float64) string {\n\tac := accounting.Accounting{Precision: 2}\n\treturn 
ac.FormatMoney(param)\n}", "func (s *Simulator) FormatIntDecimals(value sdk.Int, decRatio sdk.Dec) string {\n\tvalueDec := sdk.NewDecFromInt(value)\n\tfixedDec := valueDec.Mul(decRatio)\n\n\treturn fixedDec.String()\n}", "func Format(in string) (out string, err error) {\n\tout, err = Number(in)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tout = fmt.Sprintf(\"(%s) %s-%s\", out[:3], out[3:6], out[6:])\n\treturn out[:], err\n}", "func ReformatNumber(number string) string{\n\tnumber = strings.ReplaceAll(number,\"-\",\"\")\n\tnumber = strings.ReplaceAll(number,\" \",\"\")\n\tvar l int = len(number)\n\tif l <= 3{\n\t\treturn number\n\t}\n\tmod := l % 3\n\tvar two bool = false\n\tif mod == 1{ //最后2个分组,在你l - 2前增加 -\n\t\ttwo = true\n\t}\n\tvar add_last bool = false\n\tvar res string\n\tfor i := 0;i < l;i++{\n\t\tif i == l - 2 && two {\n\t\t\tres += \"-\"\n\t\t\tadd_last = true\n\t\t}else{\n\t\t\tif !add_last && i != 0 && i % 3 == 0{\n\t\t\t\tres += \"-\"\n\t\t\t}\n\t\t}\n\t\tres += string(number[i])\n\t}\n\treturn res\n}", "func Format(input string) (output string, e error) {\n\to,e := Number(input)\n\toutput = fmt.Sprintf(\"(%s) %s-%s\", o[:3], o[3:6], o[6:])\n\treturn\n}", "func comma(v int64) string {\n\tsign := \"\"\n\n\t// Min int64 can't be negated to a usable value, so it has to be special cased.\n\tif v == math.MinInt64 {\n\t\treturn \"-9,223,372,036,854,775,808\"\n\t}\n\n\tif v < 0 {\n\t\tsign = \"-\"\n\t\tv = 0 - v\n\t}\n\n\tparts := []string{\"\", \"\", \"\", \"\", \"\", \"\", \"\"}\n\tj := len(parts) - 1\n\n\tfor v > 999 {\n\t\tparts[j] = strconv.FormatInt(v%1000, 10)\n\t\tswitch len(parts[j]) {\n\t\tcase 2:\n\t\t\tparts[j] = \"0\" + parts[j]\n\t\tcase 1:\n\t\t\tparts[j] = \"00\" + parts[j]\n\t\t}\n\t\tv = v / 1000\n\t\tj--\n\t}\n\tparts[j] = strconv.Itoa(int(v))\n\treturn sign + strings.Join(parts[j:], \",\")\n}", "func comma(v int64) string {\n\tsign := \"\"\n\n\t// Min int64 can't be negated to a usable value, so it has to be special cased.\n\tif v == math.MinInt64 {\n\t\treturn \"-9,223,372,036,854,775,808\"\n\t}\n\n\tif v < 0 {\n\t\tsign = \"-\"\n\t\tv = 0 - v\n\t}\n\n\tparts := []string{\"\", \"\", \"\", \"\", \"\", \"\", \"\"}\n\tj := len(parts) - 1\n\n\tfor v > 999 {\n\t\tparts[j] = strconv.FormatInt(v%1000, 10)\n\t\tswitch len(parts[j]) {\n\t\tcase 2:\n\t\t\tparts[j] = \"0\" + parts[j]\n\t\tcase 1:\n\t\t\tparts[j] = \"00\" + parts[j]\n\t\t}\n\t\tv = v / 1000\n\t\tj--\n\t}\n\tparts[j] = strconv.Itoa(int(v))\n\treturn sign + strings.Join(parts[j:], \",\")\n}", "func Format(phoneNum string) (string, error) {\n\tr, err := Number(phoneNum)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn fmt.Sprintf(\"(%v) %v-%v\", r[:3], r[3:6], r[6:]), nil\n}", "func IntToStringWithFormat(v Int, format string) String {\n\tif format == \"%d\" { // Same as not using custom formatting.\n\t\treturn IntToString(v)\n\t}\n\n\tstr := &stringFromInt{from: v, format: format}\n\tv.AddListener(str)\n\treturn str\n}", "func FormatRate(n int64) string {\n\tin := strconv.FormatInt(n, 10)\n\tout := make([]byte, len(in)+(len(in)-2+int(in[0]/'0'))/3)\n\tif in[0] == '-' {\n\t\tin, out[0] = in[1:], '-'\n\t}\n\n\tfor i, j, k := len(in)-1, len(out)-1, 0; ; i, j = i-1, j-1 {\n\t\tout[j] = in[i]\n\t\tif i == 0 {\n\t\t\treturn string(out)\n\t\t}\n\t\tif k++; k == 3 {\n\t\t\tj, k = j-1, 0\n\t\t\tout[j] = ','\n\t\t}\n\t}\n}", "func PrettyNum(n int) string {\n\tneg := n < 0\n\tif neg {\n\t\tn *= -1 // absolute value\n\t}\n\tvar buffer bytes.Buffer\n\ts := strconv.Itoa(n)\n\tfor i, v := range jtext.ReverseStr(s) 
{\n\t\tif (i > 0) && (i%3 == 0) {\n\t\t\tbuffer.WriteRune(',')\n\t\t}\n\t\tbuffer.WriteRune(v)\n\t}\n\tif neg {\n\t\tbuffer.WriteRune('-')\n\t}\n\treturn jtext.ReverseStr(buffer.String())\n}", "func IntPriceToString(p int) string {\n\treturn fmt.Sprintf(\"£%.4f\", float64(p)/10000.0)\n}", "func Insert_number_comma(input_num int) string {\n\ttemp_str := strconv.Itoa(input_num)\n\tvar num_arr []string\n\ti := len(temp_str)%3;\n\tif i == 0 { i = 3 }\n\tfor index, elem := range strings.Split(temp_str, \"\") {\n\t\tif i == index {\n\t\t\tnum_arr = append(num_arr, \",\");\n\t\t\ti += 3;\n\t\t}\n\t\tnum_arr = append(num_arr, elem)\n\t}\n\treturn strings.Join(num_arr, \"\")\n}", "func (sms *SMS) format(phoneNumber string) (phoneNumberFormatted string) {\n\tphoneNumberFormatted = phoneNumber\n\n\tif len(phoneNumber) == 10 {\n\t\tphoneNumberFormatted = \"1\" + phoneNumber\n\t}\n\n\treturn\n}", "func FormatNumber(nf i18n.NumberFormatter) OptionFunc {\n\tif nf == nil {\n\t\tnf = DefaultFormatterNumber\n\t}\n\treturn func(c *Currency) OptionFunc {\n\t\tprevious := c.fmtNum\n\t\tc.fmtNum = nf\n\t\treturn FormatNumber(previous)\n\t}\n}", "func int32InsertComma(val uint) string {\n\ts := strconv.Itoa(int(val))\n\tn := 3\n var buffer bytes.Buffer\n for i, rune := range s {\n \tif i != 0 && (len(s)-i) % n == 0 {\n \t\tbuffer.WriteRune(',')\t\n \t} \n buffer.WriteRune(rune)\n }\n\n return buffer.String()\n}", "func Format(given string) (string, error) {\n\tnumber, err := Number(given)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn fmt.Sprintf(\"(%s) %s-%s\", number[0:3], number[3:6], number[6:]), nil\n}", "func (b *BookPrinter) FormatMoney(symbol string, amount *big.Rat, maxlen int) string {\n\tsym := b.FormatSymbol(symbol)\n\tl := maxlen - utf8.RuneCountInString(sym)\n\tnum := b.pr.Sprintf(\"%s%*s\", sym, l, b.FormatNumber(symbol, amount))\n\tvar zero big.Rat\n\tif amount.Cmp(&zero) >= 0 {\n\t\treturn b.Ansi(Blue, num)\n\t} else {\n\t\treturn b.Ansi(Red, num)\n\t}\n}", "func Format(given string) (string, error) {\n\tclean, err := Number(given)\n\tif err != nil {\n\t\treturn clean, err\n\t}\n\treturn fmt.Sprintf(\"(%s) %s-%s\", clean[:3], clean[3:6], clean[6:10]), nil\n}", "func (nb SecretNumber) Format(f fmt.State, c rune) {\n\tf.Write([]byte(nb.String()))\n}", "func (humanizer *Humanizer) HumanizeNumber(value float64, digits int) string {\n\treturn humanizer.printer.Sprintf(\"%g\", number.Decimal(value, number.MaxFractionDigits(digits)))\n}", "func (pb *Bar) Format(v int64) string {\n\tif pb.GetBool(Bytes) {\n\t\treturn formatBytes(v, pb.GetBool(SIBytesPrefix))\n\t}\n\treturn strconv.FormatInt(v, 10)\n}", "func (t epochTime) Format(str string) string {\n\treturn time.Time(t).Format(str)\n}", "func FormatUSD(x int64) string {\n\treturn fmt.Sprintf(\"%.2f\", float64(x)/100.0)\n}", "func FormatInts[T constraints.Integer](slice []T, base int) []string {\n\tif len(slice) == 0 {\n\t\treturn nil\n\t}\n\tout := make([]string, len(slice))\n\tfor i, x := range slice {\n\t\tout[i] = strconv.FormatInt(int64(x), base)\n\t}\n\treturn out\n}", "func (t *Translator) FmtNumber(number interface{}) string {\n\treturn t.locale.Number.FmtNumber(toFloat64(number))\n}", "func StringNumber(numberPairs int, separator string) string {\n\treturn StringNumberExt(numberPairs, separator, 2)\n}", "func (ln *localen) FmtNumber(num float64, v uint64) string {\n\treturn ln.fnFmtNumber(ln, num, v)\n}", "func (l *Locale) FormatMonetary(value float64, curr Currency) string {\n\tdigs := nbutils.Digits{Precision: 16, Scale: 
int8(curr.DecimalPlaces())}\n\tamount := l.FormatFloat(value, digs)\n\tif curr.Position() == \"before\" {\n\t\treturn fmt.Sprintf(\"%s %s\", curr.Symbol(), amount)\n\t}\n\treturn fmt.Sprintf(\"%s %s\", amount, curr.Symbol())\n}", "func Ordinal(number int64) string {\n\tnumber = int64(math.Abs(float64(number)))\n\tswitch number % 100 {\n\tcase 11, 12, 13:\n\t\treturn \"th\"\n\tdefault:\n\t\tswitch number % 10 {\n\t\tcase 1:\n\t\t\treturn \"st\"\n\t\tcase 2:\n\t\t\treturn \"nd\"\n\t\tcase 3:\n\t\t\treturn \"rd\"\n\t\tdefault:\n\t\t\treturn \"th\"\n\t\t}\n\t}\n}", "func Format(number *PhoneNumber, numberFormat PhoneNumberFormat) string {\n\tif number.GetNationalNumber() == 0 && len(number.GetRawInput()) > 0 {\n\t\t// Unparseable numbers that kept their raw input just use that.\n\t\t// This is the only case where a number can be formatted as E164\n\t\t// without a leading '+' symbol (but the original number wasn't\n\t\t// parseable anyway).\n\t\t// TODO: Consider removing the 'if' above so that unparseable\n\t\t// strings without raw input format to the empty string instead of \"+00\"\n\t\trawInput := number.GetRawInput()\n\t\tif len(rawInput) > 0 {\n\t\t\treturn rawInput\n\t\t}\n\t}\n\tvar formattedNumber = NewBuilder(nil)\n\tFormatWithBuf(number, numberFormat, formattedNumber)\n\treturn formattedNumber.String()\n}", "func formatDuration(i time.Duration, millisecondSep string) (s string) {\n\t// Parse hours\n\tvar hours = int(i / time.Hour)\n\tvar n = i % time.Hour\n\tif hours < 10 {\n\t\ts += \"0\"\n\t}\n\ts += strconv.Itoa(hours) + \":\"\n\n\t// Parse minutes\n\tvar minutes = int(n / time.Minute)\n\tn = i % time.Minute\n\tif minutes < 10 {\n\t\ts += \"0\"\n\t}\n\ts += strconv.Itoa(minutes) + \":\"\n\n\t// Parse seconds\n\tvar seconds = int(n / time.Second)\n\tn = i % time.Second\n\tif seconds < 10 {\n\t\ts += \"0\"\n\t}\n\ts += strconv.Itoa(seconds) + millisecondSep\n\n\t// Parse milliseconds\n\tvar milliseconds = int(n / time.Millisecond)\n\tif milliseconds < 10 {\n\t\ts += \"00\"\n\t} else if milliseconds < 100 {\n\t\ts += \"0\"\n\t}\n\ts += strconv.Itoa(milliseconds)\n\treturn\n}", "func Comma(v int64) string {\n\tsign := \"\"\n\n\t// Min int64 can't be negated to a usable value, so it has to be special cased.\n\tif v == math.MinInt64 {\n\t\treturn \"-9,223,372,036,854,775,808\"\n\t}\n\n\tif v < 0 {\n\t\tsign = \"-\"\n\t\tv = 0 - v\n\t}\n\n\tparts := []string{\"\", \"\", \"\", \"\", \"\", \"\", \"\"}\n\tj := len(parts) - 1\n\n\tfor v > 999 {\n\t\tparts[j] = strconv.FormatInt(v%1000, 10)\n\t\tswitch len(parts[j]) {\n\t\tcase 2:\n\t\t\tparts[j] = \"0\" + parts[j]\n\t\tcase 1:\n\t\t\tparts[j] = \"00\" + parts[j]\n\t\t}\n\t\tv = v / 1000\n\t\tj--\n\t}\n\tparts[j] = strconv.Itoa(int(v))\n\treturn sign + strings.Join(parts[j:], \",\")\n}", "func formatSerial(serial int, format string) string {\n\treturn fmt.Sprintf(\"%0\"+format+\"d\", serial)\n}", "func (t Time) Format(ft string) string {\n\tp := epoch.Add(time.Second * time.Duration(t))\n\tif ft == \"\" {\n\t\tft = \"02 Jan 2006 15:04:05\"\n\t}\n\treturn p.In(time.UTC).Format(ft)\n}", "func formatTimestamp(t time.Time, milli bool) string {\n\tif milli {\n\t\treturn fmt.Sprintf(\"%d.%03d\", t.Unix(), t.Nanosecond()/1000000)\n\t}\n\n\treturn fmt.Sprintf(\"%d\", t.Unix())\n}", "func (metrics *Metrics) metricFormat(name string, value int) string {\n\treturn fmt.Sprintf(\n\t\t\"%s %d %d\\n\",\n\t\tmetrics.Tag+\".\"+name,\n\t\tvalue,\n\t\ttime.Now().Unix(),\n\t)\n}", "func numberName(n int) string {\n\tswitch {\n\tcase n < 0:\n\tcase n < 
20:\n\t\treturn small[n]\n\tcase n < 100:\n\t\tt := tens[n/10]\n\t\ts := n % 10\n\t\tif s > 0 {\n\t\t\tt += \" \" + small[s]\n\t\t}\n\t\treturn t\n\t}\n\treturn \"\"\n}", "func (id ID) Format(f fmt.State, r rune) {\n\tnumF, strF := `%d`, `%s`\n\tif r == 'q' {\n\t\tnumF, strF = `#%d`, `%q`\n\t}\n\n\tswitch {\n\tcase id.name != \"\":\n\t\tfmt.Fprintf(f, strF, id.name)\n\tdefault:\n\t\tfmt.Fprintf(f, numF, id.number)\n\t}\n}", "func (nff *NumberFormatFunc) Func(context.Context) interface{} {\n\treturn func(value interface{}, params ...int) string {\n\n\t\tprecision := int(nff.precision)\n\t\tif len(params) > 0 {\n\t\t\tprecision = params[0]\n\t\t}\n\n\t\tdefer func() {\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tnff.logger.Error(err)\n\t\t\t}\n\t\t}()\n\n\t\tvalueBigFloat, ok := value.(*big.Float)\n\t\tif ok {\n\t\t\treturn accounting.FormatNumberBigFloat(valueBigFloat, precision, nff.thousand, nff.decimal)\n\t\t}\n\n\t\treturn accounting.FormatNumber(value, precision, nff.thousand, nff.decimal)\n\t}\n}", "func printInt(n int64) {\n\ttableau := []int64{}\n\n\tif n == 0 {\n\t\tz01.PrintRune('0')\n\t}\n\n\tif n < 0 {\n\t\tz01.PrintRune('-')\n\t\tn = n * -1\n\t}\n\n\tfor n > 0 {\n\t\ttableau = append(tableau, n%10)\n\t\tn = n / 10\n\t}\n\n\tfor i := 0; i < len(tableau); i++ {\n\t\tfor j := i + 1; j < len(tableau); j++ {\n\t\t\ttableau[i], tableau[j] = tableau[j], tableau[i]\n\t\t}\n\t}\n\n\tfor _, element := range tableau {\n\t\tz01.PrintRune(rune(element + 48))\n\t}\n\tz01.PrintRune('\\n')\n}", "func main() {\n\tfmt.Println(comma(\"-1234567.1234567\"))\n}", "func (r *RUT) Format(f Formatter) string {\n\tif f == nil {\n\t\tf = DefaultFormatter\n\t}\n\treturn f(r.number, r.verifier)\n}", "func moneyToString(cents int, thousandsSep, decimalSep string) string {\n\tcentsStr := fmt.Sprintf(\"%03d\", cents) // Pad to 3 digits\n\tcentsPart := centsStr[len(centsStr)-2:]\n\trest := centsStr[:len(centsStr)-2]\n\tvar parts []string\n\tfor len(rest) > 3 {\n\t\tparts = append(parts, rest[len(rest)-3:])\n\t\trest = rest[:len(rest)-3]\n\t}\n\tif len(rest) > 0 {\n\t\tparts = append(parts, rest)\n\t}\n\trevParts := make([]string, 0, len(parts))\n\tfor i := len(parts) - 1; i >= 0; i-- {\n\t\trevParts = append(revParts, parts[i])\n\t}\n\tvar buf bytes.Buffer\n\tbuf.WriteString(strings.Join(revParts, thousandsSep))\n\tbuf.WriteString(decimalSep)\n\tbuf.WriteString(centsPart)\n\treturn buf.String()\n}", "func IntToStr(v int64) (str string) {\n\tstr = strconv.FormatInt(v, 10)\n\tneg := str[0:1] == \"-\"\n\tif neg {\n\t\tstr = str[1:]\n\t}\n\tstr = StrDelimit(str, \",\", 3)\n\tif neg {\n\t\tstr = \"-\" + str\n\t}\n\treturn\n}", "func FormatCurrencyAmount(locale string) pongo2.FilterFunction {\n\treturn func(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2.Error) {\n\n\t\tfAmount, err := strconv.ParseFloat(in.String(), 64)\n\t\tvar amount int64\n\t\tif err != nil {\n\t\t\tamount = 0\n\t\t}\n\n\t\tamount = int64(fAmount)\n\t\tcurrency := param.String()\n\n\t\tmoney := money.Money{\n\t\t\tM: amount,\n\t\t\tC: currency,\n\t\t}\n\n\t\treturn pongo2.AsValue(money.FormatNoSymbol(locale)), nil\n\t}\n}", "func Comma(v int64) string {\n\tsign := \"\"\n\n\t// minin64 can't be negated to a usable value, so it has to be special cased.\n\tif v == math.MinInt64 {\n\t\treturn \"-9,223,372,036,854,775,808\"\n\t}\n\n\tif v < 0 {\n\t\tsign = \"-\"\n\t\tv = 0 - v\n\t}\n\n\tparts := []string{\"\", \"\", \"\", \"\", \"\", \"\", \"\"}\n\tj := len(parts) - 1\n\n\tfor v > 999 {\n\t\tparts[j] = strconv.FormatInt(v%1000, 
10)\n\t\tswitch len(parts[j]) {\n\t\tcase 2:\n\t\t\tparts[j] = \"0\" + parts[j]\n\t\tcase 1:\n\t\t\tparts[j] = \"00\" + parts[j]\n\t\t}\n\t\tv = v / 1000\n\t\tj--\n\t}\n\tparts[j] = strconv.Itoa(int(v))\n\treturn sign + strings.Join(parts[j:], \",\")\n}", "func FormatDec(v string) (string, error) {\n\tparts := strings.Split(v, \".\")\n\tif len(parts) > 2 {\n\t\treturn \"\", fmt.Errorf(\"invalid decimal: too many points in %s\", v)\n\t}\n\n\tintPart, err := FormatInt(parts[0])\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(parts) == 1 {\n\t\treturn intPart, nil\n\t}\n\n\tdecPart := strings.TrimRight(parts[1], \"0\")\n\tif len(decPart) == 0 {\n\t\treturn intPart, nil\n\t}\n\n\t// Ensure that the decimal part has only digits.\n\t// https://github.com/cosmos/cosmos-sdk/issues/12811\n\tif !hasOnlyDigits(decPart) {\n\t\treturn \"\", fmt.Errorf(\"non-digits detected after decimal point in: %q\", decPart)\n\t}\n\n\treturn intPart + \".\" + decPart, nil\n}", "func PrintOneToThousandUsingForLoop() {\n\tfor i := 1; i <= 10000; i++ {\n\t\tfmt.Println(i)\n\t}\n}", "func (c *ClockVal) Format(d time.Duration) string {\n\tsecs := (d % time.Minute) / time.Second\n\tmins := (d % time.Hour) / time.Minute\n\thours := d / time.Hour\n\n\tif hours == 0 {\n\t\treturn fmt.Sprintf(\"%02dm:%02ds\", mins, secs)\n\t}\n\n\treturn fmt.Sprintf(\"%02dh:%02dm:%02ds\", hours, mins, secs)\n}", "func FormatValue(format string, val interface{}) string {\n\tswitch val := val.(type) {\n\tcase bool:\n\t\tif format == \"%d\" {\n\t\t\tif val {\n\t\t\t\treturn \"1\"\n\t\t\t}\n\t\t\treturn \"0\"\n\t\t}\n\t}\n\n\tif format == \"\" {\n\t\tformat = \"%v\"\n\t}\n\n\treturn fmt.Sprintf(format, val)\n}", "func StringInt(i int) string { return strconv.FormatInt(int64(i), 10) }", "func NumberSuffix(name string, cnt int) string {\n\treturn fmt.Sprintf(\"%s%d\", name, cnt)\n}", "func _(n int) int {\n\tif n > 1 {\n\t\tn *= 10\n\t} else if n > 10 {\n\t\tn *= 100\n\n\t} else if n > 1100 {\n\t\tn *= 110\n\t}\n\treturn n\n}", "func (p Int8Formatter) Format(f fmt.State, c rune) {\n\tif p.p == nil {\n\t\tfmt.Fprintf(f, \"<nil>\")\n\t\treturn\n\t}\n\tfmt.Fprintf(f, \"%\"+string(c), *p.p)\n}", "func StrCurrency100(amt100 int64) (str string) {\n\tvar sign string\n\tif amt100 < 0 {\n\t\tsign = \"-\"\n\t\tamt100 = -amt100\n\t} else {\n\t\tsign = \"\"\n\t}\n\tif amt100 < 100 {\n\t\tstr = fmt.Sprintf(\"%s$0.%02d\", sign, amt100)\n\t} else {\n\t\tstr = strconv.FormatInt(amt100, 10)\n\t\tln := len(str)\n\t\tstr = fmt.Sprintf(\"%s$%s.%s\", sign, StrDelimit(str[:ln-2], \",\", 3), str[ln-2:])\n\t}\n\treturn\n}", "func FormatInt32(name string) string {\n\treturn formatIntFunction(name, true)\n}", "func DelocalizeNumber(s string) string {\n\n\tif !strings.Contains(s, \",\") {\n\t\t// no comma; all is fine\n\t\treturn s\n\t}\n\n\t// contains comma, but contains no dot\n\tif !strings.Contains(s, \".\") {\n\t\t// => just comma instead of dot\n\t\ts = strings.ReplaceAll(s, \",\", \".\")\n\t\treturn s\n\t}\n\n\t// contains comma *and* dot\n\t//\n\t// 123,456.78\n\tif strings.Index(s, \".\") > strings.Index(s, \",\") {\n\t\t// => remove the thousands separator - 123456.78\n\t\ts = strings.ReplaceAll(s, \",\", \"\")\n\t\treturn s\n\t}\n\t//\n\t// 123.456,78\n\tif strings.Index(s, \",\") > strings.Index(s, \".\") {\n\t\t// => remove the thousands separator - 123456,78\n\t\ts = strings.ReplaceAll(s, \".\", \"\")\n\t\t// => replace the decimal separator - 123456.78\n\t\ts = strings.ReplaceAll(s, \",\", \".\")\n\t\treturn s\n\t}\n\n\treturn s\n}", "func 
IntToString(i int64) string {\n\treturn strconv.FormatInt(i, 10)\n}", "func print_int(n integer) {\n\tprint(fmt.Sprintf(\"%d\", n))\n}", "func humanize(f float64) string {\n\n\tsign := \"\"\n\tif f < 0 {\n\t\tsign = \"-\"\n\t\tf = -f\n\t}\n\n\tn := uint64(f)\n\n\t// Grab two rounded decimals\n\tdecimals := uint64((f+0.005)*100) % 100\n\n\tvar buf []byte\n\n\tif n == 0 {\n\t\tbuf = []byte{'0'}\n\t} else {\n\t\tbuf = make([]byte, 0, 16)\n\n\t\tfor n >= 1000 {\n\t\t\tfor i := 0; i < 3; i++ {\n\t\t\t\tbuf = append(buf, byte(n%10)+'0')\n\t\t\t\tn /= 10\n\t\t\t}\n\n\t\t\tbuf = append(buf, ',')\n\t\t}\n\n\t\tfor n > 0 {\n\t\t\tbuf = append(buf, byte(n%10)+'0')\n\t\t\tn /= 10\n\t\t}\n\t}\n\n\t// Reverse the byte slice\n\tfor l, r := 0, len(buf)-1; l < r; l, r = l+1, r-1 {\n\t\tbuf[l], buf[r] = buf[r], buf[l]\n\t}\n\n\treturn fmt.Sprintf(\"%s%s.%02d\", sign, buf, decimals)\n}", "func TransformIntToCurrency(intValue int, currency Currency) (string, error) {\r\n\tif currency == CurrencyDollars {\r\n\t\treturn FormatCentsToDollars(intValue), nil\r\n\t} else if currency == CurrencyBitcoin {\r\n\t\treturn fmt.Sprintf(\"%8.8f\", ConvertSatsToBSV(intValue)), nil\r\n\t}\r\n\treturn \"\", fmt.Errorf(\"currency %s cannot be transformed\", currency.Name())\r\n}", "func exampleNum(n []int) string {\n\tif len(n) == 1 {\n\t\treturn fmt.Sprintf(\"%d.\", n[0])\n\t}\n\treturn \"\"\n}", "func IntText(x *big.Int, base int) string", "func (h Hash32) Format(s fmt.State, c rune) {\n\t_, _ = fmt.Fprintf(s, \"%\"+string(c), h[:])\n}", "func FormatDecimalAmount(locale string) pongo2.FilterFunction {\n\tl := loc.Get(locale)\n\tcName := \"GBP\"\n\tif l != nil {\n\t\tcName = l.CurrencyCode\n\t}\n\tcurrency := c.Get(cName)\n\tdefaultDigits := currency.DecimalDigits\n\treturn func(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2.Error) {\n\n\t\tlog.Tracef(\"[FormatDecimalAmount] 000 IN: %s PARAM: %s LOCALE: %d\", in.String(), param.String(), locale)\n\n\t\tif len(in.String()) == 0 {\n\t\t\treturn pongo2.AsValue(\"\"), nil\n\t\t}\n\n\t\tfAmount, err := strconv.ParseFloat(in.String(), 64)\n\t\tif err != nil {\n\t\t\treturn nil, &pongo2.Error{\n\t\t\t\tSender: \"filterFormatDecimalAmount\",\n\t\t\t\tErrorMsg: fmt.Sprintf(\"Error formatting value - not parseable '%v': error: %s\", in, err),\n\t\t\t}\n\t\t}\n\n\t\tdigits := defaultDigits\n\t\tif param.IsInteger() {\n\t\t\tdigits = param.Integer()\n\t\t\tlog.Tracef(\"[FormatDecimalAmount] IN: %s PARAM: %s LOCALE: %s DIGITS: %d\", in.String(), param.String(), locale, digits)\n\t\t} else if param.IsString() && len(param.String()) > 0 {\n\t\t\tcName = param.String()\n\t\t\tcurrency := c.Get(cName)\n\t\t\tlog.Tracef(\"[FormatDecimalAmount] IN: %s PARAM: %s LOCALE: %d CURRENCY: %s DIGITS: %d\", in.String(), param.String(), locale, cName, digits)\n\t\t\tdigits = currency.DecimalDigits\n\t\t}\n\n\t\tlog.Tracef(\"[FormatDecimalAmount] IN: %s PARAM: %s LOCALE: %d DIGITS: %d\", in.String(), param.String(), locale, digits)\n\n\t\tif digits > 0 {\n\t\t\treturn pongo2.AsValue(strconv.FormatFloat(fAmount, 'f', digits, 64)), nil\n\t\t}\n\t\treturn pongo2.AsValue(strconv.FormatInt(int64(fAmount), 10)), nil\n\t}\n}", "func DecimalNumberInDiffFormats(x int) {\n\n\t// %b is for binary format\n\t// %x is for hexa decimal format\n\t// %d is for decimal format\n\tfmt.Printf(\"%d \\t %b \\t %x\", x, x, x)\n}", "func formatTimeString(in int) string {\n\tvar buf bytes.Buffer\n\n\tif h := in / 3600; h < 10 {\n\t\tbuf.WriteString(\"0\" + strconv.Itoa(h))\n\t} else 
{\n\t\tbuf.WriteString(strconv.Itoa(h))\n\t}\n\tbuf.WriteString(\":\")\n\tlt := in % 3600\n\tif m := lt / 60; m < 10 {\n\t\tbuf.WriteString(\"0\" + strconv.Itoa(m))\n\t} else {\n\t\tbuf.WriteString(strconv.Itoa(m))\n\t}\n\tbuf.WriteString(\":\")\n\tif s := lt % 60; s < 10 {\n\t\tbuf.WriteString(\"0\" + strconv.Itoa(s))\n\t} else {\n\t\tbuf.WriteString(strconv.Itoa(s))\n\t}\n\treturn buf.String()\n}", "func (x *Float) Format(s fmt.State, format rune) {}", "func renderInteger(f float64) string {\n\tif f > math.Nextafter(float64(math.MaxInt64), 0) {\n\t\treturn fmt.Sprintf(\"%d\", int64(math.MaxInt64))\n\t}\n\tif f < math.Nextafter(float64(math.MinInt64), 0) {\n\t\treturn fmt.Sprintf(\"%d\", int64(math.MinInt64))\n\t}\n\treturn fmt.Sprintf(\"%d\", int64(f))\n}", "func main() {\n\ts := \"0000001234567890\"\n\tfmt.Printf(\"%s\\n\", comma(s))\n}", "func (c *ColumnBase) ApplyFormat(data interface{}) string {\n\tvar out string\n\n\tswitch d := data.(type) {\n\tcase int:\n\t\tif c.format == \"\" {\n\t\t\tout = fmt.Sprintf(\"%d\", d)\n\t\t} else {\n\t\t\tout = fmt.Sprintf(c.format, d)\n\t\t}\n\tcase float64:\n\t\tif c.format == \"\" {\n\t\t\tout = fmt.Sprintf(\"%f\", d)\n\t\t} else {\n\t\t\tout = fmt.Sprintf(c.format, d)\n\t\t}\n\tcase float32:\n\t\tif c.format == \"\" {\n\t\t\tout = fmt.Sprintf(\"%f\", d)\n\t\t} else {\n\t\t\tout = fmt.Sprintf(c.format, d)\n\t\t}\n\n\tcase time.Time:\n\t\tt := d\n\t\ttimeFormat := c.timeFormat\n\t\tif timeFormat == \"\" {\n\t\t\ttimeFormat = config.DefaultDateTimeFormat\n\t\t}\n\t\tout = t.Format(timeFormat)\n\n\t\tif c.format != \"\" {\n\t\t\tout = fmt.Sprintf(c.format, out)\n\t\t}\n\tcase nil:\n\t\treturn \"\"\n\tdefault:\n\t\tvar format = c.format\n\t\tif format == \"\" {\n\t\t\tformat = `%v`\n\t\t}\n\t\tif any.IsSlice(d) {\n\t\t\ts := any.InterfaceSlice(d)\n\t\t\tvar items []string\n\t\t\tfor _, i := range s {\n\t\t\t\titems = append(items, fmt.Sprintf(format, i))\n\t\t\t}\n\t\t\treturn strings.Join(items, \", \")\n\t\t} else {\n\t\t\tout = fmt.Sprintf(format, d)\n\t\t}\n\t}\n\treturn out\n}", "func dollars(cents int64) string {\n\t// Get the value in dollars.\n\tdollars := float64(cents) / 100\n\n\t// Initialize the buffer to store the string result.\n\tvar buf bytes.Buffer\n\n\t// Check for a negative value.\n\tif dollars < 0 {\n\t\tbuf.WriteString(\"-\")\n\t\t// Convert the negative value to a positive value.\n\t\t// The code below can only handle positive values.\n\t\tdollars = 0 - dollars\n\t}\n\tbuf.WriteString(\"$\")\n\n\t// Convert the dollar value into a string and split it into a\n\t// integer and decimal. This is done so that commas can be added\n\t// to the integer.\n\tvar (\n\t\tf = strconv.FormatFloat(dollars, 'f', -1, 64)\n\t\ts = strings.Split(f, \".\")\n\t\tinteger = s[0]\n\n\t\t// The value may or may not have a decimal. Default to 0.\n\t\tdecimal = \".00\"\n\t)\n\tif len(s) > 1 {\n\t\t// The value includes a decimal. Overwrite the default.\n\t\tdecimal = \".\" + s[1]\n\t}\n\n\t// Write the integer to the buffer one character at a time. 
Commas\n\t// are inserted in their appropriate places.\n\t//\n\t// Examples\n\t// \"100000\" to \"100,000\"\n\t// \"1000000\" to \"1,000,000\"\n\tfor i, c := range integer {\n\t\t// A comma should be inserted if the character index is divisible\n\t\t// by 3 when counting from the right side of the string.\n\t\tdivByThree := (len(integer)-i)%3 == 0\n\n\t\t// A comma should never be inserted for the first character.\n\t\t// Ex: \"100000\" should not be \",100,000\"\n\t\tif divByThree && i > 0 {\n\t\t\tbuf.WriteString(\",\")\n\t\t}\n\n\t\t// Write the character to the buffer.\n\t\tbuf.WriteRune(c)\n\t}\n\n\t// Write the decimal to the buffer.\n\tbuf.WriteString(decimal)\n\n\treturn buf.String()\n}", "func HumanizeWithPrecision(n uint64, prec int) string {\n\tvar s string\n\tvar m string\n\tdecimals := 1000.0\n\tif prec > -1 {\n\t\tdecimals = math.Pow10(prec)\n\t}\n\tF := func(N uint64, div float64) string {\n\t\treduced := float64(N) / div\n\t\trounded := math.Round(reduced*decimals) / decimals\n\t\ts = strconv.FormatFloat(rounded, 'f', prec, 64)\n\t\treturn s\n\t}\n\tif n >= (uint64(1) << 50) {\n\t\ts = F(n, math.Pow(1024, 5))\n\t\tm = \" PiB\"\n\t} else if n >= (uint64(1) << 40) {\n\t\ts = F(n, math.Pow(1024, 4))\n\t\tm = \" TiB\"\n\t} else if n >= (uint64(1) << 30) {\n\t\ts = F(n, math.Pow(1024, 3))\n\t\tm = \" GiB\"\n\t} else if n >= (uint64(1) << 20) {\n\t\ts = F(n, math.Pow(1024, 2))\n\t\tm = \" MiB\"\n\t} else if n >= (uint64(1) << 10) {\n\t\ts = F(n, 1024.0)\n\t\tm = \" KiB\"\n\t} else {\n\t\ts = fmt.Sprintf(\"%d\", n)\n\t\tm = \" bytes\"\n\t}\n\n\treturn s + m\n}", "func (d Date) Format(ft string) string {\n\tt := epoch.Add(time.Hour * 24 * time.Duration(d))\n\tif ft == \"\" {\n\t\tft = \"02 Jan 2006\"\n\t}\n\treturn t.In(time.UTC).Format(ft)\n}", "func formatTime(second int) string {\n\thh := second / 3600\n\tmm := second % 3600 / 60\n\tss := second % 60\n\treturn fmt.Sprintf(\"%02d:%02d:%02d\", hh, mm, ss)\n}", "func textify(n int) string {\n\tif n > 1000 {\n\t\tpanic(\"Number out of range\")\n\t} else if n == 1000 {\n\t\treturn \"one thousand\"\n\t} else if n >= 100 {\n\t\tnum := n % 100\n\t\tandText := \"and \"\n\t\tif num == 0 {\n\t\t\tandText = \"\"\n\t\t}\n\t\treturn oneNames[n/100-1] + \" hundred \" + andText + textify(num)\n\t} else if n >= 20 {\n\t\tnum := n % 10\n\t\thyphen := \"-\"\n\t\tif num == 0 {\n\t\t\thyphen = \" \"\n\t\t}\n\t\treturn tenNames[n/10-1] + hyphen + textify(num)\n\t} else if n >= 1 {\n\t\treturn oneNames[n-1]\n\t} else {\n\t\treturn \"\"\n\t}\n\tpanic(\"Not reached\")\n}", "func Convert(number int) string {\n\tvar output string\n\tif number%3 == 0 {\n\t\toutput += \"Pling\"\n\t}\n\tif number%5 == 0 {\n\t\toutput += \"Plang\"\n\t}\n\tif number%7 == 0 {\n\t\toutput += \"Plong\"\n\t}\n\n\tif output == \"\" {\n\t\treturn fmt.Sprint(number)\n\t}\n\treturn output\n}", "func (node *Limit) Format(buf *TrackedBuffer) {\n\tif node == nil {\n\t\treturn\n\t}\n\tbuf.astPrintf(node, \" limit \")\n\tif node.Offset != nil {\n\t\tbuf.astPrintf(node, \"%v, \", node.Offset)\n\t}\n\tbuf.astPrintf(node, \"%v\", node.Rowcount)\n}", "func (l TemplatedNames) Format(w fmt.State, verb rune) {\n\tfor i, n := range l {\n\t\tif i > 0 {\n\t\t\tfmt.Fprintf(w, \", \")\n\t\t}\n\t\tn.Format(w, verb)\n\t}\n}", "func FormatSize(val int64) string {\n\tif val < 1000 {\n\t\treturn fmt.Sprint(val)\n\t}\n\treturn fmt.Sprint(gorivets.FormatInt64(val, 1000), \"(\", val, \")\")\n}", "func (size Size) Format(precision int, scale SizeScale) string {\n\tvar (\n\t\tpower = 0\n\t\tvalue = 
float64(size)\n\t)\n\n\tfor value >= float64(scale.Divizor) && power+1 < len(scale.Suffixes) {\n\t\tpower += 1\n\t\tvalue /= float64(scale.Divizor)\n\t}\n\n\treturn strconv.FormatFloat(value, 'f', precision, 64) +\n\t\tscale.Suffixes[power]\n}", "func (me TSAFPTPortugueseVatNumber) String() string { return xsdt.Integer(me).String() }" ]
[ "0.6865555", "0.6692752", "0.62552243", "0.62158614", "0.61491007", "0.60920817", "0.6066588", "0.6066588", "0.5948704", "0.589865", "0.58934903", "0.5886799", "0.57061166", "0.5703804", "0.5638288", "0.5582579", "0.557504", "0.5553886", "0.54600537", "0.54164964", "0.53379995", "0.5303881", "0.5240682", "0.5240682", "0.5237625", "0.52312535", "0.5203791", "0.5196784", "0.51516813", "0.511627", "0.5055624", "0.5050531", "0.5021231", "0.499273", "0.49076223", "0.4899563", "0.4881081", "0.48423195", "0.4815192", "0.47741172", "0.47613516", "0.4752029", "0.47491944", "0.47464725", "0.4733019", "0.47186187", "0.471364", "0.47093943", "0.46795812", "0.4676577", "0.46755266", "0.46736875", "0.46670663", "0.4666791", "0.46510777", "0.4644093", "0.46308824", "0.4628859", "0.4623673", "0.46227688", "0.461144", "0.46037823", "0.4601144", "0.45949116", "0.45783812", "0.45774004", "0.4562623", "0.4559487", "0.45587578", "0.45193493", "0.45164642", "0.4514905", "0.44953275", "0.44825897", "0.44648236", "0.4464716", "0.44590887", "0.4458784", "0.4453025", "0.44504118", "0.44361132", "0.4419971", "0.44182616", "0.44001007", "0.43973348", "0.43953487", "0.4394896", "0.43881503", "0.43863726", "0.43793398", "0.4371457", "0.43680677", "0.43611926", "0.4352311", "0.4350975", "0.43388304", "0.4331404", "0.43109906", "0.42964047", "0.4295861" ]
0.7894669
0
String returns the string representation
func (s CopyClusterSnapshotInput) String() string {
	return awsutil.Prettify(s)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s CreateAlgorithmOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateAlgorithmOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Library) String() string {\n\tres := make([]string, 5)\n\tres[0] = \"ID: \" + reform.Inspect(s.ID, true)\n\tres[1] = \"UserID: \" + reform.Inspect(s.UserID, true)\n\tres[2] = \"VolumeID: \" + reform.Inspect(s.VolumeID, true)\n\tres[3] = \"CreatedAt: \" + reform.Inspect(s.CreatedAt, true)\n\tres[4] = \"UpdatedAt: \" + reform.Inspect(s.UpdatedAt, true)\n\treturn strings.Join(res, \", \")\n}", "func (s CreateCanaryOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r Info) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func (s ReEncryptOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateFHIRDatastoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateQuickConnectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func String() string {\n\toutput := output{\n\t\tRerun: Rerun,\n\t\tVariables: Variables,\n\t\tItems: Items,\n\t}\n\tvar err error\n\tvar b []byte\n\tif Indent == \"\" {\n\t\tb, err = json.Marshal(output)\n\t} else {\n\t\tb, err = json.MarshalIndent(output, \"\", Indent)\n\t}\n\tif err != nil {\n\t\tmessageErr := Errorf(\"Error in parser. Please report this output to https://github.com/drgrib/alfred/issues: %v\", err)\n\t\tpanic(messageErr)\n\t}\n\ts := string(b)\n\treturn s\n}", "func (r *Registry) String() string {\n\tout := make([]string, 0, len(r.nameToObject))\n\tfor name, object := range r.nameToObject {\n\t\tout = append(out, fmt.Sprintf(\"* %s:\\n%s\", name, object.serialization))\n\t}\n\treturn strings.Join(out, \"\\n\\n\")\n}", "func (s CreateSceneOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateSafetyRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateLanguageModelOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o QtreeCreateResponse) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (r SendAll) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func (r ReceiveAll) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func (enc *simpleEncoding) String() string {\n\treturn \"simpleEncoding(\" + enc.baseName + \")\"\n}", "func (s CreateDatabaseOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (z Zamowienium) String() string {\n\tjz, _ := json.Marshal(z)\n\treturn string(jz)\n}", "func (s CreateHITTypeOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProgramOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateEntityOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectVersionOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Addshifttraderequest) String() string {\n \n \n \n \n o.AcceptableIntervals = []string{\"\"} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (r Rooms) String() string {\n\tjr, _ := json.Marshal(r)\n\treturn string(jr)\n}", "func (s CreateUseCaseOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (i Info) String() string {\n\ts, _ := i.toJSON()\n\treturn s\n}", "func (o 
*Botversionsummary) String() string {\n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (e ExternalCfps) String() string {\n\tje, _ := json.Marshal(e)\n\treturn string(je)\n}", "func (s CreateTrustStoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func String() string {\n\treturn fmt.Sprintf(\n\t\t\"AppVersion = %s\\n\"+\n\t\t\t\"VCSRef = %s\\n\"+\n\t\t\t\"BuildVersion = %s\\n\"+\n\t\t\t\"BuildDate = %s\",\n\t\tAppVersion, VCSRef, BuildVersion, Date,\n\t)\n}", "func (s CreateDataLakeOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateSolutionVersionOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s GetSceneOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (i NotMachine) String() string { return toString(i) }", "func (s CreateRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s StartPipelineReprocessingOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateDatastoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateSequenceStoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Adjustablelivespeakerdetection) String() string {\n \n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s CreateRateBasedRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r Resiliency) String() string {\n\tb, _ := json.Marshal(r)\n\treturn string(b)\n}", "func (s RestoreFromRecoveryPointOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o QtreeCreateResponseResult) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (s CreateWaveOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateRoomOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateBotLocaleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s DeleteAlgorithmOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (z Zamowienia) String() string {\n\tjz, _ := json.Marshal(z)\n\treturn string(jz)\n}", "func (i *Info) String() string {\n\tb, _ := json.Marshal(i)\n\treturn string(b)\n}", "func (s ProcessingFeatureStoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ExportProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r RoomOccupancies) String() string {\n\tjr, _ := json.Marshal(r)\n\treturn string(jr)\n}", "func (r *InterRecord) String() string {\n\tbuf := r.Bytes()\n\tdefer ffjson.Pool(buf)\n\n\treturn string(buf)\n}", "func (s CreateResolverRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateResolverRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateResolverRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateLayerOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Coretype) String() string {\n \n \n \n \n \n o.ValidationFields = []string{\"\"} \n \n o.ItemValidationFields = []string{\"\"} \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s CreateModelCardOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o 
*Limitchangerequestdetails) String() string {\n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s NetworkPathComponentDetails) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (t Terms) String() string {\n\tjt, _ := json.Marshal(t)\n\treturn string(jt)\n}", "func (g GetObjectOutput) String() string {\n\treturn helper.Prettify(g)\n}", "func (s StartContactEvaluationOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateModelOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateModelOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Interactionstatsalert) String() string {\n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Digitalcondition) String() string {\n\tj, _ := json.Marshal(o)\n\tstr, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n\treturn str\n}", "func (r RoomOccupancy) String() string {\n\tjr, _ := json.Marshal(r)\n\treturn string(jr)\n}", "func (d *Diagram) String() string { return toString(d) }", "func (o *Outboundroute) String() string {\n \n \n \n \n o.ClassificationTypes = []string{\"\"} \n \n \n o.ExternalTrunkBases = []Domainentityref{{}} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s CreateCodeRepositoryOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateActivationOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateBotOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ResolutionTechniques) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (c CourseCode) String() string {\n\tjc, _ := json.Marshal(c)\n\treturn string(jc)\n}", "func (s CreateTrialComponentOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (p *Parms) String() string {\n\tout, _ := json.MarshalIndent(p, \"\", \"\\t\")\n\treturn string(out)\n}", "func (p polynomial) String() (str string) {\n\tfor _, m := range p.monomials {\n\t\tstr = str + \" \" + m.String() + \" +\"\n\t}\n\tstr = strings.TrimRight(str, \"+\")\n\treturn \"f(x) = \" + strings.TrimSpace(str)\n}", "func (s CreateThingOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r *RUT) String() string {\n\treturn r.Format(DefaultFormatter)\n}", "func (s CreatePatchBaselineOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Crossplatformpolicycreate) String() string {\n \n \n \n \n \n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s BotVersionLocaleDetails) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s DeleteMultiplexProgramOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s LifeCycleLastTestInitiated) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s GetObjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s 
LifeCycleLastTestReverted) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateDocumentOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateComponentOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateIntegrationOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Commonruleconditions) String() string {\n o.Clauses = []Commonruleconditions{{}} \n o.Predicates = []Commonrulepredicate{{}} \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (t Test1s) String() string {\n\tjt, _ := json.Marshal(t)\n\treturn string(jt)\n}", "func (s CreateContactFlowOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Directrouting) String() string {\n\tj, _ := json.Marshal(o)\n\tstr, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n\treturn str\n}" ]
[ "0.721496", "0.721496", "0.72003424", "0.72003067", "0.71778786", "0.71672124", "0.71179444", "0.7087169", "0.708676", "0.70792294", "0.7078306", "0.7067698", "0.7031764", "0.7027706", "0.7026941", "0.70254856", "0.7020726", "0.70168954", "0.7010962", "0.70102316", "0.7001954", "0.6997284", "0.69971234", "0.6991765", "0.69905907", "0.69903153", "0.69867015", "0.69845456", "0.69752043", "0.69743115", "0.69629383", "0.6961912", "0.6961005", "0.69509166", "0.6947246", "0.69455487", "0.69455487", "0.69446045", "0.6940138", "0.6936814", "0.69329786", "0.69286585", "0.69271654", "0.69254273", "0.6922031", "0.69216454", "0.69182205", "0.69178134", "0.6911453", "0.6910748", "0.6909815", "0.6908686", "0.69052964", "0.6899659", "0.6896323", "0.6893855", "0.6893855", "0.6893855", "0.68922645", "0.68918127", "0.6891583", "0.6888694", "0.68884104", "0.6884165", "0.6882656", "0.6880121", "0.68768877", "0.68768877", "0.68755984", "0.68748397", "0.68738985", "0.68732196", "0.68729943", "0.6871865", "0.6869235", "0.68684727", "0.68684727", "0.68684727", "0.68684727", "0.68683946", "0.68661034", "0.6862186", "0.6862099", "0.6858425", "0.6856829", "0.6853848", "0.68523717", "0.685183", "0.68458325", "0.6843906", "0.68433076", "0.68429965", "0.68427455", "0.68420583", "0.6840824", "0.68394357", "0.68362874", "0.68344057", "0.6833331", "0.6832562", "0.6832457" ]
0.0
-1
Validate inspects the fields of the type to determine if they are valid.
func (s *CopyClusterSnapshotInput) Validate() error {
	invalidParams := aws.ErrInvalidParams{Context: "CopyClusterSnapshotInput"}

	if s.SourceSnapshotIdentifier == nil {
		invalidParams.Add(aws.NewErrParamRequired("SourceSnapshotIdentifier"))
	}
	if s.TargetSnapshotIdentifier == nil {
		invalidParams.Add(aws.NewErrParamRequired("TargetSnapshotIdentifier"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (f *InfoField) Validate() error {\n\tif err := f.BWCls.Validate(); err != nil {\n\t\treturn err\n\t}\n\tif err := f.RLC.Validate(); err != nil {\n\t\treturn err\n\t}\n\tif err := f.Idx.Validate(); err != nil {\n\t\treturn err\n\t}\n\tif err := f.PathType.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (f FieldSpec) Validate() error {\n\tif f.Name == \"\" {\n\t\treturn errors.New(\"Field name required\")\n\t}\n\n\tif f.Type == \"\" {\n\t\treturn errors.New(\"Field type required\")\n\t}\n\n\treturn nil\n}", "func (p *Pass) FieldsValid() bool {\n\tfmt.Printf(\"validating: \")\n\tvalid := true\n\tfor k, v := range *p {\n\t\tfmt.Printf(\"%v...\", k)\n\t\tv := isFieldValid(k, v)\n\t\tvalid = valid && v\n\t\tif v {\n\t\t\tfmt.Printf(\"VALID \")\n\t\t} else {\n\t\t\tfmt.Printf(\"INVALID \")\n\t\t}\n\t}\n\n\tfmt.Println(\"\")\n\treturn valid\n}", "func (m Type) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (m *TestFieldsEx2) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateFieldType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateProjectID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Validate(instance interface{}) string {\n\tval := unwrap(reflect.ValueOf(instance))\n\ttyp := val.Type()\n\n\tif typ.Kind() != reflect.Struct {\n\t\tcore.DefaultLogger.Panic(\"The provided instance is not a struct\")\n\t}\n\n\tvar result []string\n\n\tfor i := 0; i < typ.NumField(); i++ {\n\t\tfield := typ.Field(i)\n\t\tfieldTag := field.Tag\n\t\tif len(fieldTag) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfieldVal := val.Field(i)\n\t\tfieldKind := fieldVal.Kind()\n\t\tif !fieldVal.CanInterface() || fieldKind == reflect.Invalid {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar toEval []evalContext\n\t\tvar requiredCtx *evalContext\n\n\t\tfor _, v := range validators {\n\t\t\tif param, found := fieldTag.Lookup(v.key); found {\n\t\t\t\tctx := evalContext{validator: v, param: param}\n\n\t\t\t\tif v.key == required.key {\n\t\t\t\t\trequiredCtx = &ctx\n\t\t\t\t} else {\n\t\t\t\t\ttoEval = append(toEval, ctx)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif len(toEval) == 0 && requiredCtx == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif requiredCtx == nil {\n\t\t\trequiredCtx = &evalContext{validator: required, param: \"true\"}\n\t\t}\n\n\t\tvar errors []string\n\t\teval := func(ctx evalContext) bool {\n\t\t\tif err := ctx.validator.fn(fieldVal, ctx.param); len(err) > 0 {\n\t\t\t\terrors = append(errors, err)\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn true\n\t\t}\n\n\t\tif eval(*requiredCtx) {\n\t\t\tfor _, ctx := range toEval {\n\t\t\t\teval(ctx)\n\t\t\t}\n\t\t}\n\n\t\tif len(errors) > 0 {\n\t\t\tresult = append(result, fmt.Sprintf(\"%s: %s\", field.Name, strings.Join(errors, \", \")))\n\t\t}\n\t}\n\n\treturn strings.Join(result, \"; \")\n}", "func (info *structInfo) fieldValid(i int, t reflect.Type) bool {\n\treturn info.field(i).isValid(i, t)\n}", "func (v *ClassValue) Valid() bool {\n\tfor _, f := range v.Fields {\n\t\tif !f.Valid() {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (self *StructFieldDef) Validate() error {\n\tif self.Name == \"\" {\n\t\treturn fmt.Errorf(\"StructFieldDef.name is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"Identifier\", self.Name)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"StructFieldDef.name does not contain a valid Identifier (%v)\", 
val.Error)\n\t\t}\n\t}\n\tif self.Type == \"\" {\n\t\treturn fmt.Errorf(\"StructFieldDef.type is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Type)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"StructFieldDef.type does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Comment != \"\" {\n\t\tval := Validate(RdlSchema(), \"String\", self.Comment)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"StructFieldDef.comment does not contain a valid String (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Items != \"\" {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Items)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"StructFieldDef.items does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Keys != \"\" {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Keys)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"StructFieldDef.keys does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\treturn nil\n}", "func (v *Validator) Validate(data interface{}) (bool, []string, error) {\n\t//validate and check for any errors, reading explicit validations errors and returning\n\t//a list of fields that failed or the error\n\terr := v.Validator.Struct(data)\n\tif err != nil {\n\t\tvalidationErrs, ok := err.(validator.ValidationErrors)\n\t\tif !ok {\n\t\t\treturn false, nil, errors.Wrap(err, \"validate\")\n\t\t}\n\t\tfields := make([]string, 0)\n\t\tfor _, validationErr := range validationErrs {\n\t\t\tfields = append(fields, validationErr.Field())\n\t\t}\n\t\treturn false, fields, nil\n\t}\n\treturn true, nil, nil\n}", "func validateFields(req *logical.Request, data *framework.FieldData) error {\n\tvar unknownFields []string\n\tfor k := range req.Data {\n\t\tif _, ok := data.Schema[k]; !ok {\n\t\t\tunknownFields = append(unknownFields, k)\n\t\t}\n\t}\n\n\tif len(unknownFields) > 0 {\n\t\t// Sort since this is a human error\n\t\tsort.Strings(unknownFields)\n\n\t\treturn fmt.Errorf(\"unknown fields: %q\", unknownFields)\n\t}\n\n\treturn nil\n}", "func validateFields(req *logical.Request, data *framework.FieldData) error {\n\tvar unknownFields []string\n\tfor k := range req.Data {\n\t\tif _, ok := data.Schema[k]; !ok {\n\t\t\tunknownFields = append(unknownFields, k)\n\t\t}\n\t}\n\n\tif len(unknownFields) > 0 {\n\t\t// Sort since this is a human error\n\t\tsort.Strings(unknownFields)\n\n\t\treturn fmt.Errorf(\"unknown fields: %q\", unknownFields)\n\t}\n\n\treturn nil\n}", "func (s *RecordSchema) Validate(v reflect.Value) bool {\n\tv = dereference(v)\n\tif v.Kind() != reflect.Struct || !v.CanAddr() || !v.CanInterface() {\n\t\treturn false\n\t}\n\trec, ok := v.Interface().(GenericRecord)\n\tif !ok {\n\t\t// This is not a generic record and is likely a specific record. 
Hence\n\t\t// use the basic check.\n\t\treturn v.Kind() == reflect.Struct\n\t}\n\n\tfieldCount := 0\n\tfor key, val := range rec.fields {\n\t\tfor idx := range s.Fields {\n\t\t\t// key.Name must have rs.Fields[idx].Name as a suffix\n\t\t\tif len(s.Fields[idx].Name) <= len(key) {\n\t\t\t\tlhs := key[len(key)-len(s.Fields[idx].Name):]\n\t\t\t\tif lhs == s.Fields[idx].Name {\n\t\t\t\t\tif !s.Fields[idx].Type.Validate(reflect.ValueOf(val)) {\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t\tfieldCount++\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// All of the fields set must be accounted for in the union.\n\tif fieldCount < len(rec.fields) {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (s StructSpec) Validate() error {\n\tfor _, f := range s.Fields {\n\t\terr := f.Validate()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (cv *CustomValidator) Validate(i interface{}) error {\n\terrorRes, err := cv.Validator.Struct(i).(validator.ValidationErrors)\n\tif !err {\n\t\treturn nil\n\t}\n\terrorFields := []string{}\n\tfor _, k := range errorRes {\n\t\terrorFields = append(errorFields, k.StructField())\n\t}\n\tif len(errorFields) == 1 {\n\t\treturn errors.New(strings.Join(errorFields, \", \") + \" field is invalid or missing.\")\n\t}\n\treturn errors.New(strings.Join(errorFields, \", \") + \" fields are invalid or missing.\")\n}", "func Validate(v interface{}) error {\n\n\t// returns nil or ValidationErrors ( []FieldError )\n\terr := val.Struct(v)\n\tif err != nil {\n\n\t\t// this check is only needed when your code could produce\n\t\t// an invalid value for validation such as interface with nil\n\t\t// value most including myself do not usually have code like this.\n\t\tif _, ok := err.(*validator.InvalidValidationError); ok {\n\t\t\treturn nil\n\t\t}\n\n\t\treturn err\n\t}\n\treturn nil\n}", "func ValidateFields(model interface{}) error {\n\terr := validator.Validate(model)\n\tif err != nil {\n\t\terrs, ok := err.(validator.ErrorMap)\n\t\tif ok {\n\t\t\tfor f, _ := range errs {\n\t\t\t\treturn errors.New(ecodes.ValidateField, constant.ValidateFieldErr+\"-\"+f)\n\t\t\t}\n\t\t} else {\n\t\t\treturn errors.New(ecodes.ValidationUnknown, constant.ValidationUnknownErr)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (v *Validator) ValidateFields(input map[string]string) {\n\tfor field, value := range input {\n\t\t_, found := find(requiredFields, field)\n\t\tif !found {\n\t\t\tv.errors[\"errors\"] = append(v.errors[field], fmt.Sprintf(\"%+v is not valid, check docs for valid fields\", field))\n\t\t}\n\t\t(v.model)[field] = value\n\t}\n}", "func (self *TypeDef) Validate() error {\n\tif self.Type == \"\" {\n\t\treturn fmt.Errorf(\"TypeDef.type is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Type)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"TypeDef.type does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Name == \"\" {\n\t\treturn fmt.Errorf(\"TypeDef.name is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeName\", self.Name)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"TypeDef.name does not contain a valid TypeName (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Comment != \"\" {\n\t\tval := Validate(RdlSchema(), \"String\", self.Comment)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"TypeDef.comment does not contain a valid String (%v)\", val.Error)\n\t\t}\n\t}\n\treturn nil\n}", "func (mt *EasypostFieldObject) Validate() (err error) {\n\tif mt.Key == \"\" 
{\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"key\"))\n\t}\n\tif mt.Value == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"value\"))\n\t}\n\tif mt.Visibility == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"visibility\"))\n\t}\n\tif mt.Label == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"label\"))\n\t}\n\n\treturn\n}", "func (ti TypeInfo) Validate() error {\n\tif len(ti.Type) == 0 {\n\t\treturn errors.Wrap(ErrValidatingData, \"TypeInfo requires a type\")\n\t}\n\treturn nil\n}", "func ValidateStructFields(in interface{}, requiredFieldIDs []string) (err error) {\n\tvar inAsMap map[string]interface{}\n\ttemp, err := json.Marshal(in)\n\tif err != nil {\n\t\treturn errors.New(\"error validating input struct\")\n\t}\n\terr = json.Unmarshal(temp, &inAsMap)\n\tif err != nil {\n\t\treturn errors.New(\"error validating input struct\")\n\t}\n\n\tfor _, requiredFieldID := range requiredFieldIDs {\n\t\t// Make sure the field is in the data.\n\t\tif val, ok := inAsMap[requiredFieldID]; !ok || len(fmt.Sprintf(\"%v\", val)) == 0 {\n\t\t\treturn errors.New(\"required input field \" + requiredFieldID + \" not specified\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func Validate(value interface{}) error {\n\tv := reflect.Indirect(reflect.ValueOf(value))\n\tt := v.Type()\n\n\t// Look for an IsValid method on value. To check that this IsValid method\n\t// exists, we need to retrieve it with MethodByName, which returns a\n\t// reflect.Value. This reflect.Value, m, has a method that is called\n\t// IsValid as well, which tells us whether v actually represents the\n\t// function we're looking for. But they're two completely different IsValid\n\t// methods. 
Yes, this is confusing.\n\tm := reflect.ValueOf(value).MethodByName(\"IsValid\")\n\tif m.IsValid() {\n\t\te := m.Call([]reflect.Value{})\n\t\terr, ok := e[0].Interface().(error)\n\t\tif ok && err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// For non-struct values, we cannot do much, as there's no associated tags\n\t// to lookup to decide how to validate, so we have to assume they're valid.\n\tif t.Kind() != reflect.Struct {\n\t\treturn nil\n\t}\n\n\t// For struct values, iterate through the fields and use the type of field\n\t// along with its validate tags to decide next steps\n\tfor i := 0; i < v.NumField(); i++ {\n\t\tfield := v.Field(i)\n\n\t\tswitch field.Type().Kind() {\n\t\tcase reflect.Struct:\n\t\t\tdv := field.Interface()\n\t\t\tif err := Validate(dv); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase reflect.Slice:\n\t\t\tdv := reflect.ValueOf(field.Interface())\n\t\t\tif tag, ok := t.Field(i).Tag.Lookup(\"validate\"); ok {\n\t\t\t\tif err := validate(tag, t.Field(i).Name, v, v.Field(i)); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor j := 0; j < dv.Len(); j++ {\n\t\t\t\tif err := Validate(dv.Index(j).Interface()); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\tcase reflect.Map:\n\t\t\tif tag, ok := t.Field(i).Tag.Lookup(\"validate\"); ok {\n\t\t\t\tif err := validate(tag, t.Field(i).Name, v, v.Field(i)); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\tcase reflect.Bool, reflect.Int, reflect.Int64, reflect.Float64, reflect.String:\n\t\t\ttag, ok := t.Field(i).Tag.Lookup(\"validate\")\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := validate(tag, t.Field(i).Name, v, v.Field(i)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase reflect.Chan:\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unimplemented struct field type: %s\", t.Field(i).Name)\n\t\t}\n\t}\n\treturn nil\n}", "func (m *Type1) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (u *Usecase) validFields(d *Device) error {\n\tif d.Name == \"\" {\n\t\treturn &InvalidError{\"attribute `Name` must not be empty\"}\n\t}\n\n\tif d.User == 0 {\n\t\treturn &InvalidError{\"invalid user\"}\n\t}\n\n\treturn nil\n}", "func Validate(schema interface{}, errors []map[string]interface{}) {\n\t/**\n\t * create validator instance\n\t */\n\tvalidate := validator.New()\n\n\tif err := validate.Struct(schema); err != nil {\n\t\tif _, ok := err.(*validator.InvalidValidationError); ok {\n\t\t\terrors = append(errors, map[string]interface{}{\n\t\t\t\t\"message\": fmt.Sprint(err), \"flag\": \"INVALID_BODY\"},\n\t\t\t)\n\t\t}\n\n\t\tfor _, err := range err.(validator.ValidationErrors) {\n\t\t\terrors = append(errors, map[string]interface{}{\n\t\t\t\t\"message\": fmt.Sprint(err), \"flag\": \"INVALID_BODY\"},\n\t\t\t)\n\t\t}\n\t\texception.BadRequest(\"Validation error\", errors)\n\t}\n\tif errors != nil {\n\t\texception.BadRequest(\"Validation error\", errors)\n\t}\n}", "func (time Time) Validate() bool {\n\tret := true\n\tif ret == true && time.hours != (Hours{}) {\n\t\tret = time.hours.Validate()\n\t}\n\n\tif ret == true && time.minutes != (Minutes{}) {\n\t\tret = time.minutes.Validate()\n\t}\n\n\tif ret == true && time.seconds != (Seconds{}) {\n\t\tret = time.seconds.Validate()\n\t}\n\n\tif ret == true && time.delay != (Delay{}) {\n\t\tret = time.delay.Validate()\n\t}\n\n\tif ret != true {\n\t\tlog.Println(\"Failed to validate time '\" + time.value + 
\"'\")\n\t}\n\treturn ret\n}", "func (s *FieldStatsService) Validate() error {\n\tvar invalid []string\n\tif s.level != \"\" && (s.level != FieldStatsIndicesLevel && s.level != FieldStatsClusterLevel) {\n\t\tinvalid = append(invalid, \"Level\")\n\t}\n\tif len(invalid) != 0 {\n\t\treturn fmt.Errorf(\"missing or invalid required fields: %v\", invalid)\n\t}\n\treturn nil\n}", "func (t Type) Validate() error {\n\tswitch t {\n\tcase git:\n\t\treturn nil\n\tcase nop:\n\t\treturn nil\n\tdefault:\n\t\treturn ErrInvalidType\n\t}\n}", "func (p *Publication) IsValidFields() error {\n\tif p.Content != \"\" {\n\t\treturn nil\n\t}\n\treturn errorstatus.ErrorBadInfo\n\n}", "func (a Relayer) Validate() error {\n\treturn validation.ValidateStruct(&a,\n\t\tvalidation.Field(&a.Address, validation.Required),\n\t)\n}", "func (builder *Builder) ValidateFields() error {\n\tvmImageRefFields := []string{\"ImageSku\", \"ImageVersion\"}\n\tcustomVMIMageRefFields := []string{\"Image\", \"ImageResourceGroup\", \"ImageStorageAccount\", \"ImageContainer\"}\n\n\tif !builder.hasMarketplaceVMImageRef() && !builder.hasCustomVMIMageRef() {\n\t\treturn fmt.Errorf(\n\t\t\t\"missing fields: you must provide values for either %s fields or %s fields\",\n\t\t\tstrings.Join(vmImageRefFields, \", \"),\n\t\t\tstrings.Join(customVMIMageRefFields, \", \"),\n\t\t)\n\t}\n\n\tif builder.hasMarketplaceVMImageRef() && builder.hasCustomVMIMageRef() {\n\t\treturn fmt.Errorf(\n\t\t\t\"confilicting fields: you must provide values for either %s fields or %s fields\",\n\t\t\tstrings.Join(vmImageRefFields, \", \"),\n\t\t\tstrings.Join(customVMIMageRefFields, \", \"),\n\t\t)\n\t}\n\n\treturn nil\n}", "func (self *NumberTypeDef) Validate() error {\n\tif self.Type == \"\" {\n\t\treturn fmt.Errorf(\"NumberTypeDef.type is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Type)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"NumberTypeDef.type does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Name == \"\" {\n\t\treturn fmt.Errorf(\"NumberTypeDef.name is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeName\", self.Name)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"NumberTypeDef.name does not contain a valid TypeName (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Comment != \"\" {\n\t\tval := Validate(RdlSchema(), \"String\", self.Comment)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"NumberTypeDef.comment does not contain a valid String (%v)\", val.Error)\n\t\t}\n\t}\n\treturn nil\n}", "func (mt *EasypostCarrierTypes) Validate() (err error) {\n\tif mt.Type == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"type\"))\n\t}\n\tif mt.Object == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"object\"))\n\t}\n\tif mt.Fields == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"fields\"))\n\t}\n\n\tif ok := goa.ValidatePattern(`^CarrierType$`, mt.Object); !ok {\n\t\terr = goa.MergeErrors(err, goa.InvalidPatternError(`response.object`, mt.Object, `^CarrierType$`))\n\t}\n\treturn\n}", "func (t *Transform) Validate() *field.Error {\n\tswitch t.Type {\n\tcase TransformTypeMath:\n\t\tif t.Math == nil {\n\t\t\treturn field.Required(field.NewPath(\"math\"), \"given transform type math requires configuration\")\n\t\t}\n\t\treturn verrors.WrapFieldError(t.Math.Validate(), field.NewPath(\"math\"))\n\tcase TransformTypeMap:\n\t\tif t.Map == nil {\n\t\t\treturn 
field.Required(field.NewPath(\"map\"), \"given transform type map requires configuration\")\n\t\t}\n\t\treturn verrors.WrapFieldError(t.Map.Validate(), field.NewPath(\"map\"))\n\tcase TransformTypeMatch:\n\t\tif t.Match == nil {\n\t\t\treturn field.Required(field.NewPath(\"match\"), \"given transform type match requires configuration\")\n\t\t}\n\t\treturn verrors.WrapFieldError(t.Match.Validate(), field.NewPath(\"match\"))\n\tcase TransformTypeString:\n\t\tif t.String == nil {\n\t\t\treturn field.Required(field.NewPath(\"string\"), \"given transform type string requires configuration\")\n\t\t}\n\t\treturn verrors.WrapFieldError(t.String.Validate(), field.NewPath(\"string\"))\n\tcase TransformTypeConvert:\n\t\tif t.Convert == nil {\n\t\t\treturn field.Required(field.NewPath(\"convert\"), \"given transform type convert requires configuration\")\n\t\t}\n\t\tif err := t.Convert.Validate(); err != nil {\n\t\t\treturn verrors.WrapFieldError(err, field.NewPath(\"convert\"))\n\t\t}\n\tdefault:\n\t\t// Should never happen\n\t\treturn field.Invalid(field.NewPath(\"type\"), t.Type, \"unknown transform type\")\n\t}\n\n\treturn nil\n}", "func (strategy UpdateScatterStrategy) FieldsValidation() error {\n\tif len(strategy) == 0 {\n\t\treturn nil\n\t}\n\n\tm := make(map[string]struct{}, len(strategy))\n\tfor _, term := range strategy {\n\t\tif term.Key == \"\" {\n\t\t\treturn fmt.Errorf(\"key should not be empty\")\n\t\t}\n\t\tid := term.Key + \":\" + term.Value\n\t\tif _, ok := m[id]; !ok {\n\t\t\tm[id] = struct{}{}\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"duplicated key=%v value=%v\", term.Key, term.Value)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (self *StructTypeDef) Validate() error {\n\tif self.Type == \"\" {\n\t\treturn fmt.Errorf(\"StructTypeDef.type is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Type)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"StructTypeDef.type does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Name == \"\" {\n\t\treturn fmt.Errorf(\"StructTypeDef.name is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeName\", self.Name)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"StructTypeDef.name does not contain a valid TypeName (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Comment != \"\" {\n\t\tval := Validate(RdlSchema(), \"String\", self.Comment)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"StructTypeDef.comment does not contain a valid String (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Fields == nil {\n\t\treturn fmt.Errorf(\"StructTypeDef: Missing required field: fields\")\n\t}\n\treturn nil\n}", "func (self *MapTypeDef) Validate() error {\n\tif self.Type == \"\" {\n\t\treturn fmt.Errorf(\"MapTypeDef.type is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Type)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"MapTypeDef.type does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Name == \"\" {\n\t\treturn fmt.Errorf(\"MapTypeDef.name is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeName\", self.Name)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"MapTypeDef.name does not contain a valid TypeName (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Comment != \"\" {\n\t\tval := Validate(RdlSchema(), \"String\", self.Comment)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"MapTypeDef.comment does not contain a valid String (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Keys 
== \"\" {\n\t\treturn fmt.Errorf(\"MapTypeDef.keys is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Keys)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"MapTypeDef.keys does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Items == \"\" {\n\t\treturn fmt.Errorf(\"MapTypeDef.items is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Items)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"MapTypeDef.items does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\treturn nil\n}", "func (s StructInCustom) Validate() []string {\n\tvar errs []string\n\tif s.Name == \"\" {\n\t\terrs = append(errs, \"name::is_required\")\n\t}\n\n\treturn errs\n}", "func (cv Validator) Validate(i interface{}) error {\n\treturn cv.Validator.Struct(i)\n}", "func validateFieldDurations(fl validator.FieldLevel) bool {\n\tv := fl.Field().Bool()\n\tif v {\n\t\t//read the parameter and extract the other fields that were specified\n\t\tparam := fl.Param()\n\t\tfields := strings.Fields(param)\n\t\tfor _, field := range fields {\n\t\t\t//check if the field is set\n\t\t\tstructField, _, _, ok := fl.GetStructFieldOKAdvanced2(fl.Parent(), field)\n\t\t\tif !ok || structField.IsZero() {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}", "func Validate(v interface{}) (error, bool) {\n\tresult, err := govalidator.ValidateStruct(v)\n\tif err != nil {\n\t\tlog.Println(\"Invalid data\", err)\n\t}\n\treturn err, result\n}", "func (h *HazardType) Validate(tx *pop.Connection) (*validate.Errors, error) {\n\terrors := validate.Validate(\n\t\t&validators.StringIsPresent{Name: \"Label\", Field: h.Label, Message: \"A label is required.\"},\n\t\t&validators.StringIsPresent{Name: \"Description\", Field: h.Description, Message: \"Please provide a brief description.\"},\n\t)\n\n\treturn errors, nil\n}", "func (tS *testAInfo) Validate(msg actor.Msg) bool {\n\tswitch m := msg[0].(type) {\n\tcase int:\n\t\tif m > 0 && m < 10 {\n\t\t\treturn true\n\t\t}\n\tdefault:\n\t\tfor _, datum := range tS.allowed {\n\t\t\tif reflect.TypeOf(msg[0]) ==\n\t\t\t\treflect.TypeOf(datum) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\t// Does not match a valid type\n\treturn false\n}", "func (ut *RegisterPayload) Validate() (err error) {\n\tif ut.Email == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"email\"))\n\t}\n\tif ut.Password == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"password\"))\n\t}\n\tif ut.FirstName == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"first_name\"))\n\t}\n\tif ut.LastName == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"last_name\"))\n\t}\n\tif err2 := goa.ValidateFormat(goa.FormatEmail, ut.Email); err2 != nil {\n\t\terr = goa.MergeErrors(err, goa.InvalidFormatError(`type.email`, ut.Email, goa.FormatEmail, err2))\n\t}\n\tif utf8.RuneCountInString(ut.Email) < 6 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.email`, ut.Email, utf8.RuneCountInString(ut.Email), 6, true))\n\t}\n\tif utf8.RuneCountInString(ut.Email) > 150 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.email`, ut.Email, utf8.RuneCountInString(ut.Email), 150, false))\n\t}\n\tif utf8.RuneCountInString(ut.FirstName) < 1 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.first_name`, ut.FirstName, utf8.RuneCountInString(ut.FirstName), 1, true))\n\t}\n\tif 
utf8.RuneCountInString(ut.FirstName) > 200 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.first_name`, ut.FirstName, utf8.RuneCountInString(ut.FirstName), 200, false))\n\t}\n\tif utf8.RuneCountInString(ut.LastName) < 1 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.last_name`, ut.LastName, utf8.RuneCountInString(ut.LastName), 1, true))\n\t}\n\tif utf8.RuneCountInString(ut.LastName) > 200 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.last_name`, ut.LastName, utf8.RuneCountInString(ut.LastName), 200, false))\n\t}\n\tif utf8.RuneCountInString(ut.Password) < 5 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.password`, ut.Password, utf8.RuneCountInString(ut.Password), 5, true))\n\t}\n\tif utf8.RuneCountInString(ut.Password) > 100 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.password`, ut.Password, utf8.RuneCountInString(ut.Password), 100, false))\n\t}\n\treturn\n}", "func (u Phone) Validate() error {\n\treturn nil\n\t// return validation.ValidateStruct(&u,\n\t// \tvalidation.Field(&u.Name, validation.Required),\n\t// \tvalidation.Field(&u.Created, validation.Required))\n}", "func (r *InfoReq) Validate() error {\n\treturn validate.Struct(r)\n}", "func (r *RouteSpecFields) Validate(ctx context.Context) (errs *apis.FieldError) {\n\n\tif r.Domain == \"\" {\n\t\terrs = errs.Also(apis.ErrMissingField(\"domain\"))\n\t}\n\n\tif r.Hostname == \"www\" {\n\t\terrs = errs.Also(apis.ErrInvalidValue(\"hostname\", r.Hostname))\n\t}\n\n\tif _, err := BuildPathRegexp(r.Path); err != nil {\n\t\terrs = errs.Also(apis.ErrInvalidValue(\"path\", r.Path))\n\t}\n\n\treturn errs\n}", "func (mt *EasypostScanform) Validate() (err error) {\n\tif mt.Address != nil {\n\t\tif err2 := mt.Address.Validate(); err2 != nil {\n\t\t\terr = goa.MergeErrors(err, err2)\n\t\t}\n\t}\n\tif mt.ID != nil {\n\t\tif ok := goa.ValidatePattern(`^sf_`, *mt.ID); !ok {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidPatternError(`response.id`, *mt.ID, `^sf_`))\n\t\t}\n\t}\n\tif ok := goa.ValidatePattern(`^ScanForm$`, mt.Object); !ok {\n\t\terr = goa.MergeErrors(err, goa.InvalidPatternError(`response.object`, mt.Object, `^ScanForm$`))\n\t}\n\tif mt.Status != nil {\n\t\tif !(*mt.Status == \"creating\" || *mt.Status == \"created\" || *mt.Status == \"failed\") {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidEnumValueError(`response.status`, *mt.Status, []interface{}{\"creating\", \"created\", \"failed\"}))\n\t\t}\n\t}\n\treturn\n}", "func Validate(schema interface{}) {\n\tvalidate := validator.New()\n\n\tif err := validate.Struct(schema); err != nil {\n\t\tif _, ok := err.(*validator.InvalidValidationError); ok {\n\t\t\texception.BadRequest(fmt.Sprint(err), \"INVALID_BODY\")\n\t\t}\n\n\t\tfor _, err := range err.(validator.ValidationErrors) {\n\t\t\texception.BadRequest(fmt.Sprint(err), \"INVALID_BODY\")\n\t\t}\n\t}\n}", "func (v *Validation) Validate(i interface{}) ValidationErrors {\n\terrs := v.validate.Struct(i)\n\tif errs == nil {\n\t\treturn nil\n\t}\n\n\tvar returnErrs ValidationErrors\n\tfor _, err := range errs.(validator.ValidationErrors) {\n\t\t// cast the FieldError into our ValidationError and append to the slice\n\t\tve := ValidationError{err.(validator.FieldError)}\n\t\treturnErrs = append(returnErrs, ve)\n\t}\n\treturn returnErrs\n}", "func (s *MemberDefinition) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"MemberDefinition\"}\n\tif s.CognitoMemberDefinition != nil {\n\t\tif err := s.CognitoMemberDefinition.Validate(); err != nil 
{\n\t\t\tinvalidParams.AddNested(\"CognitoMemberDefinition\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.OidcMemberDefinition != nil {\n\t\tif err := s.OidcMemberDefinition.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"OidcMemberDefinition\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (m *MeasurementType) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (u *User) Validate() *errors.RestError {\n\tif err := validators.ValidateStruct(u); err != nil {\n\t\treturn err\n\t}\n\t// Sanitize Structure\n\tu.FirstName = strings.TrimSpace(u.FirstName)\n\tu.LastName = strings.TrimSpace(u.LastName)\n\tu.Email = strings.TrimSpace(u.Email)\n\tu.Username = strings.TrimSpace(u.Username)\n\tu.Password = strings.TrimSpace(u.Password)\n\t// Check password\n\tif err := u.validatePassword(); err != nil {\n\t\treturn err\n\t}\n\t// Check uniqueness\n\tif err := u.checkUniqueness(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *UnionSchema) Validate(v reflect.Value) bool {\n\tv = dereference(v)\n\tfor i := range s.Types {\n\t\tif t := s.Types[i]; t.Validate(v) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (DirectorBindStrategy) Validate(ctx request.Context, obj runtime.Object) field.ErrorList {\n\to := obj.(*bind.DirectorBind)\n\tlog.Printf(\"Validating fields for DirectorBind %s\\n\", o.Name)\n\terrors := field.ErrorList{}\n\t// perform validation here and add to errors using field.Invalid\n\treturn errors\n}", "func (t *Test1) Validate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.Validate(\n\t\t&validators.IntIsPresent{Field: t.Field1, Name: \"Field1\"},\n\t), nil\n}", "func (t ConvertTransform) Validate() *field.Error {\n\tif !t.GetFormat().IsValid() {\n\t\treturn field.Invalid(field.NewPath(\"format\"), t.Format, \"invalid format\")\n\t}\n\tif !t.ToType.IsValid() {\n\t\treturn field.Invalid(field.NewPath(\"toType\"), t.ToType, \"invalid type\")\n\t}\n\treturn nil\n}", "func (conf TypeConfig) Validate() error {\n\tfor _, rule := range conf.Rules {\n\t\td, ok := conf.Descriptors[rule.Descriptor]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"rule %s=%s uses descriptor %s that does not exist\", rule.Name, rule.Value, rule.Descriptor)\n\t\t}\n\t\tif !hasField(rule.Name, d) {\n\t\t\treturn fmt.Errorf(\"rule %s refers to field %s that is not present in descriptor\", rule.Descriptor, rule.Name)\n\t\t}\n\n\t}\n\tfor name, desc := range conf.Descriptors {\n\t\tfor i, d := range desc {\n\t\t\tcol, ok := d.(map[string]interface{})\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"descriptor %s has invalid structure in element %d\", name, i)\n\t\t\t}\n\t\t\tif col[\"name\"] == \"ts\" && col[\"type\"] != \"time\" {\n\t\t\t\treturn fmt.Errorf(\"descriptor %s has field ts with wrong type %s\", name, col[\"type\"])\n\t\t\t}\n\t\t}\n\t\tcol := desc[0].(map[string]interface{})\n\t\tif col[\"name\"] != \"_path\" {\n\t\t\treturn fmt.Errorf(\"descriptor %s does not have _path as first column\", name)\n\t\t}\n\t}\n\treturn nil\n}", "func (m APIStepType) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// value enum\n\tif err := m.validateAPIStepTypeEnum(\"\", \"body\", m); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (r *Version) Validate() error {\n\n\tR := *r\n\tif len(R) > 4 {\n\t\treturn errors.New(\"Version field may not contain more than 4 
fields\")\n\t}\n\tif len(R) < 3 {\n\t\treturn errors.New(\"Version field must contain at least 3 fields\")\n\t}\n\tfor i, x := range R[:3] {\n\t\tn, ok := x.(int)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Version field %d is not an integer: %d\", i, n)\n\t\t}\n\t\tif n > 99 {\n\t\t\treturn fmt.Errorf(\"Version field %d value is over 99: %d\", i, n)\n\t\t}\n\t}\n\tif len(R) > 3 {\n\t\ts, ok := R[3].(string)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"optional field 4 of Version is not a string\")\n\t\t} else {\n\t\t\tfor i, x := range s {\n\t\t\t\tif !(unicode.IsLetter(x) || unicode.IsDigit(x)) {\n\t\t\t\t\treturn fmt.Errorf(\n\t\t\t\t\t\t\"optional field 4 of Version contains other than letters and numbers at position %d: '%v,\", i, x)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (s *ListAggregatedUtterancesInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"ListAggregatedUtterancesInput\"}\n\tif s.AggregationDuration == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"AggregationDuration\"))\n\t}\n\tif s.BotAliasId != nil && len(*s.BotAliasId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotAliasId\", 10))\n\t}\n\tif s.BotId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotId\"))\n\t}\n\tif s.BotId != nil && len(*s.BotId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotId\", 10))\n\t}\n\tif s.BotVersion != nil && len(*s.BotVersion) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotVersion\", 1))\n\t}\n\tif s.Filters != nil && len(s.Filters) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"Filters\", 1))\n\t}\n\tif s.LocaleId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"LocaleId\"))\n\t}\n\tif s.MaxResults != nil && *s.MaxResults < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinValue(\"MaxResults\", 1))\n\t}\n\tif s.AggregationDuration != nil {\n\t\tif err := s.AggregationDuration.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"AggregationDuration\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.Filters != nil {\n\t\tfor i, v := range s.Filters {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"Filters\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\tif s.SortBy != nil {\n\t\tif err := s.SortBy.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"SortBy\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (s *CreateSlotTypeInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"CreateSlotTypeInput\"}\n\tif s.BotId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotId\"))\n\t}\n\tif s.BotId != nil && len(*s.BotId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotId\", 10))\n\t}\n\tif s.BotVersion == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotVersion\"))\n\t}\n\tif s.BotVersion != nil && len(*s.BotVersion) < 5 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotVersion\", 5))\n\t}\n\tif s.LocaleId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"LocaleId\"))\n\t}\n\tif s.LocaleId != nil && len(*s.LocaleId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"LocaleId\", 1))\n\t}\n\tif s.SlotTypeName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"SlotTypeName\"))\n\t}\n\tif s.SlotTypeName != nil && len(*s.SlotTypeName) < 1 
{\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"SlotTypeName\", 1))\n\t}\n\tif s.SlotTypeValues != nil && len(s.SlotTypeValues) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"SlotTypeValues\", 1))\n\t}\n\tif s.CompositeSlotTypeSetting != nil {\n\t\tif err := s.CompositeSlotTypeSetting.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"CompositeSlotTypeSetting\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.ExternalSourceSetting != nil {\n\t\tif err := s.ExternalSourceSetting.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"ExternalSourceSetting\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.SlotTypeValues != nil {\n\t\tfor i, v := range s.SlotTypeValues {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"SlotTypeValues\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\tif s.ValueSelectionSetting != nil {\n\t\tif err := s.ValueSelectionSetting.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"ValueSelectionSetting\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (s *OrderBy) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"OrderBy\"}\n\tif s.PropertyName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"PropertyName\"))\n\t}\n\tif s.PropertyName != nil && len(*s.PropertyName) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"PropertyName\", 1))\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func Validate(ctx http.IContext, vld *validator.Validate, arg interface{}) bool {\n\n\tif err := ctx.GetRequest().GetBodyAs(arg); err != nil {\n\t\thttp.InternalServerException(ctx)\n\t\treturn false\n\t}\n\n\tswitch err := vld.Struct(arg); err.(type) {\n\tcase validator.ValidationErrors:\n\t\thttp.FailedValidationException(ctx, err.(validator.ValidationErrors))\n\t\treturn false\n\n\tcase nil:\n\t\tbreak\n\n\tdefault:\n\t\thttp.InternalServerException(ctx)\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (s *CreateBotAliasInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"CreateBotAliasInput\"}\n\tif s.BotAliasLocaleSettings != nil && len(s.BotAliasLocaleSettings) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotAliasLocaleSettings\", 1))\n\t}\n\tif s.BotAliasName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotAliasName\"))\n\t}\n\tif s.BotAliasName != nil && len(*s.BotAliasName) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotAliasName\", 1))\n\t}\n\tif s.BotId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotId\"))\n\t}\n\tif s.BotId != nil && len(*s.BotId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotId\", 10))\n\t}\n\tif s.BotVersion != nil && len(*s.BotVersion) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotVersion\", 1))\n\t}\n\tif s.BotAliasLocaleSettings != nil {\n\t\tfor i, v := range s.BotAliasLocaleSettings {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"BotAliasLocaleSettings\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\tif s.ConversationLogSettings != nil {\n\t\tif err := s.ConversationLogSettings.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"ConversationLogSettings\", 
err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.SentimentAnalysisSettings != nil {\n\t\tif err := s.SentimentAnalysisSettings.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"SentimentAnalysisSettings\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (m *StripeRefundSpecificFields) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m ModelErrorDatumType) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// value enum\n\tif err := m.validateModelErrorDatumTypeEnum(\"\", \"body\", m); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (a *Account) Validate() error {\n\tvalidate := validator.New()\n\treturn validate.Struct(a)\n}", "func (s *CreateMemberInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"CreateMemberInput\"}\n\tif s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"ClientRequestToken\", 1))\n\t}\n\tif s.InvitationId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"InvitationId\"))\n\t}\n\tif s.InvitationId != nil && len(*s.InvitationId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"InvitationId\", 1))\n\t}\n\tif s.MemberConfiguration == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"MemberConfiguration\"))\n\t}\n\tif s.NetworkId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"NetworkId\"))\n\t}\n\tif s.NetworkId != nil && len(*s.NetworkId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"NetworkId\", 1))\n\t}\n\tif s.MemberConfiguration != nil {\n\t\tif err := s.MemberConfiguration.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"MemberConfiguration\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (ut *UpdateUserPayload) Validate() (err error) {\n\tif ut.Name == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"name\"))\n\t}\n\tif ut.Email == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"email\"))\n\t}\n\tif ut.Bio == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"bio\"))\n\t}\n\tif err2 := goa.ValidateFormat(goa.FormatEmail, ut.Email); err2 != nil {\n\t\terr = goa.MergeErrors(err, goa.InvalidFormatError(`type.email`, ut.Email, goa.FormatEmail, err2))\n\t}\n\tif ok := goa.ValidatePattern(`\\S`, ut.Name); !ok {\n\t\terr = goa.MergeErrors(err, goa.InvalidPatternError(`type.name`, ut.Name, `\\S`))\n\t}\n\tif utf8.RuneCountInString(ut.Name) > 256 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.name`, ut.Name, utf8.RuneCountInString(ut.Name), 256, false))\n\t}\n\treturn\n}", "func (o *Virtualserver) validate(dbRecord *common.DbRecord) (ok bool, err error) {\n\t////////////////////////////////////////////////////////////////////////////\n\t// Marshal data interface.\n\t////////////////////////////////////////////////////////////////////////////\n\tvar data virtualserver.Data\n\terr = shared.MarshalInterface(dbRecord.Data, &data)\n\tif err != nil {\n\t\treturn\n\t}\n\t////////////////////////////////////////////////////////////////////////////\n\t// Test required 
fields.\n\t////////////////////////////////////////////////////////////////////////////\n\tok = true\n\trequired := make(map[string]bool)\n\trequired[\"ProductCode\"] = false\n\trequired[\"IP\"] = false\n\trequired[\"Port\"] = false\n\trequired[\"LoadBalancerIP\"] = false\n\trequired[\"Name\"] = false\n\t////////////////////////////////////////////////////////////////////////////\n\tif data.ProductCode != 0 {\n\t\trequired[\"ProductCode\"] = true\n\t}\n\tif len(dbRecord.LoadBalancerIP) > 0 {\n\t\trequired[\"LoadBalancerIP\"] = true\n\t}\n\tif len(data.Ports) != 0 {\n\t\trequired[\"Port\"] = true\n\t}\n\tif data.IP != \"\" {\n\t\trequired[\"IP\"] = true\n\t}\n\tif data.Name != \"\" {\n\t\trequired[\"Name\"] = true\n\t}\n\tfor _, val := range required {\n\t\tif val == false {\n\t\t\tok = false\n\t\t}\n\t}\n\tif !ok {\n\t\terr = fmt.Errorf(\"missing required fields - %+v\", required)\n\t}\n\treturn\n}", "func Validate(t interface{}) error {\n\treturn validator.Struct(t)\n}", "func (m *ColumnDetails) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateKeyType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSortOrder(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateValueType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (cv *CustomValidator) Validate(i interface{}) error {\n\treturn cv.Validator.Struct(i)\n}", "func (cv *CustomValidator) Validate(i interface{}) error {\n\treturn cv.Validator.Struct(i)\n}", "func (s *WriteRecordsInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"WriteRecordsInput\"}\n\tif s.DatabaseName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"DatabaseName\"))\n\t}\n\tif s.Records == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"Records\"))\n\t}\n\tif s.Records != nil && len(s.Records) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"Records\", 1))\n\t}\n\tif s.TableName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"TableName\"))\n\t}\n\tif s.CommonAttributes != nil {\n\t\tif err := s.CommonAttributes.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"CommonAttributes\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.Records != nil {\n\t\tfor i, v := range s.Records {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"Records\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (s *CognitoMemberDefinition) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"CognitoMemberDefinition\"}\n\tif s.ClientId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"ClientId\"))\n\t}\n\tif s.ClientId != nil && len(*s.ClientId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"ClientId\", 1))\n\t}\n\tif s.UserGroup == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"UserGroup\"))\n\t}\n\tif s.UserGroup != nil && len(*s.UserGroup) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"UserGroup\", 1))\n\t}\n\tif s.UserPool == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"UserPool\"))\n\t}\n\tif s.UserPool != nil && len(*s.UserPool) < 1 
{\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"UserPool\", 1))\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (m *HashType) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateFunction(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMethod(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateModifier(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (recipe *Recipe) Validate() error {\n\tvalidate := validator.New()\n\treturn validate.Struct(recipe)\n}", "func (s *CreateInferenceExperimentInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"CreateInferenceExperimentInput\"}\n\tif s.EndpointName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"EndpointName\"))\n\t}\n\tif s.ModelVariants == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"ModelVariants\"))\n\t}\n\tif s.ModelVariants != nil && len(s.ModelVariants) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"ModelVariants\", 1))\n\t}\n\tif s.Name == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"Name\"))\n\t}\n\tif s.Name != nil && len(*s.Name) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"Name\", 1))\n\t}\n\tif s.RoleArn == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"RoleArn\"))\n\t}\n\tif s.RoleArn != nil && len(*s.RoleArn) < 20 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"RoleArn\", 20))\n\t}\n\tif s.ShadowModeConfig == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"ShadowModeConfig\"))\n\t}\n\tif s.Type == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"Type\"))\n\t}\n\tif s.DataStorageConfig != nil {\n\t\tif err := s.DataStorageConfig.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"DataStorageConfig\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.ModelVariants != nil {\n\t\tfor i, v := range s.ModelVariants {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"ModelVariants\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\tif s.ShadowModeConfig != nil {\n\t\tif err := s.ShadowModeConfig.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"ShadowModeConfig\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.Tags != nil {\n\t\tfor i, v := range s.Tags {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"Tags\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (s *UpdateSlotTypeInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"UpdateSlotTypeInput\"}\n\tif s.BotId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotId\"))\n\t}\n\tif s.BotId != nil && len(*s.BotId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotId\", 10))\n\t}\n\tif s.BotVersion == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotVersion\"))\n\t}\n\tif s.BotVersion != nil && len(*s.BotVersion) < 5 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotVersion\", 5))\n\t}\n\tif s.LocaleId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"LocaleId\"))\n\t}\n\tif s.LocaleId != nil 
&& len(*s.LocaleId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"LocaleId\", 1))\n\t}\n\tif s.SlotTypeId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"SlotTypeId\"))\n\t}\n\tif s.SlotTypeId != nil && len(*s.SlotTypeId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"SlotTypeId\", 10))\n\t}\n\tif s.SlotTypeName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"SlotTypeName\"))\n\t}\n\tif s.SlotTypeName != nil && len(*s.SlotTypeName) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"SlotTypeName\", 1))\n\t}\n\tif s.SlotTypeValues != nil && len(s.SlotTypeValues) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"SlotTypeValues\", 1))\n\t}\n\tif s.CompositeSlotTypeSetting != nil {\n\t\tif err := s.CompositeSlotTypeSetting.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"CompositeSlotTypeSetting\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.ExternalSourceSetting != nil {\n\t\tif err := s.ExternalSourceSetting.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"ExternalSourceSetting\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.SlotTypeValues != nil {\n\t\tfor i, v := range s.SlotTypeValues {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"SlotTypeValues\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\tif s.ValueSelectionSetting != nil {\n\t\tif err := s.ValueSelectionSetting.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"ValueSelectionSetting\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func Validate(obj interface{}) (map[string]interface{}, bool) {\n\n\trules := govalidator.MapData{\n\t\t\"name\": []string{\"required\", \"between:3,150\"},\n\t\t//\"email\": []string{\"required\", \"min:4\", \"max:20\", \"email\"},\n\t\t//\"web\": []string{\"url\"},\n\t\t//\"age\": []string{\"numeric_between:18,56\"},\n\t}\n\n\treturn validate.Validate(rules, obj)\n}", "func (u *User) Validate() ([]app.Invalid, error) {\n\tvar inv []app.Invalid\n\n\tif u.UserType == 0 {\n\t\tinv = append(inv, app.Invalid{Fld: \"UserType\", Err: \"The value of UserType cannot be 0.\"})\n\t}\n\n\tif u.FirstName == \"\" {\n\t\tinv = append(inv, app.Invalid{Fld: \"FirstName\", Err: \"A value of FirstName cannot be empty.\"})\n\t}\n\n\tif u.LastName == \"\" {\n\t\tinv = append(inv, app.Invalid{Fld: \"LastName\", Err: \"A value of LastName cannot be empty.\"})\n\t}\n\n\tif u.Email == \"\" {\n\t\tinv = append(inv, app.Invalid{Fld: \"Email\", Err: \"A value of Email cannot be empty.\"})\n\t}\n\n\tif u.Company == \"\" {\n\t\tinv = append(inv, app.Invalid{Fld: \"Company\", Err: \"A value of Company cannot be empty.\"})\n\t}\n\n\tif len(u.Addresses) == 0 {\n\t\tinv = append(inv, app.Invalid{Fld: \"Addresses\", Err: \"There must be at least one address.\"})\n\t} else {\n\t\tfor _, ua := range u.Addresses {\n\t\t\tif va, err := ua.Validate(); err != nil {\n\t\t\t\tinv = append(inv, va...)\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(inv) > 0 {\n\t\treturn inv, errors.New(\"Validation failures identified\")\n\t}\n\n\treturn nil, nil\n}", "func (s *GetPropertyValueHistoryInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"GetPropertyValueHistoryInput\"}\n\tif s.ComponentName != nil && len(*s.ComponentName) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"ComponentName\", 1))\n\t}\n\tif s.ComponentTypeId != nil 
&& len(*s.ComponentTypeId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"ComponentTypeId\", 1))\n\t}\n\tif s.EndTime != nil && len(*s.EndTime) < 20 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"EndTime\", 20))\n\t}\n\tif s.EntityId != nil && len(*s.EntityId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"EntityId\", 1))\n\t}\n\tif s.PropertyFilters != nil && len(s.PropertyFilters) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"PropertyFilters\", 1))\n\t}\n\tif s.SelectedProperties == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"SelectedProperties\"))\n\t}\n\tif s.SelectedProperties != nil && len(s.SelectedProperties) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"SelectedProperties\", 1))\n\t}\n\tif s.StartTime != nil && len(*s.StartTime) < 20 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"StartTime\", 20))\n\t}\n\tif s.WorkspaceId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"WorkspaceId\"))\n\t}\n\tif s.WorkspaceId != nil && len(*s.WorkspaceId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"WorkspaceId\", 1))\n\t}\n\tif s.PropertyFilters != nil {\n\t\tfor i, v := range s.PropertyFilters {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"PropertyFilters\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (v *validator) Validate(val interface{}) (bool, *domain.NuxError) {\n\tif l, ok := val.(int); ok {\n\t\treturn v.validateInt(l)\n\t}\n\n\tif l, ok := val.(int64); ok {\n\t\treturn v.validateInt64(l)\n\t}\n\n\tif l, ok := val.(float64); ok {\n\t\treturn v.validateFloat64(l)\n\t}\n\n\tif l, ok := val.(float32); ok {\n\t\treturn v.validateFloat32(l)\n\t}\n\n\treturn true, nil\n}", "func (d *Definition) Validate() (bool, error) {\n\treturn govalidator.ValidateStruct(d)\n}", "func (s *ServiceCatalogProvisioningDetails) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"ServiceCatalogProvisioningDetails\"}\n\tif s.PathId != nil && len(*s.PathId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"PathId\", 1))\n\t}\n\tif s.ProductId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"ProductId\"))\n\t}\n\tif s.ProductId != nil && len(*s.ProductId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"ProductId\", 1))\n\t}\n\tif s.ProvisioningArtifactId != nil && len(*s.ProvisioningArtifactId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"ProvisioningArtifactId\", 1))\n\t}\n\tif s.ProvisioningParameters != nil {\n\t\tfor i, v := range s.ProvisioningParameters {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"ProvisioningParameters\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (self *AliasTypeDef) Validate() error {\n\tif self.Type == \"\" {\n\t\treturn fmt.Errorf(\"AliasTypeDef.type is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Type)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"AliasTypeDef.type does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Name == \"\" {\n\t\treturn fmt.Errorf(\"AliasTypeDef.name is missing but is a required field\")\n\t} else {\n\t\tval := 
Validate(RdlSchema(), \"TypeName\", self.Name)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"AliasTypeDef.name does not contain a valid TypeName (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Comment != \"\" {\n\t\tval := Validate(RdlSchema(), \"String\", self.Comment)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"AliasTypeDef.comment does not contain a valid String (%v)\", val.Error)\n\t\t}\n\t}\n\treturn nil\n}", "func (s *ListSlotTypesInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"ListSlotTypesInput\"}\n\tif s.BotId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotId\"))\n\t}\n\tif s.BotId != nil && len(*s.BotId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotId\", 10))\n\t}\n\tif s.BotVersion == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotVersion\"))\n\t}\n\tif s.BotVersion != nil && len(*s.BotVersion) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotVersion\", 1))\n\t}\n\tif s.Filters != nil && len(s.Filters) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"Filters\", 1))\n\t}\n\tif s.LocaleId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"LocaleId\"))\n\t}\n\tif s.LocaleId != nil && len(*s.LocaleId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"LocaleId\", 1))\n\t}\n\tif s.MaxResults != nil && *s.MaxResults < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinValue(\"MaxResults\", 1))\n\t}\n\tif s.Filters != nil {\n\t\tfor i, v := range s.Filters {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"Filters\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\tif s.SortBy != nil {\n\t\tif err := s.SortBy.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"SortBy\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (s *UpdateBotAliasInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"UpdateBotAliasInput\"}\n\tif s.BotAliasId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotAliasId\"))\n\t}\n\tif s.BotAliasId != nil && len(*s.BotAliasId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotAliasId\", 10))\n\t}\n\tif s.BotAliasLocaleSettings != nil && len(s.BotAliasLocaleSettings) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotAliasLocaleSettings\", 1))\n\t}\n\tif s.BotAliasName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotAliasName\"))\n\t}\n\tif s.BotAliasName != nil && len(*s.BotAliasName) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotAliasName\", 1))\n\t}\n\tif s.BotId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotId\"))\n\t}\n\tif s.BotId != nil && len(*s.BotId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotId\", 10))\n\t}\n\tif s.BotVersion != nil && len(*s.BotVersion) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotVersion\", 1))\n\t}\n\tif s.BotAliasLocaleSettings != nil {\n\t\tfor i, v := range s.BotAliasLocaleSettings {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"BotAliasLocaleSettings\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\tif s.ConversationLogSettings != nil {\n\t\tif err := s.ConversationLogSettings.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"ConversationLogSettings\", 
err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.SentimentAnalysisSettings != nil {\n\t\tif err := s.SentimentAnalysisSettings.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"SentimentAnalysisSettings\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (v *Validator) Validate(i interface{}) error {\n\treturn v.validator.Struct(i)\n}", "func (s *CreateProfileInput) Validate() error {\n\tinvalidParams := aws.ErrInvalidParams{Context: \"CreateProfileInput\"}\n\n\tif s.Address == nil {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"Address\"))\n\t}\n\tif s.Address != nil && len(*s.Address) < 1 {\n\t\tinvalidParams.Add(aws.NewErrParamMinLen(\"Address\", 1))\n\t}\n\tif s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 10 {\n\t\tinvalidParams.Add(aws.NewErrParamMinLen(\"ClientRequestToken\", 10))\n\t}\n\tif len(s.DistanceUnit) == 0 {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"DistanceUnit\"))\n\t}\n\tif s.Locale != nil && len(*s.Locale) < 1 {\n\t\tinvalidParams.Add(aws.NewErrParamMinLen(\"Locale\", 1))\n\t}\n\n\tif s.ProfileName == nil {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"ProfileName\"))\n\t}\n\tif s.ProfileName != nil && len(*s.ProfileName) < 1 {\n\t\tinvalidParams.Add(aws.NewErrParamMinLen(\"ProfileName\", 1))\n\t}\n\tif len(s.TemperatureUnit) == 0 {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"TemperatureUnit\"))\n\t}\n\n\tif s.Timezone == nil {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"Timezone\"))\n\t}\n\tif s.Timezone != nil && len(*s.Timezone) < 1 {\n\t\tinvalidParams.Add(aws.NewErrParamMinLen(\"Timezone\", 1))\n\t}\n\tif len(s.WakeWord) == 0 {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"WakeWord\"))\n\t}\n\tif s.MeetingRoomConfiguration != nil {\n\t\tif err := s.MeetingRoomConfiguration.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"MeetingRoomConfiguration\", err.(aws.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.Tags != nil {\n\t\tfor i, v := range s.Tags {\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"Tags\", i), err.(aws.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (l *logger) Validate() error {\n\tif l == nil {\n\t\treturn nil\n\t}\n\tif err := l.Console.Validate(); err != nil {\n\t\treturn fmt.Errorf(\"`Console` field: %s\", err.Error())\n\t}\n\tif err := l.File.Validate(); err != nil {\n\t\treturn fmt.Errorf(\"`File` field: %s\", err.Error())\n\t}\n\treturn nil\n}", "func (s *RegexMatchTuple) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"RegexMatchTuple\"}\n\tif s.FieldToMatch == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"FieldToMatch\"))\n\t}\n\tif s.RegexPatternSetId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"RegexPatternSetId\"))\n\t}\n\tif s.RegexPatternSetId != nil && len(*s.RegexPatternSetId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"RegexPatternSetId\", 1))\n\t}\n\tif s.TextTransformation == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"TextTransformation\"))\n\t}\n\tif s.FieldToMatch != nil {\n\t\tif err := s.FieldToMatch.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"FieldToMatch\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (s *Service) Validate() error {\n\tnonEmptyFields := 
map[string]checker{\n\t\t\"Name\": checker{s.Name, true},\n\t\t\"Type\": checker{s.Type.String(), false}, // Type is a enum, no need to check\n\t\t\"Owner\": checker{s.Owner, true},\n\t\t\"ClusterType\": checker{s.ClusterType, true},\n\t\t\"InstanceName\": checker{s.InstanceName.String(), true},\n\t}\n\n\tfor label, field := range nonEmptyFields {\n\t\tif field.val == \"\" {\n\t\t\treturn fmt.Errorf(errorTmpl, label+\" is empty\")\n\t\t} else if field.checkSeparator && strings.Contains(field.val, keyPartSeparator) {\n\t\t\treturn fmt.Errorf(errorTmpl, label+separatorErrorMsg)\n\t\t}\n\t}\n\n\tswitch {\n\tcase len([]rune(s.Name)) > maxServiceNameLen:\n\t\treturn fmt.Errorf(errorTmpl, fmt.Sprintf(\"Name %q is too long, max len is %d symbols\", s.Name, maxServiceNameLen))\n\tcase !reRolloutType.MatchString(s.RolloutType):\n\t\treturn fmt.Errorf(errorTmpl, \"RolloutType is invalid\")\n\t}\n\treturn nil\n}", "func (r *RecordValidator) Validate(i interface{}) error {\r\n\treturn r.validator.Struct(i)\r\n}", "func (self *ArrayTypeDef) Validate() error {\n\tif self.Type == \"\" {\n\t\treturn fmt.Errorf(\"ArrayTypeDef.type is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Type)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"ArrayTypeDef.type does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Name == \"\" {\n\t\treturn fmt.Errorf(\"ArrayTypeDef.name is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeName\", self.Name)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"ArrayTypeDef.name does not contain a valid TypeName (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Comment != \"\" {\n\t\tval := Validate(RdlSchema(), \"String\", self.Comment)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"ArrayTypeDef.comment does not contain a valid String (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Items == \"\" {\n\t\treturn fmt.Errorf(\"ArrayTypeDef.items is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Items)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"ArrayTypeDef.items does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\treturn nil\n}", "func (t *Visibility_Visibility) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"Visibility_Visibility\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *UpdateWorkteamInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"UpdateWorkteamInput\"}\n\tif s.Description != nil && len(*s.Description) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"Description\", 1))\n\t}\n\tif s.MemberDefinitions != nil && len(s.MemberDefinitions) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"MemberDefinitions\", 1))\n\t}\n\tif s.WorkteamName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"WorkteamName\"))\n\t}\n\tif s.WorkteamName != nil && len(*s.WorkteamName) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"WorkteamName\", 1))\n\t}\n\tif s.MemberDefinitions != nil {\n\t\tfor i, v := range s.MemberDefinitions {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"MemberDefinitions\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (s *AwsSecurityFinding) Validate() error {\n\tinvalidParams := 
aws.ErrInvalidParams{Context: \"AwsSecurityFinding\"}\n\n\tif s.AwsAccountId == nil {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"AwsAccountId\"))\n\t}\n\n\tif s.CreatedAt == nil {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"CreatedAt\"))\n\t}\n\n\tif s.Description == nil {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"Description\"))\n\t}\n\n\tif s.GeneratorId == nil {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"GeneratorId\"))\n\t}\n\n\tif s.Id == nil {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"Id\"))\n\t}\n\n\tif s.ProductArn == nil {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"ProductArn\"))\n\t}\n\n\tif s.Resources == nil {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"Resources\"))\n\t}\n\n\tif s.SchemaVersion == nil {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"SchemaVersion\"))\n\t}\n\n\tif s.Severity == nil {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"Severity\"))\n\t}\n\n\tif s.Title == nil {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"Title\"))\n\t}\n\n\tif s.Types == nil {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"Types\"))\n\t}\n\n\tif s.UpdatedAt == nil {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"UpdatedAt\"))\n\t}\n\tif s.Compliance != nil {\n\t\tif err := s.Compliance.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"Compliance\", err.(aws.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.Malware != nil {\n\t\tfor i, v := range s.Malware {\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"Malware\", i), err.(aws.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\tif s.Note != nil {\n\t\tif err := s.Note.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"Note\", err.(aws.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.RelatedFindings != nil {\n\t\tfor i, v := range s.RelatedFindings {\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"RelatedFindings\", i), err.(aws.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\tif s.Resources != nil {\n\t\tfor i, v := range s.Resources {\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"Resources\", i), err.(aws.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\tif s.Vulnerabilities != nil {\n\t\tfor i, v := range s.Vulnerabilities {\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"Vulnerabilities\", i), err.(aws.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}" ]
[ "0.6366925", "0.62572694", "0.62447786", "0.62179637", "0.6206535", "0.6188605", "0.61792254", "0.6150862", "0.61362636", "0.6130793", "0.61278373", "0.61278373", "0.60985994", "0.60607976", "0.60560167", "0.6015007", "0.5993984", "0.59915495", "0.5974752", "0.59439635", "0.5910365", "0.5910089", "0.5890247", "0.58738875", "0.5831463", "0.58200026", "0.5811172", "0.58110964", "0.58079803", "0.580424", "0.57959574", "0.5787203", "0.5785949", "0.5777741", "0.576542", "0.5762354", "0.5745653", "0.5732171", "0.5726267", "0.5722942", "0.57126415", "0.5712214", "0.5705472", "0.5704808", "0.57032686", "0.56997675", "0.5695831", "0.5693257", "0.56581986", "0.5652151", "0.5649412", "0.564685", "0.5635615", "0.5627059", "0.5626882", "0.5620269", "0.5614515", "0.56099296", "0.5604897", "0.5602183", "0.55972546", "0.5592677", "0.55908024", "0.55876917", "0.55833346", "0.55829865", "0.5582494", "0.55823463", "0.5580874", "0.5576074", "0.5570078", "0.5565018", "0.5561956", "0.55536056", "0.55517834", "0.55517834", "0.554307", "0.55411386", "0.5539489", "0.5537677", "0.55338573", "0.553167", "0.5530022", "0.5517815", "0.5515914", "0.55145717", "0.55141366", "0.55091316", "0.55040276", "0.5501105", "0.5499323", "0.5497519", "0.5496368", "0.5486845", "0.5485202", "0.54850245", "0.54849327", "0.54849", "0.54847085", "0.54845464", "0.548292" ]
0.0
-1
String returns the string representation
func (s CopyClusterSnapshotOutput) String() string { return awsutil.Prettify(s) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s CreateAlgorithmOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateAlgorithmOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Library) String() string {\n\tres := make([]string, 5)\n\tres[0] = \"ID: \" + reform.Inspect(s.ID, true)\n\tres[1] = \"UserID: \" + reform.Inspect(s.UserID, true)\n\tres[2] = \"VolumeID: \" + reform.Inspect(s.VolumeID, true)\n\tres[3] = \"CreatedAt: \" + reform.Inspect(s.CreatedAt, true)\n\tres[4] = \"UpdatedAt: \" + reform.Inspect(s.UpdatedAt, true)\n\treturn strings.Join(res, \", \")\n}", "func (s CreateCanaryOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r Info) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func (s ReEncryptOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateFHIRDatastoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func String() string {\n\toutput := output{\n\t\tRerun: Rerun,\n\t\tVariables: Variables,\n\t\tItems: Items,\n\t}\n\tvar err error\n\tvar b []byte\n\tif Indent == \"\" {\n\t\tb, err = json.Marshal(output)\n\t} else {\n\t\tb, err = json.MarshalIndent(output, \"\", Indent)\n\t}\n\tif err != nil {\n\t\tmessageErr := Errorf(\"Error in parser. Please report this output to https://github.com/drgrib/alfred/issues: %v\", err)\n\t\tpanic(messageErr)\n\t}\n\ts := string(b)\n\treturn s\n}", "func (s CreateQuickConnectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r *Registry) String() string {\n\tout := make([]string, 0, len(r.nameToObject))\n\tfor name, object := range r.nameToObject {\n\t\tout = append(out, fmt.Sprintf(\"* %s:\\n%s\", name, object.serialization))\n\t}\n\treturn strings.Join(out, \"\\n\\n\")\n}", "func (s CreateSceneOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateSafetyRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateLanguageModelOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o QtreeCreateResponse) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (r SendAll) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func (r ReceiveAll) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func (enc *simpleEncoding) String() string {\n\treturn \"simpleEncoding(\" + enc.baseName + \")\"\n}", "func (s CreateDatabaseOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (z Zamowienium) String() string {\n\tjz, _ := json.Marshal(z)\n\treturn string(jz)\n}", "func (s CreateHITTypeOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProgramOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectVersionOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateEntityOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Addshifttraderequest) String() string {\n \n \n \n \n o.AcceptableIntervals = []string{\"\"} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s CreateUseCaseOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r Rooms) String() string {\n\tjr, _ := json.Marshal(r)\n\treturn string(jr)\n}", "func (i Info) String() string {\n\ts, _ := i.toJSON()\n\treturn s\n}", "func (o 
*Botversionsummary) String() string {\n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (e ExternalCfps) String() string {\n\tje, _ := json.Marshal(e)\n\treturn string(je)\n}", "func (s CreateTrustStoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func String() string {\n\treturn fmt.Sprintf(\n\t\t\"AppVersion = %s\\n\"+\n\t\t\t\"VCSRef = %s\\n\"+\n\t\t\t\"BuildVersion = %s\\n\"+\n\t\t\t\"BuildDate = %s\",\n\t\tAppVersion, VCSRef, BuildVersion, Date,\n\t)\n}", "func (s CreateDataLakeOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateSolutionVersionOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s GetSceneOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (i NotMachine) String() string { return toString(i) }", "func (s CreateRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s StartPipelineReprocessingOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateDatastoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateSequenceStoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Adjustablelivespeakerdetection) String() string {\n \n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s CreateRateBasedRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r Resiliency) String() string {\n\tb, _ := json.Marshal(r)\n\treturn string(b)\n}", "func (s RestoreFromRecoveryPointOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateWaveOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o QtreeCreateResponseResult) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (s CreateRoomOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateBotLocaleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s DeleteAlgorithmOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (z Zamowienia) String() string {\n\tjz, _ := json.Marshal(z)\n\treturn string(jz)\n}", "func (i *Info) String() string {\n\tb, _ := json.Marshal(i)\n\treturn string(b)\n}", "func (s ProcessingFeatureStoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ExportProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r RoomOccupancies) String() string {\n\tjr, _ := json.Marshal(r)\n\treturn string(jr)\n}", "func (r *InterRecord) String() string {\n\tbuf := r.Bytes()\n\tdefer ffjson.Pool(buf)\n\n\treturn string(buf)\n}", "func (s CreateResolverRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateResolverRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateResolverRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Coretype) String() string {\n \n \n \n \n \n o.ValidationFields = []string{\"\"} \n \n o.ItemValidationFields = []string{\"\"} \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s CreateLayerOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateModelCardOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o 
*Limitchangerequestdetails) String() string {\n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s NetworkPathComponentDetails) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (t Terms) String() string {\n\tjt, _ := json.Marshal(t)\n\treturn string(jt)\n}", "func (g GetObjectOutput) String() string {\n\treturn helper.Prettify(g)\n}", "func (s StartContactEvaluationOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateModelOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateModelOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Interactionstatsalert) String() string {\n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Digitalcondition) String() string {\n\tj, _ := json.Marshal(o)\n\tstr, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n\treturn str\n}", "func (d *Diagram) String() string { return toString(d) }", "func (r RoomOccupancy) String() string {\n\tjr, _ := json.Marshal(r)\n\treturn string(jr)\n}", "func (o *Outboundroute) String() string {\n \n \n \n \n o.ClassificationTypes = []string{\"\"} \n \n \n o.ExternalTrunkBases = []Domainentityref{{}} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s CreateCodeRepositoryOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateActivationOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateBotOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ResolutionTechniques) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateTrialComponentOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (c CourseCode) String() string {\n\tjc, _ := json.Marshal(c)\n\treturn string(jc)\n}", "func (p *Parms) String() string {\n\tout, _ := json.MarshalIndent(p, \"\", \"\\t\")\n\treturn string(out)\n}", "func (p polynomial) String() (str string) {\n\tfor _, m := range p.monomials {\n\t\tstr = str + \" \" + m.String() + \" +\"\n\t}\n\tstr = strings.TrimRight(str, \"+\")\n\treturn \"f(x) = \" + strings.TrimSpace(str)\n}", "func (s CreateThingOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r *RUT) String() string {\n\treturn r.Format(DefaultFormatter)\n}", "func (s CreatePatchBaselineOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Crossplatformpolicycreate) String() string {\n \n \n \n \n \n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s BotVersionLocaleDetails) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s DeleteMultiplexProgramOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s LifeCycleLastTestInitiated) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s GetObjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s 
CreateDocumentOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s LifeCycleLastTestReverted) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateComponentOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateIntegrationOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Commonruleconditions) String() string {\n o.Clauses = []Commonruleconditions{{}} \n o.Predicates = []Commonrulepredicate{{}} \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (t Test1s) String() string {\n\tjt, _ := json.Marshal(t)\n\treturn string(jt)\n}", "func (o *Directrouting) String() string {\n\tj, _ := json.Marshal(o)\n\tstr, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n\treturn str\n}", "func (s CreateContactFlowOutput) String() string {\n\treturn awsutil.Prettify(s)\n}" ]
[ "0.7215344", "0.7215344", "0.7200941", "0.72008544", "0.7177566", "0.7167415", "0.711847", "0.7088234", "0.7087988", "0.7080387", "0.70788854", "0.7067582", "0.7032172", "0.7028053", "0.7026998", "0.7025534", "0.70219815", "0.7017481", "0.7011076", "0.7010567", "0.70030844", "0.6997389", "0.69972193", "0.6992983", "0.6991194", "0.69904995", "0.69869614", "0.69856846", "0.6975249", "0.69748116", "0.6963287", "0.6962303", "0.6961273", "0.69512933", "0.6948752", "0.6945595", "0.6945595", "0.6944921", "0.69408435", "0.69380724", "0.6933419", "0.6928579", "0.6926984", "0.69253695", "0.69225246", "0.69223505", "0.69186175", "0.69183916", "0.69116604", "0.69112486", "0.6909993", "0.6908943", "0.69059587", "0.68992275", "0.68964165", "0.6894148", "0.6894148", "0.6894148", "0.6892935", "0.68918955", "0.68914765", "0.68892837", "0.6888408", "0.6885119", "0.6883532", "0.6880246", "0.6877422", "0.6877422", "0.68765414", "0.68749624", "0.6874543", "0.6873706", "0.68733835", "0.68719864", "0.6869712", "0.6869087", "0.68688697", "0.68688697", "0.68688697", "0.68688697", "0.68673867", "0.6862735", "0.68626535", "0.6859764", "0.6858307", "0.68545836", "0.6852679", "0.6851865", "0.6847157", "0.68442994", "0.684429", "0.6843622", "0.68428755", "0.6841922", "0.68416125", "0.68401366", "0.6836748", "0.68352926", "0.6833609", "0.683329", "0.68324035" ]
0.0
-1
Send marshals and sends the CopyClusterSnapshot API request.
func (r CopyClusterSnapshotRequest) Send(ctx context.Context) (*CopyClusterSnapshotResponse, error) { r.Request.SetContext(ctx) err := r.Request.Send() if err != nil { return nil, err } resp := &CopyClusterSnapshotResponse{ CopyClusterSnapshotOutput: r.Request.Data.(*CopyClusterSnapshotOutput), response: &aws.Response{Request: r.Request}, } return resp, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r CopySnapshotRequest) Send(ctx context.Context) (*CopySnapshotResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &CopySnapshotResponse{\n\t\tCopySnapshotOutput: r.Request.Data.(*CopySnapshotOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (m *MockRDSAPI) CopyDBClusterSnapshotRequest(arg0 *rds.CopyDBClusterSnapshotInput) (*request.Request, *rds.CopyDBClusterSnapshotOutput) {\n\tret := m.ctrl.Call(m, \"CopyDBClusterSnapshotRequest\", arg0)\n\tret0, _ := ret[0].(*request.Request)\n\tret1, _ := ret[1].(*rds.CopyDBClusterSnapshotOutput)\n\treturn ret0, ret1\n}", "func (z *ZfsH) SendSnapshot(ds0, ds1 string, output io.Writer, sendflags SendFlag, compress string) error {\n\tif sendflags&SendWithToken == 0 && !strings.ContainsAny(ds0, \"@\") {\n\t\treturn errors.New(\"can only send snapshots\")\n\t}\n\n\tc := command{\n\t\tCommand: \"zfs\",\n\t\tStdout: output,\n\t\tzh: z,\n\t}\n\n\targs := make([]string, 1,5)\n\targs[0] = \"send\"\n\n\tif sendflags&SendRecursive != 0 {\n\t\targs = append(args, \"-R\")\n\t}\n\n\tif sendflags&SendLz4 != 0 {\n\t\targs = append(args, \"-c\")\n\t}\n\n\tif sendflags&SendWithToken != 0 {\n\t\targs = append(args, \"-t\")\n\t}\n\n\tif sendflags&SendEmbeddedData != 0 {\n\t\targs = append(args, \"-e\")\n\t}\n\n\tif sendflags&SendIncremental != 0 {\n\t\tif ds1 == \"\" {\n\t\t\treturn errors.New(\"Source snapshot must be set for incremental send\")\n\t\t}\n\t\tif sendflags&SendIntermediate != 0 {\n\t\t\targs = append(args, \"-I\", ds1)\n\t\t} else {\n\t\t\targs = append(args, \"-i\", ds1)\n\t\t}\n\t}\n\targs = append(args, ds0)\n\n\tif compress != \"\" {\n\t\targs = append(args, \"|\", compress)\n\t}\n\n\t_, err := c.Run(args...)\n\treturn err\n}", "func ExampleRDS_CopyDBClusterSnapshot_shared00() {\n\tsvc := rds.New(session.New())\n\tinput := &rds.CopyDBClusterSnapshotInput{\n\t\tCopyTags: aws.Bool(true),\n\t\tSourceDBClusterSnapshotIdentifier: aws.String(\"arn:aws:rds:us-east-1:123456789012:cluster-snapshot:rds:myaurora-2019-06-04-09-16\"),\n\t\tTargetDBClusterSnapshotIdentifier: aws.String(\"myclustersnapshotcopy\"),\n\t}\n\n\tresult, err := svc.CopyDBClusterSnapshot(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase rds.ErrCodeDBClusterSnapshotAlreadyExistsFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBClusterSnapshotAlreadyExistsFault, aerr.Error())\n\t\t\tcase rds.ErrCodeDBClusterSnapshotNotFoundFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBClusterSnapshotNotFoundFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidDBClusterStateFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidDBClusterStateFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidDBClusterSnapshotStateFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidDBClusterSnapshotStateFault, aerr.Error())\n\t\t\tcase rds.ErrCodeSnapshotQuotaExceededFault:\n\t\t\t\tfmt.Println(rds.ErrCodeSnapshotQuotaExceededFault, aerr.Error())\n\t\t\tcase rds.ErrCodeKMSKeyNotAccessibleFault:\n\t\t\t\tfmt.Println(rds.ErrCodeKMSKeyNotAccessibleFault, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (j JoinBySnapshotUIObject) copySnapshotDirectoryK8s(peer string, joinBySnapshotObject JoinBySnapshotUIObject) error {\n\n\tcopyFromArgs 
:= []string{\n\t\t\"-n\", \"fabric-system-test\",\n\t\t\"cp\",\n\t\tfmt.Sprintf(\"%s-0:/shared/data/snapshots/completed/%s/%s\", joinBySnapshotObject.SnapshotPeer, joinBySnapshotObject.ChannelOpt.Name, joinBySnapshotObject.SnapshotPath),\n\t\tfmt.Sprintf(\"/tmp/%s\", joinBySnapshotObject.SnapshotPath),\n\t\t\"-c\", \"peer\",\n\t}\n\t_, err := networkclient.ExecuteCommand(\"kubectl\", copyFromArgs, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcopyToArgs := []string{\n\t\t\"-n\", \"fabric-system-test\",\n\t\t\"cp\",\n\t\tfmt.Sprintf(\"/tmp/%s\", joinBySnapshotObject.SnapshotPath),\n\t\tfmt.Sprintf(\"%s-0:/shared/data/snapshots/completed/%s\", peer, joinBySnapshotObject.SnapshotPath),\n\t\t\"-c\", \"peer\",\n\t}\n\t_, err = networkclient.ExecuteCommand(\"kubectl\", copyToArgs, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (z *zfsctl) SendSnapshot(ctx context.Context, name, options string, i string) *execute {\n\targs := []string{\"send\"}\n\tif len(options) > 0 {\n\t\targs = append(args, options)\n\t}\n\tif len(i) > 0 {\n\t\targs = append(args, \"-i \"+i)\n\t}\n\targs = append(args, name)\n\treturn &execute{ctx: ctx, name: z.cmd, args: args}\n}", "func (r CreateInstancesFromSnapshotRequest) Send(ctx context.Context) (*CreateInstancesFromSnapshotResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &CreateInstancesFromSnapshotResponse{\n\t\tCreateInstancesFromSnapshotOutput: r.Request.Data.(*CreateInstancesFromSnapshotOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (t transporter) SendSnapshotRequest(server *raft.Server, peer *raft.Peer, req *raft.SnapshotRequest) *raft.SnapshotResponse {\n\tvar aersp *raft.SnapshotResponse\n\tvar b bytes.Buffer\n\tjson.NewEncoder(&b).Encode(req)\n\n\tdebug(\"Send Snapshot to %s [Last Term: %d, LastIndex %d]\", peer.Name(),\n\t\treq.LastTerm, req.LastIndex)\n\n\tresp, err := t.Post(fmt.Sprintf(\"%s/snapshot\", peer.Name()), &b)\n\n\tif resp != nil {\n\t\tdefer resp.Body.Close()\n\t\taersp = &raft.SnapshotResponse{}\n\t\tif err = json.NewDecoder(resp.Body).Decode(&aersp); err == nil || err == io.EOF {\n\n\t\t\treturn aersp\n\t\t}\n\t}\n\treturn aersp\n}", "func (b *ClusterBuilder) Copy(object *Cluster) *ClusterBuilder {\n\tif object == nil {\n\t\treturn b\n\t}\n\tb.bitmap_ = object.bitmap_\n\tb.id = object.id\n\tb.href = object.href\n\tif object.api != nil {\n\t\tb.api = NewClusterAPI().Copy(object.api)\n\t} else {\n\t\tb.api = nil\n\t}\n\tif object.aws != nil {\n\t\tb.aws = NewAWS().Copy(object.aws)\n\t} else {\n\t\tb.aws = nil\n\t}\n\tif object.awsInfrastructureAccessRoleGrants != nil {\n\t\tb.awsInfrastructureAccessRoleGrants = NewAWSInfrastructureAccessRoleGrantList().Copy(object.awsInfrastructureAccessRoleGrants)\n\t} else {\n\t\tb.awsInfrastructureAccessRoleGrants = nil\n\t}\n\tif object.ccs != nil {\n\t\tb.ccs = NewCCS().Copy(object.ccs)\n\t} else {\n\t\tb.ccs = nil\n\t}\n\tif object.dns != nil {\n\t\tb.dns = NewDNS().Copy(object.dns)\n\t} else {\n\t\tb.dns = nil\n\t}\n\tif object.gcp != nil {\n\t\tb.gcp = NewGCP().Copy(object.gcp)\n\t} else {\n\t\tb.gcp = nil\n\t}\n\tif object.addons != nil {\n\t\tb.addons = NewAddOnInstallationList().Copy(object.addons)\n\t} else {\n\t\tb.addons = nil\n\t}\n\tb.billingModel = object.billingModel\n\tif object.cloudProvider != nil {\n\t\tb.cloudProvider = NewCloudProvider().Copy(object.cloudProvider)\n\t} else {\n\t\tb.cloudProvider = nil\n\t}\n\tif object.console != 
nil {\n\t\tb.console = NewClusterConsole().Copy(object.console)\n\t} else {\n\t\tb.console = nil\n\t}\n\tb.creationTimestamp = object.creationTimestamp\n\tb.disableUserWorkloadMonitoring = object.disableUserWorkloadMonitoring\n\tb.displayName = object.displayName\n\tb.etcdEncryption = object.etcdEncryption\n\tb.expirationTimestamp = object.expirationTimestamp\n\tb.externalID = object.externalID\n\tif object.externalConfiguration != nil {\n\t\tb.externalConfiguration = NewExternalConfiguration().Copy(object.externalConfiguration)\n\t} else {\n\t\tb.externalConfiguration = nil\n\t}\n\tif object.flavour != nil {\n\t\tb.flavour = NewFlavour().Copy(object.flavour)\n\t} else {\n\t\tb.flavour = nil\n\t}\n\tif object.groups != nil {\n\t\tb.groups = NewGroupList().Copy(object.groups)\n\t} else {\n\t\tb.groups = nil\n\t}\n\tb.healthState = object.healthState\n\tif object.identityProviders != nil {\n\t\tb.identityProviders = NewIdentityProviderList().Copy(object.identityProviders)\n\t} else {\n\t\tb.identityProviders = nil\n\t}\n\tif object.ingresses != nil {\n\t\tb.ingresses = NewIngressList().Copy(object.ingresses)\n\t} else {\n\t\tb.ingresses = nil\n\t}\n\tb.loadBalancerQuota = object.loadBalancerQuota\n\tif object.machinePools != nil {\n\t\tb.machinePools = NewMachinePoolList().Copy(object.machinePools)\n\t} else {\n\t\tb.machinePools = nil\n\t}\n\tb.managed = object.managed\n\tb.multiAZ = object.multiAZ\n\tb.name = object.name\n\tif object.network != nil {\n\t\tb.network = NewNetwork().Copy(object.network)\n\t} else {\n\t\tb.network = nil\n\t}\n\tif object.nodeDrainGracePeriod != nil {\n\t\tb.nodeDrainGracePeriod = NewValue().Copy(object.nodeDrainGracePeriod)\n\t} else {\n\t\tb.nodeDrainGracePeriod = nil\n\t}\n\tif object.nodes != nil {\n\t\tb.nodes = NewClusterNodes().Copy(object.nodes)\n\t} else {\n\t\tb.nodes = nil\n\t}\n\tb.openshiftVersion = object.openshiftVersion\n\tif object.product != nil {\n\t\tb.product = NewProduct().Copy(object.product)\n\t} else {\n\t\tb.product = nil\n\t}\n\tif len(object.properties) > 0 {\n\t\tb.properties = map[string]string{}\n\t\tfor k, v := range object.properties {\n\t\t\tb.properties[k] = v\n\t\t}\n\t} else {\n\t\tb.properties = nil\n\t}\n\tif object.provisionShard != nil {\n\t\tb.provisionShard = NewProvisionShard().Copy(object.provisionShard)\n\t} else {\n\t\tb.provisionShard = nil\n\t}\n\tif object.region != nil {\n\t\tb.region = NewCloudRegion().Copy(object.region)\n\t} else {\n\t\tb.region = nil\n\t}\n\tb.state = object.state\n\tif object.status != nil {\n\t\tb.status = NewClusterStatus().Copy(object.status)\n\t} else {\n\t\tb.status = nil\n\t}\n\tif object.storageQuota != nil {\n\t\tb.storageQuota = NewValue().Copy(object.storageQuota)\n\t} else {\n\t\tb.storageQuota = nil\n\t}\n\tif object.subscription != nil {\n\t\tb.subscription = NewSubscription().Copy(object.subscription)\n\t} else {\n\t\tb.subscription = nil\n\t}\n\tif object.version != nil {\n\t\tb.version = NewVersion().Copy(object.version)\n\t} else {\n\t\tb.version = nil\n\t}\n\treturn b\n}", "func (r CreateDiskFromSnapshotRequest) Send(ctx context.Context) (*CreateDiskFromSnapshotResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &CreateDiskFromSnapshotResponse{\n\t\tCreateDiskFromSnapshotOutput: r.Request.Data.(*CreateDiskFromSnapshotOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (r ImportSnapshotRequest) Send(ctx context.Context) (*ImportSnapshotResponse, 
error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &ImportSnapshotResponse{\n\t\tImportSnapshotOutput: r.Request.Data.(*ImportSnapshotOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (b *ServiceClusterBuilder) Copy(object *ServiceCluster) *ServiceClusterBuilder {\n\tif object == nil {\n\t\treturn b\n\t}\n\tb.bitmap_ = object.bitmap_\n\tb.id = object.id\n\tb.href = object.href\n\tif object.dns != nil {\n\t\tb.dns = NewDNS().Copy(object.dns)\n\t} else {\n\t\tb.dns = nil\n\t}\n\tb.cloudProvider = object.cloudProvider\n\tif object.clusterManagementReference != nil {\n\t\tb.clusterManagementReference = NewClusterManagementReference().Copy(object.clusterManagementReference)\n\t} else {\n\t\tb.clusterManagementReference = nil\n\t}\n\tb.name = object.name\n\tb.region = object.region\n\tb.status = object.status\n\treturn b\n}", "func (s CopyClusterSnapshotInput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {\n\tlog.Debugf(\"Starting Create Snapshot %s with response: %v\", req.Name, req)\n\t// Step 1: check request\n\tsnapshotName := req.GetName()\n\tif len(snapshotName) == 0 {\n\t\tlog.Error(\"CreateSnapshot: snapshot name not provided\")\n\t\treturn nil, status.Error(codes.InvalidArgument, \"CreateSnapshot: snapshot name not provided\")\n\t}\n\tvolumeID := req.GetSourceVolumeId()\n\tif len(volumeID) == 0 {\n\t\tlog.Error(\"CreateSnapshot: snapshot volume source ID not provided\")\n\t\treturn nil, status.Error(codes.InvalidArgument, \"CreateSnapshot: snapshot volume source ID not provided\")\n\t}\n\n\t// Step 2: get snapshot initial size from parameter\n\tinitialSize, _, _, err := getSnapshotInitialInfo(req.Parameters)\n\tif err != nil {\n\t\tlog.Errorf(\"CreateSnapshot: get snapshot %s initial info error: %s\", req.Name, err.Error())\n\t\treturn nil, status.Errorf(codes.Internal, \"CreateSnapshot: get snapshot %s initial info error: %s\", req.Name, err.Error())\n\t}\n\n\t// Step 3: get nodeName and vgName\n\tnodeName, vgName, pv, err := getPvSpec(cs.client, volumeID, cs.driverName)\n\tif err != nil {\n\t\tlog.Errorf(\"CreateSnapshot: get pv %s error: %s\", volumeID, err.Error())\n\t\treturn nil, status.Errorf(codes.Internal, \"CreateSnapshot: get pv %s error: %s\", volumeID, err.Error())\n\t}\n\tlog.Infof(\"CreateSnapshot: snapshot %s is in %s, whose vg is %s\", snapshotName, nodeName, vgName)\n\n\t// Step 4: update initialSize if initialSize is bigger than pv request size\n\tpvSize, _ := pv.Spec.Capacity.Storage().AsInt64()\n\tif pvSize < int64(initialSize) {\n\t\tinitialSize = uint64(pvSize)\n\t}\n\n\t// Step 5: get grpc client\n\tconn, err := cs.getNodeConn(nodeName)\n\tif err != nil {\n\t\tlog.Errorf(\"CreateSnapshot: get grpc client at node %s error: %s\", nodeName, err.Error())\n\t\treturn nil, status.Errorf(codes.Internal, \"CreateSnapshot: get grpc client at node %s error: %s\", nodeName, err.Error())\n\t}\n\tdefer conn.Close()\n\n\t// Step 6: create lvm snapshot\n\tvar lvmName string\n\tif lvmName, err = conn.GetLvm(ctx, vgName, snapshotName); err != nil {\n\t\tlog.Errorf(\"CreateSnapshot: get lvm snapshot %s failed: %s\", snapshotName, err.Error())\n\t\treturn nil, status.Errorf(codes.Internal, \"CreateSnapshot: get lvm snapshot %s failed: %s\", snapshotName, err.Error())\n\t}\n\tif lvmName == \"\" {\n\t\t_, err := 
conn.CreateSnapshot(ctx, vgName, snapshotName, volumeID, initialSize)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"CreateSnapshot: create lvm snapshot %s failed: %s\", snapshotName, err.Error())\n\t\t\treturn nil, status.Errorf(codes.Internal, \"CreateSnapshot: create lvm snapshot %s failed: %s\", snapshotName, err.Error())\n\t\t}\n\t\tlog.Infof(\"CreateSnapshot: create snapshot %s successfully\", snapshotName)\n\t} else {\n\t\tlog.Infof(\"CreateSnapshot: lvm snapshot %s in node %s already exists\", snapshotName, nodeName)\n\t}\n\treturn cs.newCreateSnapshotResponse(req)\n}", "func (mr *MockRDSAPIMockRecorder) CopyDBClusterSnapshotRequest(arg0 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CopyDBClusterSnapshotRequest\", reflect.TypeOf((*MockRDSAPI)(nil).CopyDBClusterSnapshotRequest), arg0)\n}", "func (r GetDeployablePatchSnapshotForInstanceRequest) Send() (*GetDeployablePatchSnapshotForInstanceOutput, error) {\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn r.Request.Data.(*GetDeployablePatchSnapshotForInstanceOutput), nil\n}", "func (r StartSnapshotRequest) Send(ctx context.Context) (*StartSnapshotResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &StartSnapshotResponse{\n\t\tStartSnapshotOutput: r.Request.Data.(*StartSnapshotOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (p *AuroraAdminClient) Snapshot(ctx context.Context) (r *Response, err error) {\n var _args345 AuroraAdminSnapshotArgs\n var _result346 AuroraAdminSnapshotResult\n if err = p.Client_().Call(ctx, \"snapshot\", &_args345, &_result346); err != nil {\n return\n }\n return _result346.GetSuccess(), nil\n}", "func (p *AuroraAdminClient) Snapshot(ctx context.Context) (r *Response, err error) {\n var _args395 AuroraAdminSnapshotArgs\n var _result396 AuroraAdminSnapshotResult\n var meta thrift.ResponseMeta\n meta, err = p.Client_().Call(ctx, \"snapshot\", &_args395, &_result396)\n p.SetLastResponseMeta_(meta)\n if err != nil {\n return\n }\n return _result396.GetSuccess(), nil\n}", "func (m *MockRDSAPI) CopyDBClusterSnapshot(arg0 *rds.CopyDBClusterSnapshotInput) (*rds.CopyDBClusterSnapshotOutput, error) {\n\tret := m.ctrl.Call(m, \"CopyDBClusterSnapshot\", arg0)\n\tret0, _ := ret[0].(*rds.CopyDBClusterSnapshotOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func Snapshot(c Rendezvous, s snapshotter, options ...SnapshotOption) {\n\tvar (\n\t\terr error\n\t\tsnapper = newSnapshot(options...)\n\t)\n\ttake := func() {\n\t\tlog.Println(\"taking snapshot of the cluster\")\n\t\tif err = s.Snapshot(Peers(c)); err != nil {\n\t\t\tlog.Println(\"failed to snapshot cluster\", err)\n\t\t}\n\t}\n\t// take an initial snapshot immediately.\n\ttake()\n\t// then take a snapshot every period.\n\ttick := time.NewTicker(snapper.Frequency)\n\tdefer tick.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-snapper.Context.Done():\n\t\t\treturn\n\t\tcase <-tick.C:\n\t\t\ttake()\n\t\t}\n\t}\n}", "func (v *ClusterProfiler) Dump(cpRequest *v1.ClusterProfilerRequest) (*v1.ClusterProfilerResults, error) {\n\tpreferredVersion, err := v.preferredVersion()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif cpRequest == nil {\n\t\treturn nil, fmt.Errorf(\"request body can't be nil\")\n\t}\n\n\tbytes, err := json.Marshal(cpRequest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Now, query the preferred version\n\turi := 
fmt.Sprintf(\"/apis/%s/dump-cluster-profiler\", preferredVersion)\n\n\tvar profileResults v1.ClusterProfilerResults\n\n\tresult := v.restClient.Get().AbsPath(uri).Body(bytes).Do(context.Background())\n\tif data, err := result.Raw(); err != nil {\n\t\tconnErr, isConnectionErr := err.(*url.Error)\n\n\t\tif isConnectionErr {\n\t\t\treturn nil, connErr.Err\n\t\t}\n\n\t\treturn nil, err\n\t} else if len(data) == 0 {\n\t\treturn &profileResults, nil\n\t} else if err = json.Unmarshal(data, &profileResults); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &profileResults, nil\n}", "func (s CopyClusterSnapshotOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func SnapShotCluster() int32 {\n\n\tfor _, v := range VolMgrHosts {\n\n\t\tconn, err := utils.Dial(v)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"SnapShotVol failed,Dial to MetaNodeHosts %v fail :%v\", v, err)\n\t\t\treturn -1\n\t\t}\n\n\t\tdefer conn.Close()\n\n\t\tvc := vp.NewVolMgrClient(conn)\n\t\tpSnapShotClusterReq := &vp.SnapShotClusterReq{}\n\t\tctx, _ := context.WithTimeout(context.Background(), SNAPSHOT_TIMEOUT_SECONDS*time.Second)\n\t\tpSnapShotClusterAck, err := vc.SnapShotCluster(ctx, pSnapShotClusterReq)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"SnapShotVol failed,grpc func err :%v\", err)\n\t\t\treturn -1\n\t\t}\n\n\t\tif pSnapShotClusterAck.Ret != 0 {\n\t\t\tlogger.Error(\"SnapShotCluster failed,rpc func ret:%v\", pSnapShotClusterAck.Ret)\n\t\t\treturn -1\n\t\t}\n\t}\n\n\treturn 0\n}", "func Copy(c *gophercloud.ServiceClient, containerName, objectName string, opts CopyOptsBuilder) CopyResult {\n\tvar res CopyResult\n\th := c.AuthenticatedHeaders()\n\n\theaders, err := opts.ToObjectCopyMap()\n\tif err != nil {\n\t\tres.Err = err\n\t\treturn res\n\t}\n\n\tfor k, v := range headers {\n\t\th[k] = v\n\t}\n\n\turl := copyURL(c, containerName, objectName)\n\tresp, err := c.Request(\"COPY\", url, gophercloud.RequestOpts{\n\t\tMoreHeaders: h,\n\t\tOkCodes: []int{201},\n\t})\n\tif resp != nil {\n\t\tres.Header = resp.Header\n\t}\n\tres.Err = err\n\treturn res\n}", "func PublishSnapshot(client *httputil.ClientConn, containerName, snapshotName string) error {\n\treq, err := json.Marshal(&utils.PublishRequest{\n\t\tFilename: snapshotName + \".tar.xz\",\n\t\tPublic: false,\n\t\tAliases: []utils.PublishRequestAliases{\n\t\t\tutils.PublishRequestAliases{\n\t\t\t\tName: snapshotName,\n\t\t\t\tDescription: \"\",\n\t\t\t}},\n\t\tSource: utils.PublishRequestSource{\n\t\t\tType: \"snapshot\",\n\t\t\tName: fmt.Sprintf(\"%s/%s\", containerName, snapshotName),\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal PublishRequest: %v\", err)\n\t}\n\tbody, err := utils.Do(client, \"POST\", \"/1.0/images\", bytes.NewReader(req))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to publish image for %s: %v\", containerName, err)\n\t}\n\tvar r utils.SnapshotResponse\n\tif err := json.Unmarshal(body, &r); err != nil {\n\t\treturn fmt.Errorf(\"failed to unmarshal PublishResponse for %s: %v\", containerName, err)\n\t}\n\tif err := waitBackgroundOperation(client, containerName, r.Metadata.ID); err != nil {\n\t\treturn fmt.Errorf(\"failed to check background publish operations: %v\", err)\n\t}\n\treturn nil\n}", "func (s DBClusterSnapshot) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s *Server) Snapshot(w http.ResponseWriter, r *http.Request) {\n\t_ = json.NewEncoder(w).Encode(s.Regenbox.Snapshot())\n}", "func (r CopyInstancesRequest) Send(ctx context.Context) (*CopyInstancesResponse, error) 
{\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &CopyInstancesResponse{\n\t\tCopyInstancesOutput: r.Request.Data.(*CopyInstancesOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (mr *MockRDSAPIMockRecorder) CopyDBClusterSnapshot(arg0 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CopyDBClusterSnapshot\", reflect.TypeOf((*MockRDSAPI)(nil).CopyDBClusterSnapshot), arg0)\n}", "func (r *Replica) sendSnapshot(\n\tctx context.Context,\n\trecipient roachpb.ReplicaDescriptor,\n\tsnapType SnapshotRequest_Type,\n\tpriority SnapshotRequest_Priority,\n) (retErr error) {\n\tdefer func() {\n\t\t// Report the snapshot status to Raft, which expects us to do this once we\n\t\t// finish sending the snapshot.\n\t\tr.reportSnapshotStatus(ctx, recipient.ReplicaID, retErr)\n\t}()\n\n\tsnap, err := r.GetSnapshot(ctx, snapType, recipient.StoreID)\n\tif err != nil {\n\t\terr = errors.Wrapf(err, \"%s: failed to generate %s snapshot\", r, snapType)\n\t\treturn errors.Mark(err, errMarkSnapshotError)\n\t}\n\tdefer snap.Close()\n\tlog.Event(ctx, \"generated snapshot\")\n\n\t// Check that the snapshot we generated has a descriptor that includes the\n\t// recipient. If it doesn't, the recipient will reject it, so it's better to\n\t// not send it in the first place. It's possible to hit this case if we're not\n\t// the leaseholder and we haven't yet applied the configuration change that's\n\t// adding the recipient to the range.\n\tif _, ok := snap.State.Desc.GetReplicaDescriptor(recipient.StoreID); !ok {\n\t\treturn errors.Wrapf(errMarkSnapshotError,\n\t\t\t\"attempting to send snapshot that does not contain the recipient as a replica; \"+\n\t\t\t\t\"snapshot type: %s, recipient: s%d, desc: %s\", snapType, recipient, snap.State.Desc)\n\t}\n\n\tsender, err := r.GetReplicaDescriptor()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"%s: change replicas failed\", r)\n\t}\n\n\tstatus := r.RaftStatus()\n\tif status == nil {\n\t\t// This code path is sometimes hit during scatter for replicas that\n\t\t// haven't woken up yet.\n\t\treturn &benignError{errors.Wrap(errMarkSnapshotError, \"raft status not initialized\")}\n\t}\n\n\tusesReplicatedTruncatedState, err := storage.MVCCGetProto(\n\t\tctx, snap.EngineSnap, keys.RaftTruncatedStateLegacyKey(r.RangeID), hlc.Timestamp{}, nil, storage.MVCCGetOptions{},\n\t)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"loading legacy truncated state\")\n\t}\n\n\tcanAvoidSendingLog := !usesReplicatedTruncatedState &&\n\t\tsnap.State.TruncatedState.Index < snap.State.RaftAppliedIndex\n\n\tif canAvoidSendingLog {\n\t\t// If we're not using a legacy (replicated) truncated state, we avoid\n\t\t// sending the (past) Raft log in the snapshot in the first place and\n\t\t// send only those entries that are actually useful to the follower.\n\t\t// This is done by changing the truncated state, which we're allowed\n\t\t// to do since it is not a replicated key (and thus not subject to\n\t\t// matching across replicas). The actual sending happens here:\n\t\t_ = (*kvBatchSnapshotStrategy)(nil).Send\n\t\t// and results in no log entries being sent at all. 
Note that\n\t\t// Metadata.Index is really the applied index of the replica.\n\t\tsnap.State.TruncatedState = &roachpb.RaftTruncatedState{\n\t\t\tIndex: snap.RaftSnap.Metadata.Index,\n\t\t\tTerm: snap.RaftSnap.Metadata.Term,\n\t\t}\n\t}\n\n\treq := SnapshotRequest_Header{\n\t\tState: snap.State,\n\t\t// Tell the recipient whether it needs to synthesize the new\n\t\t// unreplicated TruncatedState. It could tell by itself by peeking into\n\t\t// the data, but it uses a write only batch for performance which\n\t\t// doesn't support that; this is easier. Notably, this is true if the\n\t\t// snap index itself is the one at which the migration happens.\n\t\t//\n\t\t// See VersionUnreplicatedRaftTruncatedState.\n\t\tUnreplicatedTruncatedState: !usesReplicatedTruncatedState,\n\t\tRaftMessageRequest: RaftMessageRequest{\n\t\t\tRangeID: r.RangeID,\n\t\t\tFromReplica: sender,\n\t\t\tToReplica: recipient,\n\t\t\tMessage: raftpb.Message{\n\t\t\t\tType: raftpb.MsgSnap,\n\t\t\t\tTo: uint64(recipient.ReplicaID),\n\t\t\t\tFrom: uint64(sender.ReplicaID),\n\t\t\t\tTerm: status.Term,\n\t\t\t\tSnapshot: snap.RaftSnap,\n\t\t\t},\n\t\t},\n\t\tRangeSize: r.GetMVCCStats().Total(),\n\t\t// Recipients currently cannot choose to decline any snapshots.\n\t\t// In 19.2 and earlier versions pre-emptive snapshots could be declined.\n\t\t//\n\t\t// TODO(ajwerner): Consider removing the CanDecline flag.\n\t\tCanDecline: false,\n\t\tPriority: priority,\n\t\tStrategy: SnapshotRequest_KV_BATCH,\n\t\tType: snapType,\n\t}\n\tnewBatchFn := func() storage.Batch {\n\t\treturn r.store.Engine().NewUnindexedBatch(true /* writeOnly */)\n\t}\n\tsent := func() {\n\t\tr.store.metrics.RangeSnapshotsGenerated.Inc(1)\n\t}\n\tif err := r.store.cfg.Transport.SendSnapshot(\n\t\tctx,\n\t\tr.store.allocator.storePool,\n\t\treq,\n\t\tsnap,\n\t\tnewBatchFn,\n\t\tsent,\n\t); err != nil {\n\t\tif errors.Is(err, errMalformedSnapshot) {\n\t\t\ttag := fmt.Sprintf(\"r%d_%s\", r.RangeID, snap.SnapUUID.Short())\n\t\t\tif dir, err := r.store.checkpoint(ctx, tag); err != nil {\n\t\t\t\tlog.Warningf(ctx, \"unable to create checkpoint %s: %+v\", dir, err)\n\t\t\t} else {\n\t\t\t\tlog.Warningf(ctx, \"created checkpoint %s\", dir)\n\t\t\t}\n\n\t\t\tlog.Fatal(ctx, \"malformed snapshot generated\")\n\t\t}\n\t\treturn errors.Mark(err, errMarkSnapshotError)\n\t}\n\treturn nil\n}", "func (r DescribeApplicationSnapshotRequest) Send(ctx context.Context) (*DescribeApplicationSnapshotResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &DescribeApplicationSnapshotResponse{\n\t\tDescribeApplicationSnapshotOutput: r.Request.Data.(*DescribeApplicationSnapshotOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (a *Agent) SnapshotRPC(args *structs.SnapshotRequest, in io.Reader, out io.Writer,\n\treplyFn structs.SnapshotReplyFn) error {\n\treturn a.delegate.SnapshotRPC(args, in, out, replyFn)\n}", "func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) {\n\tdefer func() { c.resetOnConnError(err) }()\n\n\tsftpClient, err := c.Init(ctx)\n\tif err != nil {\n\t\treturn info, err\n\t}\n\n\tfilename, err := litestream.SnapshotPath(c.Path, generation, index)\n\tif err != nil {\n\t\treturn info, fmt.Errorf(\"cannot determine snapshot path: %w\", err)\n\t}\n\tstartTime := time.Now()\n\n\tif err := sftpClient.MkdirAll(path.Dir(filename)); err != nil {\n\t\treturn info, 
fmt.Errorf(\"cannot make parent wal segment directory %q: %w\", path.Dir(filename), err)\n\t}\n\n\tf, err := sftpClient.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC)\n\tif err != nil {\n\t\treturn info, fmt.Errorf(\"cannot open snapshot file for writing: %w\", err)\n\t}\n\tdefer f.Close()\n\n\tn, err := io.Copy(f, rd)\n\tif err != nil {\n\t\treturn info, err\n\t} else if err := f.Close(); err != nil {\n\t\treturn info, err\n\t}\n\n\tinternal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, \"PUT\").Inc()\n\tinternal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, \"PUT\").Add(float64(n))\n\n\t// log.Printf(\"%s(%s): snapshot: creating %s/%08x t=%s\", r.db.Path(), r.Name(), generation, index, time.Since(startTime).Truncate(time.Millisecond))\n\n\treturn litestream.SnapshotInfo{\n\t\tGeneration: generation,\n\t\tIndex: index,\n\t\tSize: n,\n\t\tCreatedAt: startTime.UTC(),\n\t}, nil\n}", "func (c *restClient) CreateSnapshot(ctx context.Context, req *netapppb.CreateSnapshotRequest, opts ...gax.CallOption) (*CreateSnapshotOperation, error) {\n\tm := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}\n\tbody := req.GetSnapshot()\n\tjsonReq, err := m.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v/snapshots\", req.GetParent())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\tparams.Add(\"snapshotId\", fmt.Sprintf(\"%v\", req.GetSnapshotId()))\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"parent\", url.QueryEscape(req.GetParent()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &longrunningpb.Operation{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"POST\", baseUrl.String(), bytes.NewReader(jsonReq))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\toverride := fmt.Sprintf(\"/v1/%s\", resp.GetName())\n\treturn &CreateSnapshotOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, resp),\n\t\tpollPath: override,\n\t}, nil\n}", "func (r ModifyDBClusterRequest) Send(ctx context.Context) (*ModifyDBClusterResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &ModifyDBClusterResponse{\n\t\tModifyDBClusterOutput: r.Request.Data.(*ModifyDBClusterOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func clone(namespace, sourceClusterName, targetClusterName string) 
{\n\tlog.Debugf(\"clone called namespace:%s sourceClusterName:%s targetClusterName:%s\",\n\t\tnamespace, sourceClusterName, targetClusterName)\n\n\t// set up a request to the clone API sendpoint\n\trequest := msgs.CloneRequest{\n\t\tBackrestStorageSource: BackrestStorageSource,\n\t\tBackrestPVCSize: BackrestPVCSize,\n\t\tEnableMetrics: MetricsFlag,\n\t\tNamespace: Namespace,\n\t\tPVCSize: PVCSize,\n\t\tSourceClusterName: sourceClusterName,\n\t\tTargetClusterName: targetClusterName,\n\t}\n\n\t// make a call to the clone API\n\tresponse, err := api.Clone(httpclient, &SessionCredentials, &request)\n\n\t// if there was an error with the API call, print that out here\n\tif err != nil {\n\t\tfmt.Println(\"Error: \" + err.Error())\n\t\tos.Exit(1)\n\t}\n\n\t// if the response was unsuccessful due to user error, print out the error\n\t// message here\n\tif response.Status.Code != msgs.Ok {\n\t\tfmt.Println(\"Error: \" + response.Status.Msg)\n\t\tos.Exit(1)\n\t}\n\n\t// otherwise, print out some feedback:\n\tfmt.Println(\"Created clone task for: \", response.TargetClusterName)\n\tfmt.Println(\"workflow id is \", response.WorkflowID)\n}", "func (m *MockRDSAPI) CreateDBClusterSnapshotRequest(arg0 *rds.CreateDBClusterSnapshotInput) (*request.Request, *rds.CreateDBClusterSnapshotOutput) {\n\tret := m.ctrl.Call(m, \"CreateDBClusterSnapshotRequest\", arg0)\n\tret0, _ := ret[0].(*request.Request)\n\tret1, _ := ret[1].(*rds.CreateDBClusterSnapshotOutput)\n\treturn ret0, ret1\n}", "func SendSnapshot(file string, writer *SnapshotStreamWriter) error {\n\treader, err := openSnapshot(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer reader.Close()\n\n\t_, err = io.Copy(writer, reader)\n\treturn err\n}", "func (r StartDBClusterRequest) Send(ctx context.Context) (*StartDBClusterResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &StartDBClusterResponse{\n\t\tStartDBClusterOutput: r.Request.Data.(*StartDBClusterOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (s *OsdCsiServer) CreateSnapshot(\n\tctx context.Context,\n\treq *csi.CreateSnapshotRequest,\n) (*csi.CreateSnapshotResponse, error) {\n\n\tif len(req.GetSourceVolumeId()) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"Volume id must be provided\")\n\t} else if len(req.GetName()) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"Name must be provided\")\n\t}\n\n\t// Check if the snapshot with this name already exists\n\tv, err := util.VolumeFromName(s.driver, req.GetName())\n\tif err == nil {\n\t\t// Verify the parent is the same\n\t\tif req.GetSourceVolumeId() != v.GetSource().GetParent() {\n\t\t\treturn nil, status.Error(codes.AlreadyExists, \"Requested snapshot already exists for another source volume id\")\n\t\t}\n\n\t\t// Return current snapshot info\n\t\tcreatedAt, err := ptypes.Timestamp(v.GetCtime())\n\t\tif err != nil {\n\t\t\treturn nil, status.Errorf(codes.Internal, \"Failed to get time snapshot was created: %v\", err)\n\t\t}\n\t\treturn &csi.CreateSnapshotResponse{\n\t\t\tSnapshot: &csi.Snapshot{\n\t\t\t\tId: v.GetId(),\n\t\t\t\tSourceVolumeId: v.GetSource().GetParent(),\n\t\t\t\tCreatedAt: createdAt.Unix(),\n\t\t\t\tStatus: &csi.SnapshotStatus{\n\t\t\t\t\t// This means that we are not uploading our snapshot\n\t\t\t\t\t// We may add support for cloud snaps in future patches\n\t\t\t\t\tType: csi.SnapshotStatus_READY,\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t}\n\n\t// Get any labels passed in by 
the CO\n\t_, locator, _, err := s.specHandler.SpecFromOpts(req.GetParameters())\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.InvalidArgument, \"Unable to get parameters: %v\", err)\n\t}\n\n\t// Create snapshot\n\treadonly := true\n\tsnapshotID, err := s.driver.Snapshot(req.GetSourceVolumeId(), readonly, &api.VolumeLocator{\n\t\tName: req.GetName(),\n\t\tVolumeLabels: locator.GetVolumeLabels(),\n\t}, false)\n\tif err != nil {\n\t\tif err == kvdb.ErrNotFound {\n\t\t\treturn nil, status.Errorf(codes.NotFound, \"Volume id %s not found\", req.GetSourceVolumeId())\n\t\t}\n\t\treturn nil, status.Errorf(codes.Internal, \"Failed to create snapshot: %v\", err)\n\t}\n\n\tsnapInfo, err := util.VolumeFromName(s.driver, snapshotID)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"Failed to get information about the snapshot: %v\", err)\n\t}\n\tcreatedAt, err := ptypes.Timestamp(snapInfo.GetCtime())\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"Failed to get time snapshot was created: %v\", err)\n\t}\n\n\treturn &csi.CreateSnapshotResponse{\n\t\tSnapshot: &csi.Snapshot{\n\t\t\tId: snapshotID,\n\t\t\tSourceVolumeId: req.GetSourceVolumeId(),\n\t\t\tCreatedAt: createdAt.Unix(),\n\t\t\tStatus: &csi.SnapshotStatus{\n\t\t\t\t// This means that we are not uploading our snapshot\n\t\t\t\t// We may add support for cloud snaps in future patches\n\t\t\t\tType: csi.SnapshotStatus_READY,\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func (s *API) CreateSnapshot(req *CreateSnapshotRequest, opts ...scw.RequestOption) (*Snapshot, error) {\n\tvar err error\n\n\tif req.Region == \"\" {\n\t\tdefaultRegion, _ := s.client.GetDefaultRegion()\n\t\treq.Region = defaultRegion\n\t}\n\n\tif req.Name == \"\" {\n\t\treq.Name = namegenerator.GetRandomName(\"snp\")\n\t}\n\n\tif fmt.Sprint(req.Region) == \"\" {\n\t\treturn nil, errors.New(\"field Region cannot be empty in request\")\n\t}\n\n\tif fmt.Sprint(req.InstanceID) == \"\" {\n\t\treturn nil, errors.New(\"field InstanceID cannot be empty in request\")\n\t}\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod: \"POST\",\n\t\tPath: \"/rdb/v1/regions/\" + fmt.Sprint(req.Region) + \"/instances/\" + fmt.Sprint(req.InstanceID) + \"/snapshots\",\n\t\tHeaders: http.Header{},\n\t}\n\n\terr = scwReq.SetBody(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar resp Snapshot\n\n\terr = s.client.Do(scwReq, &resp, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}", "func (r *ClusterUpdateRequest) marshal(writer io.Writer) error {\n\tstream := helpers.NewStream(writer)\n\tr.stream(stream)\n\treturn stream.Error\n}", "func (a Agent) SnapshotDeploy(sid int, keepFiles bool, password string, snapshot RawSnapshot) error {\n\treq := libts.Request{\n\t\tCommand: \"serversnapshotdeploy\",\n\t\tServerID: sid,\n\t\tArgs: map[string]interface{}{\n\t\t\t\"password\": password,\n\t\t},\n\t}\n\tif keepFiles {\n\t\treq.Args[\"-keepfiles\"] = \"\"\n\t}\n\treq.Args[\"version\"] = snapshot.Version\n\treq.Args[\"salt\"] = snapshot.Salt\n\treq.Args[\"data\"] = snapshot.Data\n\treturn a.Query.Do(req, nil)\n}", "func (s *Server) streamSnapshot(ci *ClientInfo, acc *Account, mset *stream, sr *SnapshotResult, req *JSApiStreamSnapshotRequest) {\n\tchunkSize := req.ChunkSize\n\tif chunkSize == 0 {\n\t\tchunkSize = defaultSnapshotChunkSize\n\t}\n\t// Setup for the chunk stream.\n\treply := req.DeliverSubject\n\tr := sr.Reader\n\tdefer r.Close()\n\n\t// Check interest for the snapshot deliver subject.\n\tinch := make(chan bool, 
1)\n\tacc.sl.RegisterNotification(req.DeliverSubject, inch)\n\tdefer acc.sl.ClearNotification(req.DeliverSubject, inch)\n\thasInterest := <-inch\n\tif !hasInterest {\n\t\t// Allow 2 seconds or so for interest to show up.\n\t\tselect {\n\t\tcase <-inch:\n\t\tcase <-time.After(2 * time.Second):\n\t\t}\n\t}\n\n\t// Create our ack flow handler.\n\t// This is very simple for now.\n\tacks := make(chan struct{}, 1)\n\tacks <- struct{}{}\n\n\t// Track bytes outstanding.\n\tvar out int32\n\n\t// We will place sequence number and size of chunk sent in the reply.\n\tackSubj := fmt.Sprintf(jsSnapshotAckT, mset.name(), nuid.Next())\n\tackSub, _ := mset.subscribeInternalUnlocked(ackSubj+\".>\", func(_ *subscription, _ *client, _ *Account, subject, _ string, _ []byte) {\n\t\tcs, _ := strconv.Atoi(tokenAt(subject, 6))\n\t\t// This is very crude and simple, but ok for now.\n\t\t// This only matters when sending multiple chunks.\n\t\tif atomic.AddInt32(&out, int32(-cs)) < defaultSnapshotWindowSize {\n\t\t\tselect {\n\t\t\tcase acks <- struct{}{}:\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t})\n\tdefer mset.unsubscribeUnlocked(ackSub)\n\n\t// TODO(dlc) - Add in NATS-Chunked-Sequence header\n\n\tfor index := 1; ; index++ {\n\t\tchunk := make([]byte, chunkSize)\n\t\tn, err := r.Read(chunk)\n\t\tchunk = chunk[:n]\n\t\tif err != nil {\n\t\t\tif n > 0 {\n\t\t\t\tmset.outq.send(newJSPubMsg(reply, _EMPTY_, _EMPTY_, nil, chunk, nil, 0))\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\t// Wait on acks for flow control if past our window size.\n\t\t// Wait up to 10ms for now if no acks received.\n\t\tif atomic.LoadInt32(&out) > defaultSnapshotWindowSize {\n\t\t\tselect {\n\t\t\tcase <-acks:\n\t\t\tcase <-inch: // Lost interest\n\t\t\t\tgoto done\n\t\t\tcase <-time.After(10 * time.Millisecond):\n\t\t\t}\n\t\t}\n\t\tackReply := fmt.Sprintf(\"%s.%d.%d\", ackSubj, len(chunk), index)\n\t\tmset.outq.send(newJSPubMsg(reply, _EMPTY_, ackReply, nil, chunk, nil, 0))\n\t\tatomic.AddInt32(&out, int32(len(chunk)))\n\t}\ndone:\n\t// Send last EOF\n\t// TODO(dlc) - place hash in header\n\tmset.outq.send(newJSPubMsg(reply, _EMPTY_, _EMPTY_, nil, nil, nil, 0))\n}", "func createSnapshot(sg *snapshotgroup.SnapshotGroup, annotations map[string]string) error {\n\ttimestamp := strconv.Itoa(int(time.Now().Unix()))\n\tannotations[TimestampAnnotation] = timestamp\n\tannotations[managedByAnnotation] = managerName\n\tannotations[GroupNameAnnotation] = sg.ObjectMeta.Name\n\n\tsnapshot := snapshotsv1.VolumeSnapshot{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: sg.ObjectMeta.Namespace,\n\t\t\tName: sg.ObjectMeta.Name + \"-\" + timestamp,\n\t\t\tAnnotations: annotations,\n\t\t},\n\t\tSpec: sg.Spec.Template.Spec,\n\t}\n\tname := getPVCName(sg)\n\tklog.Infof(\"%s/%s: creating snapshot for PVC %s\", sg.ObjectMeta.Namespace, sg.ObjectMeta.Name, name)\n\tsnapshot.Spec.Source.PersistentVolumeClaimName = &name\n\n\tmarshaled, err := json.Marshal(snapshot)\n\tif err != nil {\n\t\treturn err\n\t}\n\tunst := unstructured.Unstructured{\n\t\tObject: map[string]interface{}{},\n\t}\n\terr = json.Unmarshal(marshaled, &unst.Object)\n\tif err != nil {\n\t\treturn err\n\t}\n\tclient := kube.GetClient()\n\tunst.Object[\"kind\"] = \"VolumeSnapshot\"\n\tunst.Object[\"apiVersion\"] = client.VolumeSnapshotVersion\n\n\tif strings.HasSuffix(client.VolumeSnapshotVersion, \"v1alpha1\") {\n\t\t// There is a slight change in `source` from alpha to beta\n\t\tspec := unst.Object[\"spec\"].(map[string]interface{})\n\t\tsource := spec[\"source\"].(map[string]interface{})\n\t\tdelete(source, 
\"persistentVolumeClaimName\")\n\t\tsource[\"name\"] = name\n\t\tsource[\"kind\"] = \"PersistentVolumeClaim\"\n\t\tspec[\"source\"] = source\n\t\tunst.Object[\"spec\"] = spec\n\t}\n\n\tsnapClient := client.SnapshotClient.Namespace(snapshot.ObjectMeta.Namespace)\n\t_, err = snapClient.Create(&unst, metav1.CreateOptions{})\n\treturn err\n}", "func SaveSnapshot(k, Lk, Xk, Kk, Mk string) error {\n\t// we need to wrap PUT request our own\n\tclient := &http.Client{}\n\n\treqBody := [][]string{{k, Lk, Xk, Kk, Mk}}\n\turl := \"https://nalusi-b235sdkoha-de.a.run.app/nalupi?spreadsheetID=1FMUFV2z_MaccKswNLh3-x2vDeBY3RRNNzzAusjh848c&a1Range=Data!A2:E2\"\n\tb, _ := json.Marshal(reqBody)\n\treq, err := http.NewRequest(\"PUT\", url, bytes.NewBuffer(b))\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tresponse, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif response.StatusCode == 200 {\n\t\treturn nil\n\t}\n\treturn errors.New(\"Server error\")\n}", "func NewClusterSnapshot(ctx *pulumi.Context,\n\tname string, args *ClusterSnapshotArgs, opts ...pulumi.ResourceOption) (*ClusterSnapshot, error) {\n\tif args == nil || args.DbClusterIdentifier == nil {\n\t\treturn nil, errors.New(\"missing required argument 'DbClusterIdentifier'\")\n\t}\n\tif args == nil || args.DbClusterSnapshotIdentifier == nil {\n\t\treturn nil, errors.New(\"missing required argument 'DbClusterSnapshotIdentifier'\")\n\t}\n\tif args == nil {\n\t\targs = &ClusterSnapshotArgs{}\n\t}\n\tvar resource ClusterSnapshot\n\terr := ctx.RegisterResource(\"aws:neptune/clusterSnapshot:ClusterSnapshot\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (kvm *Clone) Snapshot(wr io.Writer) error {\n\tkvm.mu.RLock()\n\tdefer kvm.mu.RUnlock()\n\tdata, err := json.Marshal(kvm.keys)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, err := wr.Write(data); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (m *MockRDSAPI) CopyDBSnapshotRequest(arg0 *rds.CopyDBSnapshotInput) (*request.Request, *rds.CopyDBSnapshotOutput) {\n\tret := m.ctrl.Call(m, \"CopyDBSnapshotRequest\", arg0)\n\tret0, _ := ret[0].(*request.Request)\n\tret1, _ := ret[1].(*rds.CopyDBSnapshotOutput)\n\treturn ret0, ret1\n}", "func (b *ClusterNodesBuilder) Copy(object *ClusterNodes) *ClusterNodesBuilder {\n\tif object == nil {\n\t\treturn b\n\t}\n\tb.compute = object.compute\n\tb.infra = object.infra\n\tb.master = object.master\n\tb.total = object.total\n\treturn b\n}", "func (c *PresignClient) PresignCopyDBSnapshot(ctx context.Context, params *CopyDBSnapshotInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) {\n\tif params == nil {\n\t\tparams = &CopyDBSnapshotInput{}\n\t}\n\toptions := c.options.copy()\n\tfor _, fn := range optFns {\n\t\tfn(&options)\n\t}\n\tclientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption)\n\n\tresult, _, err := c.client.invokeOperation(ctx, \"CopyDBSnapshot\", params, clientOptFns,\n\t\tc.client.addOperationCopyDBSnapshotMiddlewares,\n\t\tpresignConverter(options).convertToPresignMiddleware,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := result.(*v4.PresignedHTTPRequest)\n\treturn out, nil\n}", "func (r ModifyDBSnapshotAttributeRequest) Send(ctx context.Context) (*ModifyDBSnapshotAttributeResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := 
&ModifyDBSnapshotAttributeResponse{\n\t\tModifyDBSnapshotAttributeOutput: r.Request.Data.(*ModifyDBSnapshotAttributeOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (m *MockRDSAPI) CopyDBClusterSnapshotWithContext(arg0 aws.Context, arg1 *rds.CopyDBClusterSnapshotInput, arg2 ...request.Option) (*rds.CopyDBClusterSnapshotOutput, error) {\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"CopyDBClusterSnapshotWithContext\", varargs...)\n\tret0, _ := ret[0].(*rds.CopyDBClusterSnapshotOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func ExampleRDS_CreateDBClusterSnapshot_shared00() {\n\tsvc := rds.New(session.New())\n\tinput := &rds.CreateDBClusterSnapshotInput{\n\t\tDBClusterIdentifier: aws.String(\"mydbclustersnapshot\"),\n\t\tDBClusterSnapshotIdentifier: aws.String(\"mydbcluster\"),\n\t}\n\n\tresult, err := svc.CreateDBClusterSnapshot(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase rds.ErrCodeDBClusterSnapshotAlreadyExistsFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBClusterSnapshotAlreadyExistsFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidDBClusterStateFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidDBClusterStateFault, aerr.Error())\n\t\t\tcase rds.ErrCodeDBClusterNotFoundFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBClusterNotFoundFault, aerr.Error())\n\t\t\tcase rds.ErrCodeSnapshotQuotaExceededFault:\n\t\t\t\tfmt.Println(rds.ErrCodeSnapshotQuotaExceededFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidDBClusterSnapshotStateFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidDBClusterSnapshotStateFault, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (r ResizeClusterRequest) Send(ctx context.Context) (*ResizeClusterResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &ResizeClusterResponse{\n\t\tResizeClusterOutput: r.Request.Data.(*ResizeClusterOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (t transporter) SendSnapshotRecoveryRequest(server *raft.Server, peer *raft.Peer, req *raft.SnapshotRecoveryRequest) *raft.SnapshotRecoveryResponse {\n\tvar aersp *raft.SnapshotRecoveryResponse\n\tvar b bytes.Buffer\n\tjson.NewEncoder(&b).Encode(req)\n\n\tdebug(\"Send SnapshotRecovery to %s [Last Term: %d, LastIndex %d]\", peer.Name(),\n\t\treq.LastTerm, req.LastIndex)\n\n\tresp, err := t.Post(fmt.Sprintf(\"%s/snapshotRecovery\", peer.Name()), &b)\n\n\tif resp != nil {\n\t\tdefer resp.Body.Close()\n\t\taersp = &raft.SnapshotRecoveryResponse{}\n\t\tif err = json.NewDecoder(resp.Body).Decode(&aersp); err == nil || err == io.EOF {\n\t\t\treturn aersp\n\t\t}\n\t}\n\treturn aersp\n}", "func (s *storageCeph) copyWithSnapshots(sourceVolumeName string,\n\ttargetVolumeName string, sourceParentSnapshot string) error {\n\tlogger.Debugf(`Creating non-sparse copy of RBD storage volume \"%s to \"%s\"`, sourceVolumeName, targetVolumeName)\n\n\targs := []string{\n\t\t\"export-diff\",\n\t\t\"--id\", s.UserName,\n\t\t\"--cluster\", s.ClusterName,\n\t\tsourceVolumeName,\n\t}\n\n\tif sourceParentSnapshot != \"\" {\n\t\targs = append(args, \"--from-snap\", 
sourceParentSnapshot)\n\t}\n\n\t// redirect output to stdout\n\targs = append(args, \"-\")\n\n\trbdSendCmd := exec.Command(\"rbd\", args...)\n\trbdRecvCmd := exec.Command(\n\t\t\"rbd\",\n\t\t\"--id\", s.UserName,\n\t\t\"import-diff\",\n\t\t\"--cluster\", s.ClusterName,\n\t\t\"-\",\n\t\ttargetVolumeName)\n\n\trbdRecvCmd.Stdin, _ = rbdSendCmd.StdoutPipe()\n\trbdRecvCmd.Stdout = os.Stdout\n\trbdRecvCmd.Stderr = os.Stderr\n\n\terr := rbdRecvCmd.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = rbdSendCmd.Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = rbdRecvCmd.Wait()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogger.Debugf(`Created non-sparse copy of RBD storage volume \"%s\" to \"%s\"`, sourceVolumeName, targetVolumeName)\n\treturn nil\n}", "func (r UpdateClusterRequest) Send(ctx context.Context) (*UpdateClusterResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &UpdateClusterResponse{\n\t\tUpdateClusterOutput: r.Request.Data.(*UpdateClusterOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (r CreateRelationalDatabaseFromSnapshotRequest) Send(ctx context.Context) (*CreateRelationalDatabaseFromSnapshotResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &CreateRelationalDatabaseFromSnapshotResponse{\n\t\tCreateRelationalDatabaseFromSnapshotOutput: r.Request.Data.(*CreateRelationalDatabaseFromSnapshotOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (r ModifyClusterRequest) Send(ctx context.Context) (*ModifyClusterResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &ModifyClusterResponse{\n\t\tModifyClusterOutput: r.Request.Data.(*ModifyClusterOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (b *AccessReviewRequestBuilder) Copy(object *AccessReviewRequest) *AccessReviewRequestBuilder {\n\tif object == nil {\n\t\treturn b\n\t}\n\tb.bitmap_ = object.bitmap_\n\tb.accountUsername = object.accountUsername\n\tb.action = object.action\n\tb.clusterID = object.clusterID\n\tb.clusterUUID = object.clusterUUID\n\tb.organizationID = object.organizationID\n\tb.resourceType = object.resourceType\n\tb.subscriptionID = object.subscriptionID\n\treturn b\n}", "func (s *OsdCsiServer) CreateSnapshot(\n\tctx context.Context,\n\treq *csi.CreateSnapshotRequest,\n) (*csi.CreateSnapshotResponse, error) {\n\n\tif len(req.GetSourceVolumeId()) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"Volume id must be provided\")\n\t} else if len(req.GetName()) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"Name must be provided\")\n\t}\n\n\t// Get secret if any was passed\n\tctx = s.setupContext(ctx, req.GetSecrets())\n\tctx, cancel := grpcutil.WithDefaultTimeout(ctx)\n\tdefer cancel()\n\n\t// Get any labels passed in by the CO\n\t_, locator, _, err := s.specHandler.SpecFromOpts(req.GetParameters())\n\tif err != nil {\n\t\treturn nil, 
status.Errorf(codes.InvalidArgument, \"Unable to get parameters: %v\", err)\n\t}\n\t// Determine the snapshot type to create from the volume labels\n\tsnapshotType, ok := locator.VolumeLabels[osdSnapshotLabelsTypeKey]\n\tif !ok {\n\t\tsnapshotType = DriverTypeLocal\n\t}\n\tswitch snapshotType {\n\tcase DriverTypeCloud:\n\t\treturn s.createCloudBackup(ctx, req)\n\tcase DriverTypeLocal:\n\t\tfallthrough\n\tdefault:\n\t\treturn s.createLocalSnapshot(ctx, req)\n\t}\n}", "func (c *NOOPSnapshotConnection) SendSnapshotChunk(chunk raftpb.SnapshotChunk) error {\n\tif c.req.Fail() {\n\t\treturn ErrRequestedToFail\n\t}\n\tc.sendChunksCount++\n\treturn nil\n}", "func (z *zfsctl) Snapshot(ctx context.Context, name string, properties map[string]string) *execute {\n\targs := []string{\"snapshot\", \"-r\"}\n\tif properties != nil {\n\t\tkv := \"-o \"\n\t\tfor k, v := range properties {\n\t\t\tkv += fmt.Sprintf(\"%s=%s \", k, v)\n\t\t}\n\t\targs = append(args, kv)\n\t}\n\targs = append(args, name)\n\treturn &execute{ctx: ctx, name: z.cmd, args: args}\n}", "func (rf *Raft) StartSnapshot(snapshotInBytes []byte) {\n rf.mu.Lock()\n defer rf.mu.Unlock()\n\n var snapshot Snapshot\n r := bytes.NewBuffer(snapshotInBytes)\n d := labgob.NewDecoder(r)\n if d.Decode(&snapshot.ServerMap) != nil {\n return\n }\n if d.Decode(&snapshot.LastApplyIdMap) != nil {\n return\n }\n if d.Decode(&snapshot.LastLogIndex) != nil {\n return\n }\n\n lastLogIndexInLogView := rf.convertToRaftLogViewIndex(snapshot.LastLogIndex)\n if lastLogIndexInLogView > 0 && lastLogIndexInLogView < len(rf.log) {\n lastIncludedTerm := rf.log[lastLogIndexInLogView].Term\n rf.cutoffLogBeforeIndex(lastLogIndexInLogView, lastIncludedTerm)\n rf.snapshottedIndex = snapshot.LastLogIndex\n\n rf.persistWithSnapshotInBytes(snapshotInBytes)\n\n if rf.state == \"Leader\" {\n go rf.sendInstallSnapshotToMultipleFollowers(snapshot.LastLogIndex, lastIncludedTerm)\n }\n\n }\n return\n}", "func (rf *Raft) InstallSnapshot(args *InstallSnapshotArgs, reply *InstallSnapshotReply) {\n rf.mu.Lock()\n defer rf.mu.Unlock()\n\n DISPrintf(\"Server(%d) receive InstallSnapshot RPC from Leader(%d)[LeaderTerm=%d, LastIncludedIndex=%d, LastIncludedTerm=%d]\", rf.me, args.LeaderId,\n args.LeaderTerm, args.LastIncludedIndex, args.LastIncludedTerm)\n raftViewIndex := rf.convertToRaftLogViewIndex(args.LastIncludedIndex)\n\n if args.LeaderTerm < rf.currentTerm || raftViewIndex < 0 {\n reply.FollowerTerm = rf.currentTerm\n return\n }\n\n if args.LeaderTerm > rf.currentTerm {\n rf.currentTerm = args.LeaderTerm\n rf.mu.Unlock()\n rf.convertToFollower()\n rf.mu.Lock()\n }\n\n reply.FollowerTerm = rf.currentTerm\n\n if raftViewIndex < len(rf.log) && args.LastIncludedTerm == rf.log[raftViewIndex].Term {\n rf.log = rf.log[raftViewIndex:]\n } else {\n rf.log = make([]Entry, 1, 100)\n rf.log[0].Term = args.LastIncludedTerm\n }\n\n rf.snapshottedIndex = args.LastIncludedIndex\n if rf.commitIndex < args.LastIncludedIndex {\n rf.commitIndex = args.LastIncludedIndex\n }\n if rf.lastApplied < args.LastIncludedIndex {\n rf.lastApplied = rf.commitIndex\n }\n\n rf.persistWithSnapshotInBytes(args.Data)\n\n if rf.lastApplied > args.LastIncludedIndex {\n return\n }\n\n applyMsg := ApplyMsg{}\n applyMsg.CommandValid = false\n applyMsg.CommandSnapshot = args.Data\n\n go func() {\n DISPrintf(\"Follower(%d) send a snapshot to its applyCh\", rf.me)\n rf.applyCh <- applyMsg\n }()\n}", "func (a *Client) CreateSnapshot(params *CreateSnapshotParams, opts ...ClientOption) (*CreateSnapshotOK, error) {\n\t// TODO: Validate 
the params before sending\n\tif params == nil {\n\t\tparams = NewCreateSnapshotParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"createSnapshot\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/astrolabe/{service}/{protectedEntityID}/snapshots\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &CreateSnapshotReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*CreateSnapshotOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for createSnapshot: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (m *MockRDSAPI) RestoreDBClusterFromSnapshotRequest(arg0 *rds.RestoreDBClusterFromSnapshotInput) (*request.Request, *rds.RestoreDBClusterFromSnapshotOutput) {\n\tret := m.ctrl.Call(m, \"RestoreDBClusterFromSnapshotRequest\", arg0)\n\tret0, _ := ret[0].(*request.Request)\n\tret1, _ := ret[1].(*rds.RestoreDBClusterFromSnapshotOutput)\n\treturn ret0, ret1\n}", "func (r CreateClusterRequest) Send(ctx context.Context) (*CreateClusterResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &CreateClusterResponse{\n\t\tCreateClusterOutput: r.Request.Data.(*CreateClusterOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (o *SnapmirrorCreateRequest) SourceCluster() string {\n\tvar r string\n\tif o.SourceClusterPtr == nil {\n\t\treturn r\n\t}\n\tr = *o.SourceClusterPtr\n\treturn r\n}", "func (syn *kubeSyncer) performSnapshot() ([]model.KVPair, map[string]bool, resourceVersions) {\n\topts := k8sapi.ListOptions{}\n\tversions := resourceVersions{}\n\tvar snap []model.KVPair\n\tvar keys map[string]bool\n\n\t// Loop until we are able to access the API successfully.\n\tfor {\n\t\t// Initialize the values to return.\n\t\tsnap = []model.KVPair{}\n\t\tkeys = map[string]bool{}\n\n\t\t// Get Namespaces (Profiles)\n\t\tlog.Info(\"Syncing Namespaces\")\n\t\tnsList, err := syn.kc.clientSet.Namespaces().List(opts)\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"Error syncing Namespaces, retrying: %s\", err)\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tlog.Info(\"Received Namespace List() response\")\n\n\t\tversions.namespaceVersion = nsList.ListMeta.ResourceVersion\n\t\tfor _, ns := range nsList.Items {\n\t\t\t// The Syncer API expects a profile to be broken into its underlying\n\t\t\t// components - rules, tags, labels.\n\t\t\tprofile, err := syn.kc.converter.namespaceToProfile(&ns)\n\t\t\tif err != nil {\n\t\t\t\tlog.Panicf(\"%s\", err)\n\t\t\t}\n\t\t\trules, tags, labels := compat.ToTagsLabelsRules(profile)\n\t\t\trules.Revision = profile.Revision\n\t\t\ttags.Revision = profile.Revision\n\t\t\tlabels.Revision = profile.Revision\n\n\t\t\t// Also create a Policy for this Namespace.\n\t\t\tpolicy, err := syn.kc.converter.namespaceToPolicy(&ns)\n\t\t\tif err != nil {\n\t\t\t\tlog.Panicf(\"%s\", err)\n\t\t\t}\n\n\t\t\tsnap = append(snap, *rules, *tags, *labels, 
*policy)\n\t\t\tkeys[rules.Key.String()] = true\n\t\t\tkeys[tags.Key.String()] = true\n\t\t\tkeys[labels.Key.String()] = true\n\t\t\tkeys[policy.Key.String()] = true\n\t\t}\n\n\t\t// Get NetworkPolicies (Policies)\n\t\tlog.Info(\"Syncing NetworkPolicy\")\n\t\tnpList := extensions.NetworkPolicyList{}\n\t\terr = syn.kc.clientSet.Extensions().RESTClient().\n\t\t\tGet().\n\t\t\tResource(\"networkpolicies\").\n\t\t\tTimeout(10 * time.Second).\n\t\t\tDo().Into(&npList)\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"Error querying NetworkPolicies during snapshot, retrying: %s\", err)\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tlog.Info(\"Received NetworkPolicy List() response\")\n\n\t\tversions.networkPolicyVersion = npList.ListMeta.ResourceVersion\n\t\tfor _, np := range npList.Items {\n\t\t\tpol, _ := syn.kc.converter.networkPolicyToPolicy(&np)\n\t\t\tsnap = append(snap, *pol)\n\t\t\tkeys[pol.Key.String()] = true\n\t\t}\n\n\t\t// Get Pods (WorkloadEndpoints)\n\t\tlog.Info(\"Syncing Pods\")\n\t\tpoList, err := syn.kc.clientSet.Pods(\"\").List(opts)\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"Error querying Pods during snapshot, retrying: %s\", err)\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tlog.Info(\"Received Pod List() response\")\n\n\t\tversions.podVersion = poList.ListMeta.ResourceVersion\n\t\tfor _, po := range poList.Items {\n\t\t\t// Ignore any updates for pods which are not ready / valid.\n\t\t\tif !syn.kc.converter.isCalicoPod(&po) {\n\t\t\t\tlog.Debugf(\"Skipping pod %s/%s\", po.ObjectMeta.Namespace, po.ObjectMeta.Name)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Convert to a workload endpoint.\n\t\t\twep, err := syn.kc.converter.podToWorkloadEndpoint(&po)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).Error(\"Failed to convert pod to workload endpoint\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsnap = append(snap, *wep)\n\t\t\tkeys[wep.Key.String()] = true\n\t\t}\n\n\t\t// Sync GlobalConfig.\n\t\tlog.Info(\"Syncing GlobalConfig\")\n\t\tconfList, err := syn.kc.listGlobalConfig(model.GlobalConfigListOptions{})\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"Error querying GlobalConfig during snapshot, retrying: %s\", err)\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tlog.Info(\"Received GlobalConfig List() response\")\n\n\t\tfor _, c := range confList {\n\t\t\tsnap = append(snap, *c)\n\t\t\tkeys[c.Key.String()] = true\n\t\t}\n\n\t\t// Sync IP Pools.\n\t\tlog.Info(\"Syncing IP Pools\")\n\t\tpoolList, err := syn.kc.List(model.IPPoolListOptions{})\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"Error querying IP Pools during snapshot, retrying: %s\", err)\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tlog.Info(\"Received IP Pools List() response\")\n\n\t\tfor _, p := range poolList {\n\t\t\tsnap = append(snap, *p)\n\t\t\tkeys[p.Key.String()] = true\n\t\t}\n\n\t\tlog.Info(\"Syncing Nodes\")\n\t\tnoList, err := syn.kc.clientSet.Nodes().List(opts)\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"Error syncing Nodes, retrying: %s\", err)\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tlog.Info(\"Received Node List() response\")\n\n\t\tversions.nodeVersion = noList.ListMeta.ResourceVersion\n\t\tfor _, no := range noList.Items {\n\t\t\tnode, err := resources.K8sNodeToCalico(&no)\n\t\t\tif err != nil {\n\t\t\t\tlog.Panicf(\"%s\", err)\n\t\t\t}\n\t\t\tif node != nil {\n\t\t\t\tsnap = append(snap, *node)\n\t\t\t\tkeys[node.Key.String()] = true\n\t\t\t}\n\t\t}\n\n\t\t// Include ready state.\n\t\tready, err := 
syn.kc.getReadyStatus(model.ReadyFlagKey{})\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"Error querying ready status during snapshot, retrying: %s\", err)\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tsnap = append(snap, *ready)\n\t\tkeys[ready.Key.String()] = true\n\n\t\tlog.Infof(\"Snapshot resourceVersions: %+v\", versions)\n\t\tlog.Debugf(\"Created snapshot: %+v\", snap)\n\t\treturn snap, keys, versions\n\t}\n}", "func ExampleRDS_CopyDBSnapshot_shared00() {\n\tsvc := rds.New(session.New())\n\tinput := &rds.CopyDBSnapshotInput{\n\t\tSourceDBSnapshotIdentifier: aws.String(\"rds:database-mysql-2019-06-06-08-38\"),\n\t\tTargetDBSnapshotIdentifier: aws.String(\"mydbsnapshotcopy\"),\n\t}\n\n\tresult, err := svc.CopyDBSnapshot(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase rds.ErrCodeDBSnapshotAlreadyExistsFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBSnapshotAlreadyExistsFault, aerr.Error())\n\t\t\tcase rds.ErrCodeDBSnapshotNotFoundFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBSnapshotNotFoundFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidDBSnapshotStateFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidDBSnapshotStateFault, aerr.Error())\n\t\t\tcase rds.ErrCodeSnapshotQuotaExceededFault:\n\t\t\t\tfmt.Println(rds.ErrCodeSnapshotQuotaExceededFault, aerr.Error())\n\t\t\tcase rds.ErrCodeKMSKeyNotAccessibleFault:\n\t\t\t\tfmt.Println(rds.ErrCodeKMSKeyNotAccessibleFault, aerr.Error())\n\t\t\tcase rds.ErrCodeCustomAvailabilityZoneNotFoundFault:\n\t\t\t\tfmt.Println(rds.ErrCodeCustomAvailabilityZoneNotFoundFault, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func createSnapshot(\n\tw io.Writer,\n\tprojectID, diskName, snapshotName, zone, region, location, diskProjectID string,\n) error {\n\t// projectID := \"your_project_id\"\n\t// diskName := \"your_disk_name\"\n\t// snapshotName := \"your_snapshot_name\"\n\t// zone := \"europe-central2-b\"\n\t// region := \"europe-central2\"\n\t// location = \"europe-central2\"\n\t// diskProjectID = \"YOUR_DISK_PROJECT_ID\"\n\n\tctx := context.Background()\n\n\tsnapshotsClient, err := compute.NewSnapshotsRESTClient(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"NewSnapshotsRESTClient: %w\", err)\n\t}\n\tdefer snapshotsClient.Close()\n\n\tif zone == \"\" && region == \"\" {\n\t\treturn fmt.Errorf(\"you need to specify `zone` or `region` for this function to work\")\n\t}\n\n\tif zone != \"\" && region != \"\" {\n\t\treturn fmt.Errorf(\"you can't set both `zone` and `region` parameters\")\n\t}\n\n\tif diskProjectID == \"\" {\n\t\tdiskProjectID = projectID\n\t}\n\n\tdisk := &computepb.Disk{}\n\tlocations := []string{}\n\tif location != \"\" {\n\t\tlocations = append(locations, location)\n\t}\n\n\tif zone != \"\" {\n\t\tdisksClient, err := compute.NewDisksRESTClient(ctx)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"NewDisksRESTClient: %w\", err)\n\t\t}\n\t\tdefer disksClient.Close()\n\n\t\tgetDiskReq := &computepb.GetDiskRequest{\n\t\t\tProject: projectID,\n\t\t\tZone: zone,\n\t\t\tDisk: diskName,\n\t\t}\n\n\t\tdisk, err = disksClient.Get(ctx, getDiskReq)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to get disk: %w\", err)\n\t\t}\n\t} else {\n\t\tregionDisksClient, err := compute.NewRegionDisksRESTClient(ctx)\n\t\tif err != nil {\n\t\t\treturn 
fmt.Errorf(\"NewRegionDisksRESTClient: %w\", err)\n\t\t}\n\t\tdefer regionDisksClient.Close()\n\n\t\tgetDiskReq := &computepb.GetRegionDiskRequest{\n\t\t\tProject: projectID,\n\t\t\tRegion: region,\n\t\t\tDisk: diskName,\n\t\t}\n\n\t\tdisk, err = regionDisksClient.Get(ctx, getDiskReq)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to get disk: %w\", err)\n\t\t}\n\t}\n\n\treq := &computepb.InsertSnapshotRequest{\n\t\tProject: projectID,\n\t\tSnapshotResource: &computepb.Snapshot{\n\t\t\tName: proto.String(snapshotName),\n\t\t\tSourceDisk: proto.String(disk.GetSelfLink()),\n\t\t\tStorageLocations: locations,\n\t\t},\n\t}\n\n\top, err := snapshotsClient.Insert(ctx, req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to create snapshot: %w\", err)\n\t}\n\n\tif err = op.Wait(ctx); err != nil {\n\t\treturn fmt.Errorf(\"unable to wait for the operation: %w\", err)\n\t}\n\n\tfmt.Fprintf(w, \"Snapshot created\\n\")\n\n\treturn nil\n}", "func (r RestoreDBInstanceFromDBSnapshotRequest) Send(ctx context.Context) (*RestoreDBInstanceFromDBSnapshotResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &RestoreDBInstanceFromDBSnapshotResponse{\n\t\tRestoreDBInstanceFromDBSnapshotOutput: r.Request.Data.(*RestoreDBInstanceFromDBSnapshotOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (c *Client) WalkSnapshot(ctx context.Context, snapshotter string, fn func(context.Context, snapshots.Info) error) error {\n\twrapperCli, err := c.Get(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get a containerd grpc client: %v\", err)\n\t}\n\n\t// if not set specific snapshotter, set snapshotter to current snaphotter\n\tif snapshotter == \"\" {\n\t\tsnapshotter = CurrentSnapshotterName(ctx)\n\t}\n\n\tservice := wrapperCli.client.SnapshotService(snapshotter)\n\tdefer service.Close()\n\n\treturn service.Walk(ctx, fn)\n}", "func (s CopySnapshotInput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (c *Client) Send(containerRank container.Rank, destHost string, migrationType container.MigrationType) error {\n\targs := &SendArgs{containerRank, destHost, migrationType}\n\n\tvar reply bool\n\terr := c.client.Call(rpcSend, args, &reply)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (s *Snapshot) Copy() *Snapshot {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tcp := Snapshot{\n\t\tTime: s.Time,\n\t\tLabels: append(make([]string, 0, len(s.Labels)), s.Labels...),\n\t\tCounters: append(make([]events.Counter, 0, len(s.Counters)), s.Counters...),\n\t}\n\treturn &cp\n}", "func ExampleRDS_RestoreDBClusterFromSnapshot_shared00() {\n\tsvc := rds.New(session.New())\n\tinput := &rds.RestoreDBClusterFromSnapshotInput{\n\t\tDBClusterIdentifier: aws.String(\"newdbcluster\"),\n\t\tEngine: aws.String(\"aurora-postgresql\"),\n\t\tEngineVersion: aws.String(\"10.7\"),\n\t\tSnapshotIdentifier: aws.String(\"test-instance-snapshot\"),\n\t}\n\n\tresult, err := svc.RestoreDBClusterFromSnapshot(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase rds.ErrCodeDBClusterAlreadyExistsFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBClusterAlreadyExistsFault, aerr.Error())\n\t\t\tcase rds.ErrCodeDBClusterQuotaExceededFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBClusterQuotaExceededFault, aerr.Error())\n\t\t\tcase rds.ErrCodeStorageQuotaExceededFault:\n\t\t\t\tfmt.Println(rds.ErrCodeStorageQuotaExceededFault, aerr.Error())\n\t\t\tcase 
rds.ErrCodeDBSubnetGroupNotFoundFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBSubnetGroupNotFoundFault, aerr.Error())\n\t\t\tcase rds.ErrCodeDBSnapshotNotFoundFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBSnapshotNotFoundFault, aerr.Error())\n\t\t\tcase rds.ErrCodeDBClusterSnapshotNotFoundFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBClusterSnapshotNotFoundFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInsufficientDBClusterCapacityFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInsufficientDBClusterCapacityFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInsufficientStorageClusterCapacityFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInsufficientStorageClusterCapacityFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidDBSnapshotStateFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidDBSnapshotStateFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidDBClusterSnapshotStateFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidDBClusterSnapshotStateFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidVPCNetworkStateFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidVPCNetworkStateFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidRestoreFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidRestoreFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidSubnet:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidSubnet, aerr.Error())\n\t\t\tcase rds.ErrCodeOptionGroupNotFoundFault:\n\t\t\t\tfmt.Println(rds.ErrCodeOptionGroupNotFoundFault, aerr.Error())\n\t\t\tcase rds.ErrCodeKMSKeyNotAccessibleFault:\n\t\t\t\tfmt.Println(rds.ErrCodeKMSKeyNotAccessibleFault, aerr.Error())\n\t\t\tcase rds.ErrCodeDomainNotFoundFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDomainNotFoundFault, aerr.Error())\n\t\t\tcase rds.ErrCodeDBClusterParameterGroupNotFoundFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBClusterParameterGroupNotFoundFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidDBInstanceStateFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidDBInstanceStateFault, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (c *TestClient) CreateSnapshot(project, zone, disk string, s *compute.Snapshot) error {\n\tif c.CreateSnapshotFn != nil {\n\t\treturn c.CreateSnapshotFn(project, zone, disk, s)\n\t}\n\treturn c.client.CreateSnapshot(project, zone, disk, s)\n}", "func (r CreateFileSystemFromBackupRequest) Send(ctx context.Context) (*CreateFileSystemFromBackupResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &CreateFileSystemFromBackupResponse{\n\t\tCreateFileSystemFromBackupOutput: r.Request.Data.(*CreateFileSystemFromBackupOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (r *Client) CreateSnapshot(ctx context.Context, a CreateSnapshotRequest) (StatusMessage, error) {\n\tvar (\n\t\traw []byte\n\t\tresp StatusMessage\n\t\terr error\n\t\tcode int\n\t)\n\tif raw, err = json.Marshal(a); err != nil {\n\t\treturn StatusMessage{}, errors.Wrap(err, \"marshal request\")\n\t}\n\tif raw, code, err = r.post(ctx, \"api/snapshots\", nil, raw); err != nil {\n\t\treturn StatusMessage{}, errors.Wrap(err, \"create snapshot\")\n\t}\n\tif code/100 != 2 {\n\t\treturn StatusMessage{}, fmt.Errorf(\"bad response: %d\", code)\n\t}\n\tif err = json.Unmarshal(raw, &resp); err != nil {\n\t\treturn StatusMessage{}, errors.Wrap(err, \"unmarshal response message\")\n\t}\n\treturn resp, 
nil\n}", "func NewExportSnapshotRequest(server string, id string) (*http.Request, error) {\n\tvar err error\n\n\tvar pathParam0 string\n\n\tpathParam0, err = runtime.StyleParam(\"simple\", false, \"id\", id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/snapshot/%s:export\", pathParam0)\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"POST\", queryUrl.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn req, nil\n}", "func (b *ClusterUpgradeBuilder) Copy(object *ClusterUpgrade) *ClusterUpgradeBuilder {\n\tif object == nil {\n\t\treturn b\n\t}\n\tb.bitmap_ = object.bitmap_\n\tb.available = object.available\n\tb.state = object.state\n\tb.updatedTimestamp = object.updatedTimestamp\n\tb.version = object.version\n\treturn b\n}", "func ExampleRDS_ModifyDBClusterSnapshotAttribute_shared00() {\n\tsvc := rds.New(session.New())\n\tinput := &rds.ModifyDBClusterSnapshotAttributeInput{\n\t\tAttributeName: aws.String(\"restore\"),\n\t\tDBClusterSnapshotIdentifier: aws.String(\"myclustersnapshot\"),\n\t\tValuesToAdd: []*string{\n\t\t\taws.String(\"123456789012\"),\n\t\t},\n\t}\n\n\tresult, err := svc.ModifyDBClusterSnapshotAttribute(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase rds.ErrCodeDBClusterSnapshotNotFoundFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBClusterSnapshotNotFoundFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidDBClusterSnapshotStateFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidDBClusterSnapshotStateFault, aerr.Error())\n\t\t\tcase rds.ErrCodeSharedSnapshotQuotaExceededFault:\n\t\t\t\tfmt.Println(rds.ErrCodeSharedSnapshotQuotaExceededFault, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func Create(client *golangsdk.ServiceClient, opts CreateOptsBuilder, clusterId string) (r CreateResult) {\n\tb, err := opts.ToSnapshotCreateMap()\n\tif err != nil {\n\t\tr.Err = err\n\t\treturn\n\t}\n\t_, r.Err = client.Post(createURL(client, clusterId), b, &r.Body, &golangsdk.RequestOpts{\n\t\tOkCodes: []int{201},\n\t})\n\treturn\n}", "func (r *ClusterUpdateRequest) Send() (result *ClusterUpdateResponse, err error) {\n\treturn r.SendContext(context.Background())\n}", "func DumpSnapshotData(snapshot Snapshot, blockNumber *big.Int) {\n\t//AddColumnWork\n\tinsertStatement := \"INSERT INTO snapshot_data VALUES ($1, $2, $3, $4, $5, $6, $7)\" //AddColumnWork\n\n\tfor address := range snapshot {\n\t\tsnapshotRow := snapshot[address]\n\t\t_, err := connection.DBCLIENT.Exec(insertStatement,\n\t\t\tblockNumber.String(),\n\t\t\taddress,\n\t\t\tsnapshotRow.SkaleTokenBalance.String(),\n\t\t\tsnapshotRow.SkaleTokenLockedBalance.String(),\n\t\t\tsnapshotRow.SkaleTokenDelegatedBalance.String(),\n\t\t\tsnapshotRow.SkaleTokenSlashedBalance.String(),\n\t\t\tsnapshotRow.SkaleTokenRewards.String())\n\t\t//AddColumnWork //Order Matters\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Could not write snapshot to database: %s\\n\", err)\n\t\t}\n\n\t}\n}", "func (a *SnapshotApiService) SnapshotsPut(ctx _context.Context, snapshotId string, snapshot Snapshot, optionals 
*SnapshotsPutOpts) (Snapshot, *APIResponse, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPut\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue Snapshot\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/snapshots/{snapshotId}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"snapshotId\"+\"}\", _neturl.PathEscape(parameterToString(snapshotId, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif optionals != nil && optionals.Pretty != nil {\n\t\tlocalVarQueryParams.Add(\"pretty\", parameterToString(*optionals.Pretty, \"\"))\n\t}\n\tif optionals != nil && optionals.Depth != nil {\n\t\tlocalVarQueryParams.Add(\"depth\", parameterToString(*optionals.Depth, \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif optionals != nil && optionals.XContractNumber != nil {\n\t\tlocalVarHeaderParams[\"X-Contract-Number\"] = parameterToString(*optionals.XContractNumber, \"\")\n\t}\n\t// body params\n\tlocalVarPostBody = &snapshot\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tlocalVarAPIResponse := &APIResponse {\n\t\tResponse: localVarHTTPResponse,\n\t\tMethod: localVarHTTPMethod,\n\t\tRequestURL: localVarPath,\n\t\tOperation: \"SnapshotsPut\",\n\t}\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarAPIResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarAPIResponse.Payload = localVarBody\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarAPIResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarAPIResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\treturn localVarReturnValue, localVarAPIResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, 
localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarAPIResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarAPIResponse, nil\n}", "func (j JoinBySnapshotUIObject) joinBySnapshotCLI(joinBySnapshotObject JoinBySnapshotUIObject) error {\n\n\tfor _, orgName := range joinBySnapshotObject.ChannelOpt.OrgName {\n\t\tcurrentDir, err := paths.GetCurrentDir()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvar connProfilePath string\n\t\tif strings.Contains(joinBySnapshotObject.ConnProfilePath, \".yaml\") || strings.Contains(joinBySnapshotObject.ConnProfilePath, \".json\") {\n\t\t\tconnProfilePath = joinBySnapshotObject.ConnProfilePath\n\t\t} else {\n\t\t\tconnProfilePath = fmt.Sprintf(\"%s/connection_profile_%s.yaml\", joinBySnapshotObject.ConnProfilePath, orgName)\n\t\t}\n\t\tfor _, peerName := range joinBySnapshotObject.TargetPeers {\n\t\t\tpeerOrgName := strings.Split(peerName, \"-\")\n\t\t\tif peerOrgName[1] == orgName {\n\t\t\t\tconnProfConfig, err := ConnProfileInformationForOrg(connProfilePath, orgName)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tpeerURL, err := url.Parse(connProfConfig.Peers[peerName].URL)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.ERROR(\"Failed to get peer url from connection profile\")\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tpeerAddress := peerURL.Host\n\t\t\t\terr = SetEnvForCLI(orgName, peerName, connProfilePath, joinBySnapshotObject.TLS, currentDir)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\targs := []string{\n\t\t\t\t\t\"channel\",\n\t\t\t\t\t\"joinbysnapshot\",\n\t\t\t\t\t\"--snapshotpath\",\n\t\t\t\t}\n\t\t\t\tos.Setenv(\"CORE_PEER_ADDRESS\", peerAddress)\n\t\t\t\tif os.Getenv(\"KUBECONFIG\") != \"\" || strings.Contains(peerAddress, \"127.0.0.1\") {\n\t\t\t\t\terr = j.copySnapshotDirectoryDocker(peerName, joinBySnapshotObject)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\targs = append(args, fmt.Sprintf(\"/var/hyperledger/production/snapshots/completed/%s\", joinBySnapshotObject.SnapshotPath))\n\t\t\t\t} else {\n\t\t\t\t\terr = j.copySnapshotDirectoryK8s(peerName, joinBySnapshotObject)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\targs = append(args, fmt.Sprintf(\"/shared/data/snapshots/completed/%s\", joinBySnapshotObject.SnapshotPath))\n\t\t\t\t}\n\t\t\t\t_, err = networkclient.ExecuteCommand(\"peer\", args, true)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (cmd *SLCommand) Copy(args []string) error {\n\tl := newCopyFlags()\n\tif err := l.flagSet.Parse(args); err != nil {\n\t\treturn nil // we don't return error, the usage will be printed instead\n\t}\n\n\treturn cmd.CopyToDatacenters(l.imageID, l.datacenters...)\n}", "func (c *Client) Copy(pod *k8sv1.Pod, container, containerPath, localPath string, exclude []string) error {\n\treturn nil\n}", "func (c *cstorSnapshotCreate) Run() (r RunCommandResult) {\n\terr := c.validateOptions()\n\tif err != nil {\n\t\treturn c.AddError(errors.Errorf(\"failed to create cstor snapshot: %s\", err)).Result(nil)\n\t}\n\tip, _ := c.Data[\"ip\"].(string)\n\n\t// get snapshot operation struct\n\tsnapOps := cstor.Cstor()\n\tsnapOps.IP = ip\n\tsnapOps.Snap = c.casSnapshot()\n\n\t// use the struct to call the Create method\n\tresponse, err := snapOps.Create()\n\tif err != nil {\n\t\treturn 
c.AddError(err).Result(nil)\n\t}\n\treturn c.Result(response)\n}", "func (s DBClusterSnapshotAttribute) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (cs *controllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {\n\tlog.Infof(\"Starting Delete Snapshot %s with response: %v\", req.SnapshotId, req)\n\t// Step 1: check req\n\t// snapshotName is name of snapshot lv\n\tsnapshotName := req.GetSnapshotId()\n\tif len(snapshotName) == 0 {\n\t\tlog.Error(\"DeleteSnapshot: Snapshot ID not provided\")\n\t\treturn nil, status.Error(codes.InvalidArgument, \"DeleteSnapshot: Snapshot ID not provided\")\n\t}\n\n\t// Step 2: get volumeID from snapshot\n\tsnapContent, err := getVolumeSnapshotContent(cs.snapclient, snapshotName)\n\tif err != nil {\n\t\tlog.Errorf(\"DeleteSnapshot: get snapContent %s error: %s\", snapshotName, err.Error())\n\t\treturn nil, status.Errorf(codes.Internal, \"DeleteSnapshot: get snapContent %s error: %s\", snapshotName, err.Error())\n\t}\n\tvolumeID := *snapContent.Spec.Source.VolumeHandle\n\n\t// Step 3: get nodeName and vgName\n\tnodeName, vgName, _, err := getPvSpec(cs.client, volumeID, cs.driverName)\n\tif err != nil {\n\t\tlog.Errorf(\"DeleteSnapshot: get pv %s error: %s\", volumeID, err.Error())\n\t\treturn nil, status.Errorf(codes.Internal, \"DeleteSnapshot: get pv %s error: %s\", volumeID, err.Error())\n\t}\n\tlog.Infof(\"DeleteSnapshot: snapshot %s is in %s, whose vg is %s\", snapshotName, nodeName, vgName)\n\n\t// Step 4: get grpc client\n\tconn, err := cs.getNodeConn(nodeName)\n\tif err != nil {\n\t\tlog.Errorf(\"DeleteSnapshot: get grpc client at node %s error: %s\", nodeName, err.Error())\n\t\treturn nil, status.Errorf(codes.Internal, \"DeleteSnapshot: get grpc client at node %s error: %s\", nodeName, err.Error())\n\t}\n\tdefer conn.Close()\n\n\t// Step 5: delete lvm snapshot\n\tvar lvmName string\n\tif lvmName, err = conn.GetLvm(ctx, vgName, snapshotName); err != nil {\n\t\tlog.Errorf(\"DeleteSnapshot: get lvm snapshot %s failed: %s\", snapshotName, err.Error())\n\t\treturn nil, status.Errorf(codes.Internal, \"DeleteSnapshot: get lvm snapshot %s failed: %s\", snapshotName, err.Error())\n\t}\n\tif lvmName != \"\" {\n\t\terr := conn.DeleteSnapshot(ctx, vgName, snapshotName)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"DeleteSnapshot: delete lvm snapshot %s failed: %s\", snapshotName, err.Error())\n\t\t\treturn nil, status.Errorf(codes.Internal, \"DeleteSnapshot: delete lvm snapshot %s failed: %s\", snapshotName, err.Error())\n\t\t}\n\t} else {\n\t\tlog.Infof(\"DeleteSnapshot: lvm snapshot %s in node %s not found, skip...\", snapshotName, nodeName)\n\t\t// return immediately\n\t\treturn &csi.DeleteSnapshotResponse{}, nil\n\t}\n\n\tlog.Infof(\"DeleteSnapshot: delete snapshot %s successfully\", snapshotName)\n\treturn &csi.DeleteSnapshotResponse{}, nil\n}", "func GetClusterSnapshot(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *ClusterSnapshotState, opts ...pulumi.ResourceOption) (*ClusterSnapshot, error) {\n\tvar resource ClusterSnapshot\n\terr := ctx.ReadResource(\"aws:neptune/clusterSnapshot:ClusterSnapshot\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func beginSnapshotRoute(w http.ResponseWriter, r *http.Request) {\n\troute := \"BeginSnapshot\"\n\n\tquery := r.URL.Query()\n\tnodeID := query.Get(\"nodeID\")\n\n\thandleRoute(route, nodeID)\n}", "func (a *DefaultApiService) VmSnapshotPut(ctx _context.Context, 
vmSnapshotConfig VmSnapshotConfig) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPut\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/vm.snapshot\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &vmSnapshotConfig\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}" ]
[ "0.63780296", "0.581049", "0.57423306", "0.5608499", "0.55666107", "0.55461967", "0.5525283", "0.551921", "0.54636824", "0.54585147", "0.53201103", "0.53033656", "0.52859074", "0.5271057", "0.5208492", "0.51859665", "0.5182696", "0.5129968", "0.5115464", "0.5103089", "0.50850344", "0.5053919", "0.5017978", "0.50157136", "0.500417", "0.4971892", "0.4966322", "0.49562", "0.4947341", "0.4904988", "0.4903643", "0.48832262", "0.48706385", "0.48675758", "0.4862694", "0.4857082", "0.48541144", "0.48345393", "0.48240247", "0.48208535", "0.48160142", "0.48159093", "0.48087242", "0.47785002", "0.47739342", "0.47671422", "0.47571424", "0.47564614", "0.47222957", "0.47123432", "0.4711614", "0.47058862", "0.46901858", "0.46838972", "0.4677386", "0.4664731", "0.46409842", "0.4627683", "0.462691", "0.462691", "0.46259442", "0.4616681", "0.45979932", "0.45971814", "0.45856172", "0.45812577", "0.45753768", "0.45389768", "0.4528091", "0.45260507", "0.450962", "0.45084772", "0.45040616", "0.44905254", "0.4480546", "0.44519508", "0.4445145", "0.44371533", "0.4434288", "0.44328716", "0.44291118", "0.44286156", "0.4426689", "0.44084308", "0.4405296", "0.44014886", "0.43992588", "0.4395358", "0.43737558", "0.43675324", "0.43627477", "0.4351743", "0.43510583", "0.43482423", "0.43421817", "0.43374786", "0.43334895", "0.4311171", "0.43106306", "0.43104756" ]
0.7562293
0
SDKResponseMetdata returns the response metadata for the CopyClusterSnapshot request.
func (r *CopyClusterSnapshotResponse) SDKResponseMetdata() *aws.Response { return r.response }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *CopySnapshotResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateDiskFromSnapshotResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateInstancesFromSnapshotResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ModifyDBSnapshotAttributeResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ResizeClusterResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *StartSnapshotResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *UpdateClusterResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *UpdateClusterResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ModifyDBClusterResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateReplicationGroupResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateClusterResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ModifyClusterSubnetGroupResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CopyInstancesResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateRelationalDatabaseFromSnapshotResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ImportSnapshotResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ModifyClusterResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeApplicationSnapshotResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ModifyReplicationGroupResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *StartDBClusterResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeSnapshotSchedulesResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateNFSFileShareResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *GetCampaignVersionResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *StartReplicationResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *GetBucketReplicationResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *RestoreDBInstanceFromDBSnapshotResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateFileSystemFromBackupResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ModifyTrafficMirrorFilterNetworkServicesResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *RemoveLayerVersionPermissionResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (s GetDeployablePatchSnapshotForInstanceOutput) SDKResponseMetadata() aws.Response {\n\treturn s.responseMetadata\n}", "func (r *ApplySecurityGroupsToClientVpnTargetNetworkResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateProjectVersionResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *UpdateGameServerGroupResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ModifyMountTargetSecurityGroupsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *PutObjectLockConfigurationResponse) SDKResponseMetdata() *aws.Response 
{\n\treturn r.response\n}", "func (r *DeleteGlobalClusterResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *PutBackupVaultNotificationsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateResourceShareResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *FlushStageCacheResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DeleteBackupSelectionResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DeletePortfolioShareResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateDBSecurityGroupResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *UpdateRevisionResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeChangeSetResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ModifyTrafficMirrorFilterRuleResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeDBClusterParameterGroupsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateTrafficMirrorFilterRuleResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateSolutionVersionResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DeleteInstanceBackupRuleResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateLicenseConfigurationResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateAccessPointResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ModifySelfservicePermissionsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *NiftyCreatePrivateLanResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ApplySecurityGroupsToLoadBalancerResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListStreamingDistributionsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *UpdateCrawlerScheduleResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DisassociateGlobalReplicationGroupResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateLocationS3Response) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DeleteTrafficMirrorFilterResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *RunScheduledInstancesResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateByteMatchSetResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *SwapEnvironmentCNAMEsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func ExampleRDS_CopyDBClusterSnapshot_shared00() {\n\tsvc := rds.New(session.New())\n\tinput := &rds.CopyDBClusterSnapshotInput{\n\t\tCopyTags: aws.Bool(true),\n\t\tSourceDBClusterSnapshotIdentifier: aws.String(\"arn:aws:rds:us-east-1:123456789012:cluster-snapshot:rds:myaurora-2019-06-04-09-16\"),\n\t\tTargetDBClusterSnapshotIdentifier: aws.String(\"myclustersnapshotcopy\"),\n\t}\n\n\tresult, err := svc.CopyDBClusterSnapshot(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase rds.ErrCodeDBClusterSnapshotAlreadyExistsFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBClusterSnapshotAlreadyExistsFault, 
aerr.Error())\n\t\t\tcase rds.ErrCodeDBClusterSnapshotNotFoundFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBClusterSnapshotNotFoundFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidDBClusterStateFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidDBClusterStateFault, aerr.Error())\n\t\t\tcase rds.ErrCodeInvalidDBClusterSnapshotStateFault:\n\t\t\t\tfmt.Println(rds.ErrCodeInvalidDBClusterSnapshotStateFault, aerr.Error())\n\t\t\tcase rds.ErrCodeSnapshotQuotaExceededFault:\n\t\t\t\tfmt.Println(rds.ErrCodeSnapshotQuotaExceededFault, aerr.Error())\n\t\t\tcase rds.ErrCodeKMSKeyNotAccessibleFault:\n\t\t\t\tfmt.Println(rds.ErrCodeKMSKeyNotAccessibleFault, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (r *CreateSystemInstanceResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListChangeSetsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *StartMonitoringMembersResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ModifyHostsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *UpdateFolderResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *StartImagePipelineExecutionResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DeregisterPatchBaselineForPatchGroupResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *PutDeliveryChannelResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *GetInstanceAccessDetailsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateVPCAssociationAuthorizationResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ModifyFpgaImageAttributeResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeVpcEndpointConnectionNotificationsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListJournalKinesisStreamsForLedgerResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *AttachLoadBalancerTargetGroupsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *PutImageRecipePolicyResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (bcsr BlobsCreateSnapshotResponse) Snapshot() string {\n\treturn bcsr.rawResponse.Header.Get(\"x-ms-snapshot\")\n}", "func (r *OutputService6TestCaseOperation1Response) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *OutputService6TestCaseOperation1Response) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *PutContainerPolicyResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreatePackagingConfigurationResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *GetInstanceMetricDataResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *GetApiCacheResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *MergeBranchesByFastForwardResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *UpdateNetworkProfileResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r 
*GetRecoveryPointRestoreMetadataResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *GetCompatibleElasticsearchVersionsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateMembersResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateMembersResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *InvalidateProjectCacheResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateConfigurationSetEventDestinationResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DetachVolumeResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeValidDBInstanceModificationsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *UpdateDomainNameserversResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *UpdateModelVersionStatusResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *UpdateConditionalForwarderResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ExportBundleResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ModifyVpcAttributeResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *UpdatePipelineNotificationsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}" ]
[ "0.63096714", "0.6076909", "0.6013596", "0.5952312", "0.5912922", "0.58549273", "0.58414584", "0.58414584", "0.58228135", "0.5812001", "0.5810432", "0.5782599", "0.57477885", "0.5734081", "0.5730805", "0.5724143", "0.57020676", "0.5692633", "0.5664309", "0.5662683", "0.5564344", "0.5563323", "0.555856", "0.5556153", "0.5538353", "0.55258626", "0.5496155", "0.5476746", "0.5473342", "0.54589474", "0.5457882", "0.54434407", "0.54363084", "0.5410509", "0.5404841", "0.53951967", "0.5381099", "0.53760797", "0.5370711", "0.5358344", "0.5358077", "0.5349491", "0.5343077", "0.53421354", "0.5333412", "0.53113955", "0.52914006", "0.5282418", "0.52777773", "0.52772474", "0.5276527", "0.5262711", "0.5257578", "0.5251167", "0.5250333", "0.52487665", "0.5247742", "0.52455723", "0.52434427", "0.5234841", "0.5231831", "0.5224082", "0.5217289", "0.52075636", "0.52071637", "0.5205918", "0.5205786", "0.52038795", "0.5202599", "0.52025795", "0.5200545", "0.51972806", "0.5192008", "0.51910037", "0.5187016", "0.5182749", "0.51814085", "0.51797956", "0.51789165", "0.51789165", "0.5178741", "0.51775897", "0.51768976", "0.5172076", "0.5169377", "0.5163539", "0.5161375", "0.51613533", "0.51610845", "0.51610845", "0.51593393", "0.5157965", "0.51548916", "0.5154581", "0.51538944", "0.5148284", "0.5147693", "0.5146636", "0.5146429", "0.5140516" ]
0.6773798
0
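Editor's note: a minimal usage sketch (not part of the dataset) for the document above, showing where SDKResponseMetdata fits in the pre-1.0 aws-sdk-go-v2 request/send pattern that this record's negatives also use (compare RestoreDBInstanceFromDBSnapshotRequest.Send and &aws.Response{Request: r.Request} earlier in this record). The Redshift client setup, the input fields, and the RequestID field name are assumptions about that SDK generation, not facts from the record.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/redshift"
)

func main() {
	// Assumed config loading for the v0.x SDK generation these snippets come from.
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	client := redshift.New(cfg)

	// Build and send the request, as in the Send helpers shown in the negatives.
	req := client.CopyClusterSnapshotRequest(&redshift.CopyClusterSnapshotInput{
		SourceSnapshotIdentifier: aws.String("my-snapshot"),
		TargetSnapshotIdentifier: aws.String("my-snapshot-copy"),
	})
	resp, err := req.Send(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// SDKResponseMetdata (sic, the SDK's own spelling) hands back the
	// *aws.Response constructed as &aws.Response{Request: r.Request}, so the
	// underlying request metadata is reachable from it.
	meta := resp.SDKResponseMetdata()
	fmt.Println("request id:", meta.Request.RequestID) // field name assumed for this SDK generation
}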
EmailToName returns the name of the user based on the email address.
func EmailToName(email string) string { beforeAt, afterAt, _ := strings.Cut(email, "@") beforePlus, _, _ := strings.Cut(beforeAt, "+") spaceDivided := nonAlpha.ReplaceAllString(beforePlus, " ") lowerCased := strings.ToLower(spaceDivided) if lowerCased == "sturdy" { beforeDot, _, _ := strings.Cut(afterAt, ".") return EmailToName(beforeDot) } capitilized := strings.Title(lowerCased) return strings.TrimSpace(capitilized) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (u Username) Email() string {\n\treturn string(u) + common.EmailSuffix\n}", "func UsernameFromEmail(email string) (Username, error) {\n\t// Check that the email is not too long\n\tif len(email) > common.MaxEmailLength || len(email) <=\n\t\tlen(common.EmailSuffix) {\n\t\treturn Username(\"\"), fmt.Errorf(\"email %s is too long or too short\", email)\n\t}\n\n\t// Check that the email suffix is at the end and that there is only one first\n\t// name and one last name.\n\tif email[len(email)-len(common.EmailSuffix):] != common.EmailSuffix ||\n\t\tlen(strings.Split(email, \".\")) != 3 {\n\t\treturn Username(\"\"), errInvalidEmail\n\t}\n\n\t// Return firstname.lastname as a Username type\n\treturn Username(email[0 : len(email)-len(common.EmailSuffix)]), nil\n}", "func GetUsernameFromEmail(email string) (userName, avatarURL string, err error) {\n\tdbQuery := `\n\t\tSELECT user_name, avatar_url\n\t\tFROM users\n\t\tWHERE email = $1`\n\tvar av pgx.NullString\n\terr = pdb.QueryRow(dbQuery, email).Scan(&userName, &av)\n\tif err != nil {\n\t\tif err == pgx.ErrNoRows {\n\t\t\t// No matching username of the email\n\t\t\terr = nil\n\t\t\treturn\n\t\t}\n\t\tlog.Printf(\"Looking up username for email address '%s' failed: %v\\n\", email, err)\n\t\treturn\n\t}\n\n\t// If no avatar URL is presently stored, default to a gravatar based on the users email (if known)\n\tif !av.Valid {\n\t\tpicHash := md5.Sum([]byte(email))\n\t\tavatarURL = fmt.Sprintf(\"https://www.gravatar.com/avatar/%x?d=identicon\", picHash)\n\t} else {\n\t\tavatarURL = av.String\n\t}\n\treturn\n}", "func EmailToDefaultName(email string) string {\n\tname := strings.NewReplacer(\".\", \"-\").Replace(email)\n\tsplitted := strings.Split(name, \"@\")\n\tif len(splitted) > 1 {\n\t\treturn \"kubeflow-\" + splitted[0]\n\t} else {\n\t\treturn \"kubeflow-\" + name\n\t}\n}", "func (internet *Internet) EmailWithUsername(name string) string {\n\treturn internet.UserWithName(name) + \"@\" + internet.DomainWord() + \".\" + internet.faker.MustParse(\"internet.domain_suffix\")\n}", "func (o GetUsersUserOutput) Email() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetUsersUser) string { return v.Email }).(pulumi.StringOutput)\n}", "func (u *UserInfoResolver) Email() string {\n\tif u.UserInfo != nil && u.UserInfo.Email != \"\" {\n\t\treturn u.UserInfo.Email\n\t}\n\n\treturn u.SessionCtx.Claims.GetUserClaims().Email\n}", "func (*ACMEManager) emailUsername(email string) string {\n\tat := strings.Index(email, \"@\")\n\tif at == -1 {\n\t\treturn email\n\t} else if at == 0 {\n\t\treturn email[1:]\n\t}\n\treturn email[:at]\n}", "func (o UserOutput) Email() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *User) pulumi.StringPtrOutput { return v.Email }).(pulumi.StringPtrOutput)\n}", "func (o UserOutput) Email() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *User) pulumi.StringPtrOutput { return v.Email }).(pulumi.StringPtrOutput)\n}", "func (u *UserResolver) Email() string {\n\treturn u.Field.Email\n}", "func Email() string {\n\treturn fmt.Sprintf(\"%s@%s\", Username(), Domain())\n}", "func Email(v string) predicate.User {\n\treturn predicate.User(sql.FieldEQ(FieldEmail, v))\n}", "func GetUserEmail() (string, error) {\n\tuser, err := getCurrentUser()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn user.Email, nil\n}", "func Email(v string) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldEmail), v))\n\t})\n}", "func Email(v string) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) 
{\n\t\ts.Where(sql.EQ(s.C(FieldEmail), v))\n\t})\n}", "func Email(v string) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldEmail), v))\n\t})\n}", "func Email(v string) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldEmail), v))\n\t})\n}", "func (*ACMEIssuer) emailUsername(email string) string {\n\tat := strings.Index(email, \"@\")\n\tif at == -1 {\n\t\treturn email\n\t} else if at == 0 {\n\t\treturn email[1:]\n\t}\n\treturn email[:at]\n}", "func GetUserEmail(email string) (*User, error) {\n\tuser := User{}\n\terr := meddler.QueryRow(db, &user, userFindEmailStmt, email)\n\treturn &user, err\n}", "func (obj *MessengerUser) Email() string {\n\tproxyResult := /*pr4*/ C.vssq_messenger_user_email(obj.cCtx)\n\n\truntime.KeepAlive(obj)\n\n\treturn C.GoString(C.vsc_str_chars(proxyResult)) /* r5.1 */\n}", "func (user User) DisplayName() string {\n\treturn user.Email\n}", "func (d UserData) Email() string {\n\tval := d.ModelData.Get(models.NewFieldName(\"Email\", \"email\"))\n\tif !d.Has(models.NewFieldName(\"Email\", \"email\")) {\n\t\treturn *new(string)\n\t}\n\treturn val.(string)\n}", "func (u *User) GetEmail() domain.EmailAddress {\n\tif u.Email == \"\" {\n\t\t// if the user used a social login on Azure as well, the email will be empty\n\t\t// but is used as username\n\t\treturn domain.EmailAddress(u.UserPrincipalName)\n\t}\n\treturn u.Email\n}", "func (u *User) Email() string { return u.userData.Email }", "func (internet *Internet) UserName() string {\n\tf, err := NewFaker(\"en\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\ta := []string{f.Name.FirstName(), f.Name.LastName()}\n\tinternet.faker.shuffleStrings(a)\n\treturn internet.UserWithName(strings.Join(a, \" \"))\n}", "func (p *GitPerson) Email() string {\n\tlt := strings.IndexByte(p.Str, '<')\n\tgt := strings.IndexByte(p.Str, '>')\n\tif lt < 0 || gt < lt {\n\t\treturn \"\"\n\t}\n\treturn p.Str[lt+1 : gt]\n}", "func (m *User) GetMailNickname()(*string) {\n return m.mailNickname\n}", "func (p *AzureProvider) GetEmailAddress(s *sessions.SessionState) (string, error) {\n\tvar email string\n\tvar err error\n\n\tif s.AccessToken == \"\" {\n\t\treturn \"\", errors.New(\"missing access token\")\n\t}\n\treq, err := http.NewRequest(\"GET\", p.ProfileURL.String(), nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treq.Header = getAzureHeader(s.AccessToken)\n\n\tjson, err := requests.Request(req)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\temail, err = getEmailFromJSON(json)\n\n\tif err == nil && email != \"\" {\n\t\treturn email, err\n\t}\n\n\temail, err = json.Get(\"userPrincipalName\").String()\n\n\tif err != nil {\n\t\tlogger.Printf(\"failed making request %s\", err)\n\t\treturn \"\", err\n\t}\n\n\tif email == \"\" {\n\t\tlogger.Printf(\"failed to get email address\")\n\t\treturn \"\", err\n\t}\n\n\treturn email, err\n}", "func (r *implementation) GetUserByEmail(_ context.Context, e string) (*domain.Fanboy, error) {\n\tif u, ok := r.users[strings.ToLower(e)]; ok {\n\n\t\temails := strings.Split(u.following, \",\")\n\t\tfollows := make(map[string]interface{}, len(emails))\n\t\tfor _, em := range emails {\n\t\t\tfollows[em] = nil\n\t\t}\n\n\t\tslugs := strings.Split(u.favorites, \",\")\n\t\tfavorites := make(map[string]interface{}, len(slugs))\n\t\tfor _, s := range slugs {\n\t\t\tfavorites[s] = nil\n\t\t}\n\n\t\treturn &domain.Fanboy{\n\t\t\tUser: domain.User{\n\t\t\t\tEmail: u.email,\n\t\t\t\tUsername: u.username,\n\t\t\t\tBio: 
u.bio,\n\t\t\t\tImage: u.image,\n\t\t\t\tPassword: u.password,\n\t\t\t},\n\t\t\tFollowing: follows,\n\t\t\tFavorites: favorites,\n\t\t}, nil\n\t}\n\n\treturn nil, domain.ErrUserNotFound\n}", "func GetUserByEmail(e string) User {\n\tsqlStmt := `SELECT \n\t\t\t\t\tfldID,\n\t\t\t\t\tfldFirstName,\n \t\t\t\tfldLastName,\n\t\t\t\t\tfldEmail,\n\t\t\t\t\tfldPassword\n\t\t\t\tFROM tblUsers\n\t\t\t\tWHERE fldEmail = ?;`\n\n\t// Prepare statement\n\tstmt, err := globals.Db.Prepare(sqlStmt)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer stmt.Close()\n\n\tvar u User\n\terr = stmt.QueryRow(e).Scan(&u.UserID, &u.FirstName, &u.LastName, &u.LastName, &u.Password)\n\tif err != nil {\n\t\t// No rows found\n\t\tif err == sql.ErrNoRows {\n\t\t\t// Return empty user\n\t\t\treturn u\n\t\t}\n\t\t// Something else went wrong\n\t\tlog.Fatal(err)\n\t}\n\n\treturn u\n}", "func GetUserByEmail(email string) (*dtoums.UserDTO, error) {\n\tu, err := dalums.GetUserByEmail(email)\n\tif err != nil {\n\t\treturn nil, err //fmt.Errorf(\"errors founded %w\", err)\n\t}\n\treturn dtoums.UserDALToDTO(u)\n}", "func (o *DepositSwitchTargetUser) GetEmail() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Email\n}", "func Email(email string) (string, error) {\n\tvar validEmail string\n\terrorInvalid := errors.New(\"invalid email format\")\n\tif len(email) < 6 || len(email) > 254 {\n\t\treturn validEmail, errorInvalid\n\t}\n\thostRegexp := regexp.MustCompile(\"^[^\\\\s]+\\\\.[^\\\\s]+$\")\n\tuserRegexp := regexp.MustCompile(\"^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$\")\n\tat := strings.LastIndex(email, \"@\")\n\tif at <= 0 || at > len(email)-3 {\n\t\treturn validEmail, errorInvalid\n\t}\n\n\tif !userRegexp.MatchString(email[:at]) || !hostRegexp.MatchString(email[at+1:]) {\n\t\treturn validEmail, errorInvalid\n\t}\n\n\tvalidEmail = strings.TrimSpace(email)\n\tvalidEmail = strings.TrimRight(validEmail, \".\")\n\tvalidEmail = strings.ToLower(validEmail)\n\n\treturn validEmail, nil\n}", "func (u user) GetEmail() string {\n\treturn u.Email\n}", "func (a *Account) GetEmail() string { return a.email }", "func (o *MicrosoftGraphEducationUser) GetMailNickname() string {\n\tif o == nil || o.MailNickname == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.MailNickname\n}", "func (internet *Internet) Email() string {\n\treturn internet.UserName() + \"@\" + internet.DomainWord() + \".\" + internet.faker.MustParse(\"internet.domain_suffix\")\n}", "func NimToEmail(s string) (string, error) {\n\treturn s + \"@student.trunojoyo.ac.id\", nil\n}", "func NormalizeEmail(str string) (string, error) {\n\tif !IsEmail(str) {\n\t\treturn \"\", fmt.Errorf(\"%s is not an email\", str)\n\t}\n\tparts := strings.Split(str, \"@\")\n\tparts[0] = strings.ToLower(parts[0])\n\tparts[1] = strings.ToLower(parts[1])\n\tif parts[1] == \"gmail.com\" || parts[1] == \"googlemail.com\" {\n\t\tparts[1] = \"gmail.com\"\n\t\tparts[0] = strings.Split(ReplacePattern(parts[0], `\\.`, \"\"), \"+\")[0]\n\t}\n\treturn strings.Join(parts, \"@\"), nil\n}", "func GetUserByEmail(email string) (models.User, error) {\n\treturn repositories.GetUserByEmail(email)\n}", "func (o *User) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func GetUserByEmail(email string) (user User, err error) {\n\terr = database.DB.QueryRow(\"SELECT id, uuid, fname, lname, email, password, created_at FROM users WHERE email = ?\", email).\n\t\tScan(&user.ID, &user.UUID, &user.FName, &user.LName, 
&user.Email, &user.Password, &user.Created)\n\tif err != nil {\n\t\tlog.Println(\"Get user by email query failed\", err)\n\t\treturn\n\t}else {\n\t\tlog.Println(\"User retrieved by email successfully\")\n\t}\n\treturn\n}", "func Email() string {\n\treturn strings.ToLower(FirstName(RandomGender)+LastName()) + StringNumberExt(1, \"\", 3) + \"@\" + randomFrom(jsonData.Domains)\n}", "func (o BucketLoggingV2TargetGrantGranteeOutput) EmailAddress() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BucketLoggingV2TargetGrantGrantee) *string { return v.EmailAddress }).(pulumi.StringPtrOutput)\n}", "func (o ServiceAccountOutput) Email() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ServiceAccount) pulumi.StringOutput { return v.Email }).(pulumi.StringOutput)\n}", "func (o ServiceAccountOutput) Email() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ServiceAccount) *string { return v.Email }).(pulumi.StringPtrOutput)\n}", "func getUserEmail(r *http.Request) string {\n\treturn r.Context().Value(contextEmailKey(\"userEmail\")).(string)\n}", "func SocialEmail(v string) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldSocialEmail), v))\n\t})\n}", "func (c ChangeEmailAddress) GetName() string {\n\treturn fmt.Sprintf(\"%T\", c)\n}", "func (o MemberOutput) Email() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Member) pulumi.StringOutput { return v.Email }).(pulumi.StringOutput)\n}", "func NormalizeEmail(email string) string {\r\n\treturn strings.ToLower(strings.Trim(email, \" \"))\r\n}", "func (u UsersRepository) UserEmail(db *sql.DB, email string, user usersmodels.User) (usersmodels.User, error) {\n\tsqlStmt := `SELECT\n\t\t\t\t\tfldID,\n \t\t\t\tfldFirstName,\n \t\t\t\tfldLastName,\n \t\t\t\tfldEmail,\n \t\t\t\tfldPassword\n\t\t\t\tFROM tblUsers\n\t\t\t\tWHERE fldEmail = ?;`\n\n\t// Prepare statment\n\tstmt, err := db.Prepare(sqlStmt)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tdefer stmt.Close()\n\n\t// Return row and scan columns to passed note\n\terr = stmt.QueryRow(email).Scan(&user.ID, &user.FirstName, &user.LastName, &user.Email, &user.Password)\n\tif err != nil {\n\t\t// No row found\n\t\tif err == sql.ErrNoRows {\n\t\t\t// Return empty user and error\n\t\t\treturn usersmodels.User{}, errors.New(\"Row not found\")\n\t\t}\n\t\t// Something else went wrong\n\t\tlog.Fatalln(err)\n\t}\n\n\treturn user, nil\n}", "func (db *database) GetPersonByEmail(\n\tctx context.Context,\n\temail string,\n) (app.Person, error) {\n\n\tvar dbp dbPerson\n\n\terr := db.GetContext(ctx, &dbp, `\n\t\tSELECT\n\t\t\tp.person_id,\n\t\t\tp.first_name,\n\t\t\tp.last_name,\n\t\t\tp.email,\n\t\t\tp.role_id,\n\t\t\tp.pass_hash,\n\t\t\tp.is_deactivated,\n\t\t\tarray_remove(array_agg(a.organization_id), NULL) as affiliations\n\t\tFROM person p\n\t\tLEFT JOIN affiliation a\n\t\t\tON p.person_id = a.person_id\n\t\tWHERE email = $1\n\t\tGROUP BY p.person_id\n\t`, email)\n\n\tif errors.Is(err, sql.ErrNoRows) {\n\t\treturn app.Person{}, errors.Wrapf(\n\t\t\tapp.ErrNotFound,\n\t\t\t\"no such person by email of '%s'\", email,\n\t\t)\n\t}\n\n\treturn dbp.toPerson(), errors.Wrap(err, \"failed to get person\")\n}", "func (u *User) GetUserByEmail() *User {\n\tvar result = &User{}\n\terr := database.DB.Where(map[string]interface{}{\n\t\t\"email\": u.Email,\n\t}).First(&result).Error\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn result\n}", "func (t *Token) Email() string {\n\tif res, ok := (t.Claims[\"email\"]).(string); ok {\n\t\treturn res\n\t}\n\treturn \"\"\n}", "func 
UidToName(uid uint32) (string, error) {\n\tx, err := user.LookupId(strconv.Itoa(int(uid)))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn x.Name, nil\n}", "func (c *Crowd) GetUserByEmail(email string) (User, error) {\n\tu := User{}\n\n\tnames, err := c.SearchUsers(\"email=\" + email)\n\tif err != nil {\n\t\treturn u, err\n\t}\n\n\tswitch len(names) {\n\tcase 1:\n\t\treturn c.GetUser(names[0])\n\tcase 0:\n\t\treturn u, fmt.Errorf(\"user not found\")\n\tdefault:\n\t\treturn u, fmt.Errorf(\"multiple users found\")\n\t}\n}", "func (s *Service) ListRecipientNamesByEmail() *ListRecipientNamesByEmailOp {\n\treturn &ListRecipientNamesByEmailOp{\n\t\tCredential: s.credential,\n\t\tMethod: \"GET\",\n\t\tPath: \"recipient_names\",\n\t\tAccept: \"application/json\",\n\t\tQueryOpts: make(url.Values),\n\t\tVersion: esign.APIv2,\n\t}\n}", "func (u *UserEmail) GetEmail() string {\n\tif u == nil || u.Email == nil {\n\t\treturn \"\"\n\t}\n\treturn *u.Email\n}", "func (o LookupServiceAccountResultOutput) Email() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupServiceAccountResult) string { return v.Email }).(pulumi.StringOutput)\n}", "func (o LookupServiceAccountResultOutput) Email() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupServiceAccountResult) string { return v.Email }).(pulumi.StringOutput)\n}", "func EmailAddressForEmail(email string) EmailAddress {\n\treturn EmailAddress{\n\t\tEmail: email,\n\t}\n}", "func (o *ModelsUser) GetEmail() string {\n\tif o == nil || o.Email == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Email\n}", "func Email(defaultValue string) (string, error) {\n\tp := promptui.Prompt{\n\t\tLabel: \"Email\",\n\t\tValidate: func(input string) error {\n\t\t\tvalid := govalidator.IsEmail(input)\n\t\t\tif valid {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\treturn errors.New(\"Please enter a valid email address\")\n\t\t},\n\t}\n\n\tif defaultValue != \"\" {\n\t\tp.Default = defaultValue\n\t}\n\n\treturn p.Run()\n}", "func (o LookupOrganizationUserResultOutput) UserEmail() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupOrganizationUserResult) string { return v.UserEmail }).(pulumi.StringOutput)\n}", "func (uv *userValidator) ByEmail(email string) (*User, error) {\n\tuser := User{\n\t\tEmail: email,\n\t}\n\terr := runUserValFns(&user, uv.normalizeEmail)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn uv.UserDB.ByEmail(user.Email)\n}", "func (uv *userValidator) ByEmail(email string) (*User, error) {\n\tuser := User{\n\t\tEmail: email,\n\t}\n\terr := runUserValFns(&user, uv.normalizeEmail)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn uv.UserDB.ByEmail(user.Email)\n}", "func (u *User) GetName() string {\n\tif u == nil || u.Name == nil {\n\t\treturn \"\"\n\t}\n\treturn *u.Name\n}", "func UserByEmail(ctx context.Context, email string) (User, *datastore.Key, error) {\n\tvar results []User\n\tquery := datastore.NewQuery(UserEntityName).\n\t\tFilter(\"Email =\", email).\n\t\tLimit(1)\n\tkeys, err := query.GetAll(ctx, &results)\n\tif err != nil {\n\t\treturn User{}, nil, err\n\t}\n\n\tif len(results) == 0 {\n\t\treturn User{}, nil, nil\n\t}\n\n\treturn results[0], keys[0], nil\n}", "func (db *DB) GetUserByEmail(email string) (*model.User, error) {\n\tvar user model.User\n\n\tcursor := db.collections.users.FindOne(\n\t\tcontext.Background(),\n\t\tbson.D{primitive.E{\n\t\t\tKey: \"email\",\n\t\t\tValue: email,\n\t\t}},\n\t)\n\n\tif cursor.Err() != nil {\n\n\t\treturn nil, cursor.Err()\n\t}\n\n\terr := cursor.Decode(&user)\n\tif err != nil 
{\n\t\tif err == mongo.ErrNoDocuments {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\treturn nil, err\n\t}\n\n\treturn &user, nil\n}", "func (o *User) GetMailNickname() string {\n\tif o == nil || o.MailNickname == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.MailNickname\n}", "func(s *Server) GetUserByEmail(email string) (*User, error) {\n\tuser := &User{}\n\terr := s.DB.Debug().Where(\"email = ?\", email).Take(&user).Error\n\tif err != nil {\n\t\treturn nil, err\n\t}\n \n\treturn user, nil\n }", "func (uv *userValidator) ByEmail(email string) (*User, error) {\n\tu := User{Email: email}\n\n\tif err := runUserValFuncs(&u, uv.emailNormalize); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn uv.UserDB.ByEmail(u.Email)\n}", "func (uv *userValidator) ByEmail(email string) (*User, error) {\r\n\tuser := User{\r\n\t\tEmail: email,\r\n\t}\r\n\terr := runUserValFns(&user, uv.normalizeEmail,\r\n\t\tuv.emailFormat)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\treturn uv.UserDB.ByEmail(user.Email)\r\n}", "func (s *UserRepository) GetUserByEmail(email string) (*akwad.Account, error) {\n\treturn nil, nil\n}", "func (u *User) GetEmail() string {\n\tif u == nil || u.Email == nil {\n\t\treturn \"\"\n\t}\n\treturn *u.Email\n}", "func (su *SystemUser) Email() string {\n\treturn su.HeaderString(\"email\")\n}", "func (user *User) GetEmail() string {\n\n\treturn user.Email\n}", "func (c RegisterWithEmail) GetName() string {\n\treturn fmt.Sprintf(\"%T\", c)\n}", "func (o EmailRoutingAddressOutput) Email() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *EmailRoutingAddress) pulumi.StringOutput { return v.Email }).(pulumi.StringOutput)\n}", "func UserByEmail(c *gin.Context) {\n\temail := c.Param(\"email\")\n\tuser := user.GetByEmail(email)\n\tc.JSON(200, gin.H{\n\t\t\"data\": user,\n\t})\n}", "func (o BucketAclV2AccessControlPolicyGrantGranteeOutput) EmailAddress() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BucketAclV2AccessControlPolicyGrantGrantee) *string { return v.EmailAddress }).(pulumi.StringPtrOutput)\n}", "func (uv *userValidator) ByEmail(email string) (*User, error) {\n\tuser := User{\n\t\tEmail: email,\n\t}\n\tif err := runUserValFuncs(&user, uv.normalizeEmail); err != nil {\n\t\treturn nil, err\n\t}\n\treturn uv.UserDB.ByEmail(user.Email)\n}", "func (uv *userValidator) ByEmail(email string) (*User, error) {\n\tuser := User{\n\t\tEmail: email,\n\t}\n\tif err := runUserValFuncs(&user, uv.normalizeEmail); err != nil {\n\t\treturn nil, err\n\t}\n\treturn uv.UserDB.ByEmail(user.Email)\n}", "func (us *UserService) UserByEmail(email string) (*railway.User, error) {\n\tdetails := &MethodInfo{\n\t\tName: \"UserByEmail\",\n\t\tDescription: \"UserByEmail is the DB method used to UserByEmail\",\n\t}\n\n\tuser := &railway.User{}\n\tquery := fmt.Sprintf(`\n\t\tSELECT %s\n\t\tFROM users\n\t\tWHERE email=$1\n`, userAllColumns)\n\terr := us.db.QueryRow(query, email).Scan(user.Scan()...)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, details.Name)\n\t\t// We return nil here since no returned rows are a good thing!\n\t}\n\n\treturn user, nil\n}", "func (user *User) GetEmail() string {\n\treturn user.Email\n}", "func (p Profile) EmailAddress() *attribute.StringAttribute {\n\treturn p.GetStringAttribute(AttrConstEmailAddress)\n}", "func (uv *userValidator) ByEmail(email string) (*User, error){\n\tuser := User{\n\t\tEmail: email,\n\t}\n\tif err := runUserValidatorFunction(&user, uv.normalizeEmail); err!=nil{\n\t\treturn nil, err\n\t}\n\treturn uv.UserDB.ByEmail(user.Email)\n}", "func (op 
*ListRecipientNamesByEmailOp) Email(val string) *ListRecipientNamesByEmailOp {\n\tif op != nil {\n\t\top.QueryOpts.Set(\"email\", val)\n\t}\n\treturn op\n}", "func GetPersonByEmail(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\temail := vars[\"email\"]\n\n\tsession := utils.GetMongoSession()\n\tdefer session.Close()\n\n\tsession.SetMode(mgo.Monotonic, true)\n\n\tvar errDB error\n\tc := session.DB(\"test_db\").C(\"persons\")\n\n\tvar result interface{}\n\terrDB = c.Find(bson.M{\"email\": email}).Select(bson.M{\"_id\": 0}).Sort(\"-name\").One(&result)\n\n\tif errDB != nil {\n\t\tif errDB.Error() == \"not found\" {\n\t\t\tutils.SendJSONResponse(w, 404, \"Not Found\", nil)\n\t\t\treturn\n\t\t}\n\n\t\tpanic(errDB)\n\t}\n\n\tutils.SendJSONResponse(w, 0, \"Success\", result)\n}", "func UserByEmail(email string) (u *UserModel, err error) {\n\tu, err = getUser(\"where email = $1\", email, false, false)\n\treturn\n}", "func GetUserByEmail(e Executor, email string) (*User, error) {\n\n\treq, _ := http.NewRequest(\"GET\", RexBaseURL+apiFindByEmail+email, nil)\n\n\tr, err := e.Execute(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// check if the user can be found\n\tvar user User\n\terr = json.NewDecoder(r.Body).Decode(&user)\n\tio.Copy(ioutil.Discard, r.Body)\n\n\tif err != nil || user.UserID == \"\" {\n\t\treturn &User{}, fmt.Errorf(\"user not found\")\n\t}\n\n\t// Fetch actual user information based on the retrieved UserID\n\treq, _ = http.NewRequest(\"GET\", RexBaseURL+apiFindByID+user.UserID, nil)\n\tr, err = e.Execute(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\tio.Copy(ioutil.Discard, r.Body)\n\t}()\n\n\terr = json.NewDecoder(r.Body).Decode(&user)\n\treturn &user, nil\n}", "func EmailEQ(v string) predicate.User {\n\treturn predicate.User(sql.FieldEQ(FieldEmail, v))\n}", "func GetDomainFromEmail(email string) (string, error) {\n\tparsed, err := mail.ParseAddress(email)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn processEmail(strings.ToLower(strings.TrimSpace(parsed.Address)))\n}", "func GetUserEmail(userId int) (string, error) {\n\n\t// request failed, return an error\n\treturn \"\", &HttpError{403, \"GET\"}\n}", "func (o SAMLParamsResponseOutput) UserEmail() pulumi.StringOutput {\n\treturn o.ApplyT(func(v SAMLParamsResponse) string { return v.UserEmail }).(pulumi.StringOutput)\n}", "func (s *Store) UserByEmail(ctx context.Context, email string) (*User, error) {\n\tsb := userStruct.SelectFrom(\"users\")\n\tsb.Where(sb.E(\"email\", email))\n\tsq, sa := sb.Build()\n\tuser := &User{}\n\tif err := selectUser(ctx, s.db, s, user, sq, sa...); err != nil {\n\t\treturn nil, err\n\t}\n\treturn user, nil\n}", "func UserByEmail(db *sql.DB, email string) (User, error) {\n\treturn ScanUser(db.QueryRow(`select * from users where email = ?`, email))\n}", "func GetUserByEmail(email string) *User {\n\tuser := new(User)\n\trow := DB.QueryRow(\"SELECT * from user WHERE email=?\", email)\n\terr := row.Scan(&user.ID, &user.Email, &user.Password, &user.Fullname, &user.TSLastLogin, &user.TSCreate, &user.TSUpdate, &user.Permission)\n\tif err != nil {\n\t\tlog.Errorln(\"User SELECT by Email Err: \", err)\n\t\treturn nil\n\t}\n\treturn user\n}" ]
[ "0.67602915", "0.6609233", "0.6606951", "0.6319288", "0.6281543", "0.62564415", "0.6249514", "0.6211129", "0.6131208", "0.6131208", "0.61290425", "0.601704", "0.59960634", "0.5902471", "0.5901527", "0.5901527", "0.5901527", "0.5901527", "0.5897872", "0.58856905", "0.5879479", "0.58386856", "0.58155113", "0.5802114", "0.5791823", "0.5756705", "0.5754699", "0.57307225", "0.5715422", "0.57044876", "0.569757", "0.5676341", "0.5673679", "0.5668786", "0.5621303", "0.5616547", "0.5602919", "0.5588873", "0.55707735", "0.55697227", "0.5561951", "0.555556", "0.5551283", "0.5546337", "0.5542583", "0.5538142", "0.5528656", "0.55278003", "0.5509757", "0.55038697", "0.5503301", "0.5502946", "0.55020046", "0.5499804", "0.54941607", "0.5491761", "0.54903847", "0.54871476", "0.5476263", "0.5468972", "0.54648787", "0.54648787", "0.5462037", "0.54604715", "0.54585123", "0.5440775", "0.54397285", "0.54397285", "0.5434474", "0.54297996", "0.5417873", "0.54140484", "0.5410329", "0.54089123", "0.53975147", "0.53942114", "0.5393424", "0.5392679", "0.53913575", "0.5386425", "0.53862005", "0.53854686", "0.5378523", "0.53773475", "0.53773475", "0.5374237", "0.53703606", "0.5344703", "0.53248954", "0.53228825", "0.5319857", "0.5318282", "0.5317889", "0.5300892", "0.53005964", "0.53000045", "0.52994776", "0.52938473", "0.5289974", "0.5285231" ]
0.7996834
0
Apply templates and applies any ingress records required for the OpenFaaS Cloud ingress configuration
func Apply(plan types.Plan) error {\n\terr := apply("ingress-wildcard.yml", "ingress-wildcard", IngressTemplate{\n\t\tRootDomain: plan.RootDomain,\n\t\tTLS: plan.TLS,\n\t\tIssuerType: plan.TLSConfig.IssuerType,\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr1 := apply("ingress-auth.yml", "ingress-auth", IngressTemplate{\n\t\tRootDomain: plan.RootDomain,\n\t\tTLS: plan.TLS,\n\t\tIssuerType: plan.TLSConfig.IssuerType,\n\t})\n\n\tif err1 != nil {\n\t\treturn err1\n\t}\n\n\treturn nil\n}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func createIngress(kubeconfig, ingressFilename string) error {\n\t// TODO(nikhiljindal): Allow users to specify the list of clusters to create the ingress in\n\t// rather than assuming all contexts in kubeconfig.\n\tclusters, err := getClusters(kubeconfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn createIngressInClusters(kubeconfig, ingressFilename, clusters)\n}", "func (i *IngressController) updateIngress(ctx context.Context, ingress *networkingv1.Ingress) error {\n\thost := i.staticAddress\n\tif host == \"\" {\n\t\thosts := make(map[string]uint)\n\t\tfor _, rule := range ingress.Spec.Rules {\n\t\t\tfor _, path := range rule.HTTP.Paths {\n\t\t\t\tsvc, err := i.CoreV1().Services(ingress.Namespace).Get(ctx, path.Backend.Service.Name, metav1.GetOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\topts := metav1.ListOptions{\n\t\t\t\t\tLabelSelector: labels.Set(svc.Spec.Selector).String(),\n\t\t\t\t}\n\n\t\t\t\tpods, err := i.CoreV1().Pods(svc.Namespace).List(ctx, opts)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tfor _, pod := range pods.Items {\n\t\t\t\t\tif pod.Status.Phase != v1.PodRunning {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\t// resolve external IP from node\n\t\t\t\t\tnode, err := i.CoreV1().Nodes().Get(ctx, pod.Spec.NodeName, metav1.GetOptions{})\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tnodeAddress := \"\"\n\t\t\t\t\tfor _, address := range node.Status.Addresses {\n\t\t\t\t\t\tif address.Type == v1.NodeExternalIP {\n\t\t\t\t\t\t\tnodeAddress = address.Address\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\thosts[nodeAddress] += 1\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif len(hosts) == 0 {\n\t\t\tlog.Info(\"No backends found for ingress, can't update ingress host field\")\n\t\t\treturn nil\n\t\t}\n\n\t\tvar max uint\n\t\tfor ip, count := range hosts {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"host\": ip,\n\t\t\t\t\"num\": count,\n\t\t\t}).Debug()\n\t\t\tif count > max {\n\t\t\t\thost = ip\n\t\t\t\tmax = count\n\t\t\t}\n\t\t}\n\t}\n\n\tingress.Status.LoadBalancer.Ingress = []networkingv1.IngressLoadBalancerIngress{{\n\t\tIP: host,\n\t}}\n\n\t_, err := i.NetworkingV1().Ingresses(ingress.Namespace).UpdateStatus(ctx, ingress, metav1.UpdateOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"ingress\": ingress.Name,\n\t\t\"ip\": host,\n\t}).Info()\n\n\treturn nil\n}", "func MakeIngress(dm *servingv1alpha1.DomainMapping, backendServiceName, hostName, ingressClass string, httpOption netv1alpha1.HTTPOption, tls []netv1alpha1.IngressTLS, acmeChallenges ...netv1alpha1.HTTP01Challenge) *netv1alpha1.Ingress {\n\tpaths, hosts := routeresources.MakeACMEIngressPaths(acmeChallenges, sets.NewString(dm.GetName()))\n\treturn &netv1alpha1.Ingress{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: kmeta.ChildName(dm.GetName(), \"\"),\n\t\t\tNamespace: dm.Namespace,\n\t\t\tAnnotations: kmeta.FilterMap(kmeta.UnionMaps(map[string]string{\n\t\t\t\tnetapi.IngressClassAnnotationKey: ingressClass,\n\t\t\t}, dm.GetAnnotations()), routeresources.ExcludedAnnotations.Has),\n\t\t\tLabels: kmeta.UnionMaps(dm.Labels, map[string]string{\n\t\t\t\tserving.DomainMappingUIDLabelKey: string(dm.UID),\n\t\t\t\tserving.DomainMappingNamespaceLabelKey: dm.Namespace,\n\t\t\t}),\n\t\t\tOwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(dm)},\n\t\t},\n\t\tSpec: netv1alpha1.IngressSpec{\n\t\t\tHTTPOption: httpOption,\n\t\t\tTLS: tls,\n\t\t\tRules: 
[]netv1alpha1.IngressRule{{\n\t\t\t\tHosts: append(hosts, dm.Name),\n\t\t\t\tVisibility: netv1alpha1.IngressVisibilityExternalIP,\n\t\t\t\tHTTP: &netv1alpha1.HTTPIngressRuleValue{\n\t\t\t\t\t// The order of the paths is sensitive, always put tls challenge first\n\t\t\t\t\tPaths: append(paths,\n\t\t\t\t\t\t[]netv1alpha1.HTTPIngressPath{{\n\t\t\t\t\t\t\tRewriteHost: hostName,\n\t\t\t\t\t\t\tSplits: []netv1alpha1.IngressBackendSplit{{\n\t\t\t\t\t\t\t\tPercent: 100,\n\t\t\t\t\t\t\t\tAppendHeaders: map[string]string{\n\t\t\t\t\t\t\t\t\tnetheader.OriginalHostKey: dm.Name,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tIngressBackend: netv1alpha1.IngressBackend{\n\t\t\t\t\t\t\t\t\tServiceNamespace: dm.Namespace,\n\t\t\t\t\t\t\t\t\tServiceName: backendServiceName,\n\t\t\t\t\t\t\t\t\tServicePort: intstr.FromInt(80),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t}},\n\t\t\t\t\t\t}}...),\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t}\n}", "func CreateIngressHandlers(lbc *controller.LoadBalancerController) cache.ResourceEventHandlerFuncs {\n\treturn cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\tingress := obj.(*extensions.Ingress)\n\t\t\tif !lbc.IsNginxIngress(ingress) {\n\t\t\t\tlog.Printf(\"Ignoring Ingress %v based on Annotation %v\\n\", ingress.Name, lbc.GetIngressClassKey())\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Printf(\"Adding Ingress: %v\", ingress.Name)\n\t\t\tlbc.AddSyncQueue(obj)\n\t\t},\n\t\tDeleteFunc: func(obj interface{}) {\n\t\t\tingress, isIng := obj.(*extensions.Ingress)\n\t\t\tif !isIng {\n\t\t\t\tdeletedState, ok := obj.(cache.DeletedFinalStateUnknown)\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.Printf(\"Error received unexpected object: %v\", obj)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tingress, ok = deletedState.Obj.(*extensions.Ingress)\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.Printf(\"Error DeletedFinalStateUnknown contained non-Ingress object: %v\", deletedState.Obj)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !lbc.IsNginxIngress(ingress) {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Printf(\"Removing Ingress: %v\", ingress.Name)\n\t\t\tlbc.AddSyncQueue(obj)\n\t\t},\n\t\tUpdateFunc: func(old, cur interface{}) {\n\t\t\tif !reflect.DeepEqual(old, cur) {\n\t\t\t\tglog.V(3).Infof(\"Endpoints %v changed, syncing\", cur.(*api_v1.Endpoints).Name)\n\t\t\t\tlbc.AddSyncQueue(cur)\n\t\t\t}\n\t\t},\n\t}\n}", "func main() {\n\tapp := cdk8s.NewApp(&cdk8s.AppProps{\n\t\tOutdir: jsii.String(\"templates\"),\n\t\tYamlOutputType: cdk8s.YamlOutputType_FILE_PER_RESOURCE,\n\t})\n\n\tchart := cdk8s.NewChart(app, jsii.String(\"cdk8s-demo\"), &cdk8s.ChartProps{\n\t\tLabels: &map[string]*string{\n\t\t\t\"app\": jsii.String(\"cdk8s-demo\"),\n\t\t},\n\t})\n\n\t//deploy :=\n\tdeploy := cdk8splus20.NewDeployment(chart, jsii.String(\"deploy\"), &cdk8splus20.DeploymentProps{\n\t\tReplicas: jsii.Number(3),\n\t\t//\tDefaultSelector: jsii.Bool(false),\n\t\tContainers: &[]*cdk8splus20.ContainerProps{{\n\t\t\tImage: jsii.String(\"ubuntu\"),\n\t\t\tName: jsii.String(\"ubuntu\"),\n\t\t\tLiveness: cdk8splus20.Probe_FromHttpGet(jsii.String(\"/health\"),\n\t\t\t\t&cdk8splus20.HttpGetProbeOptions{\n\t\t\t\t\tInitialDelaySeconds: cdk8s.Duration_Seconds(jsii.Number(30)),\n\t\t\t\t},\n\t\t\t),\n\t\t\tPort: jsii.Number(8080),\n\t\t\tEnv: &map[string]cdk8splus20.EnvValue{\n\t\t\t\t\"env1\": cdk8splus20.EnvValue_FromValue(jsii.String(\"valone\")),\n\t\t\t\t\"env2\": cdk8splus20.EnvValue_FromValue(jsii.String(\"valtwo\")),\n\t\t\t},\n\t\t}},\n\t})\n\n\t//deploy.SelectByLabel(jsii.String(\"klaas\"), 
jsii.String(\"jan\"))\n\t//deploy.SelectByLabel(jsii.String(\"piet\"), jsii.String(\"karel\"))\n\n\tsvc := deploy.Expose(jsii.Number(883), &cdk8splus20.ExposeOptions{})\n\n\ting := cdk8splus20.NewIngressV1Beta1(chart, jsii.String(\"ing\"), &cdk8splus20.IngressV1Beta1Props{})\n\ting.AddHostDefaultBackend(\n\t\tjsii.String(\"host\"),\n\t\tcdk8splus20.IngressV1Beta1Backend_FromService(\n\t\t\tsvc,\n\t\t\t&cdk8splus20.ServiceIngressV1BetaBackendOptions{},\n\t\t))\n\n\tnetworkingistioio.NewVirtualService(chart, jsii.String(\"vs\"), &networkingistioio.VirtualServiceProps{\n\t\tMetadata: &cdk8s.ApiObjectMetadata{\n\t\t\tName: jsii.String(\"test-vs\"),\n\t\t},\n\t\tSpec: &networkingistioio.VirtualServiceSpec{\n\t\t\tGateways: jsii.Strings(\"test-gateway\"),\n\t\t\tHosts: jsii.Strings(\"bla.com\", \"boe.com\"),\n\t\t\tHttp: &[]*networkingistioio.VirtualServiceSpecHttp{\n\t\t\t\t{\n\t\t\t\t\tMatch: &[]*networkingistioio.VirtualServiceSpecHttpMatch{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tUri: &networkingistioio.VirtualServiceSpecHttpMatchUri{\n\t\t\t\t\t\t\t\tPrefix: jsii.String(\"/test\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tRoute: &[]*networkingistioio.VirtualServiceSpecHttpRoute{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tDestination: &networkingistioio.VirtualServiceSpecHttpRouteDestination{\n\t\t\t\t\t\t\t\tHost: svc.Name(),\n\t\t\t\t\t\t\t\tPort: &networkingistioio.VirtualServiceSpecHttpRouteDestinationPort{\n\t\t\t\t\t\t\t\t\t(*svc.Ports())[0].Port,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n\n\tapp.Synth()\n}", "func createIngress(kubeconfig string, kubeContexts []string, ing *v1beta1.Ingress) ([]string, map[string]kubeclient.Interface, error) {\n\tclients, err := kubeutils.GetClients(kubeconfig, kubeContexts)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tclusters, createErr := createIngressInClusters(ing, clients)\n\treturn clusters, clients, createErr\n}", "func (x *Kubernetes) ProcessIngress(e transistor.Event) {\n\tlog.Println(\"processing ingress\")\n\n\tif e.Matches(\"project:kubernetes:ingress\") {\n\t\tvar err error\n\t\tswitch e.Action {\n\t\tcase transistor.GetAction(\"delete\"):\n\t\t\terr = x.deleteIngress(e)\n\t\tcase transistor.GetAction(\"create\"):\n\t\t\terr = x.createIngress(e)\n\t\tcase transistor.GetAction(\"update\"):\n\t\t\terr = x.createIngress(e)\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\tx.sendErrorResponse(e, err.Error())\n\t\t}\n\t}\n\n\treturn\n}", "func readAndConvertDirectIngress(reader io.Reader, writer io.Writer, clusterConfig *agent.ClusterConfig, store istiomodel.ConfigStore, svcStore []kube_v1.Service) error { // nolint: lll\n\tconfigs, err := readConfigs(reader)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Ensure every input config is valid\n\tmcDescriptor := istiomodel.ConfigDescriptor{\n\t\tmcmodel.ServiceExpositionPolicy,\n\t\tmcmodel.RemoteServiceBinding,\n\t}\n\tfor _, config := range configs {\n\t\tschema, exists := mcDescriptor.GetByType(config.Type)\n\t\tif !exists {\n\t\t\tcontinue\n\t\t}\n\t\terr = schema.Validate(config.Name, config.Namespace, config.Spec)\n\t\tif err != nil {\n\t\t\treturn multierror.Prefix(err, \"input validation failure\")\n\t\t}\n\t}\n\n\tistioConfigs, svcs, err := mcmodel.ConvertBindingsAndExposuresDirectIngress(configs, clusterConfig, store, svcStore)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconfigDescriptor := 
istiomodel.ConfigDescriptor{\n\t\tistiomodel.VirtualService,\n\t\tistiomodel.Gateway,\n\t\tistiomodel.DestinationRule,\n\t\tistiomodel.ServiceEntry,\n\t}\n\n\t// Ensure every generated config is valid\n\tfor _, istioConfig := range istioConfigs {\n\t\tschema, exists := configDescriptor.GetByType(istioConfig.Type)\n\t\tif !exists {\n\t\t\tcontinue // Don't validate generated K8s config (the Service)\n\t\t}\n\n\t\terr = schema.Validate(istioConfig.Name, istioConfig.Namespace, istioConfig.Spec)\n\t\tif err != nil {\n\t\t\treturn multierror.Prefix(err, \"validation failure\")\n\t\t}\n\t}\n\n\terr = writeIstioYAMLOutput(configDescriptor, istioConfigs, writer)\n\tif err != nil {\n\t\treturn multierror.Prefix(err, \"couldn't write yaml\")\n\t}\n\n\tif len(svcs) > 0 {\n\t\twriter.Write([]byte(\"---\\n\")) // nolint: errcheck\n\t\terr = writeK8sYAMLOutput(svcs, writer)\n\t\tif err != nil {\n\t\t\treturn multierror.Prefix(err, \"couldn't write yaml\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *StepExposeOptions) createIngress(requirements *config.RequirementsConfig, kubeClient kubernetes.Interface, ns string, service *corev1.Service, fileName string) (*v1beta1.Ingress, error) {\n\texists, err := util.FileExists(fileName)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to check if file exists %s\", fileName)\n\t}\n\tif !exists {\n\t\tlog.Logger().Warnf(\"failed to find file %s\\n\", fileName)\n\t\treturn nil, nil\n\t}\n\tdata, err := readYamlTemplate(fileName, requirements, service)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to load vault ingress template file %s\", fileName)\n\t}\n\n\tanswer := &v1beta1.Ingress{}\n\terr = yaml.Unmarshal(data, answer)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to load Ingress from result of template file %s\", fileName)\n\t}\n\treturn answer, nil\n}", "func (t *Transformer) CreateIngresses(o *object.Service) ([]runtime.Object, error) {\n\tresult := []runtime.Object{}\n\tserviceLabels := map[string]string(o.Labels)\n\n\ti := &ext_v1beta1.Ingress{\n\t\tObjectMeta: api_v1.ObjectMeta{\n\t\t\tName: o.Name,\n\t\t\tLabels: *util.MergeMaps(\n\t\t\t\t// The map containing `\"service\": o.Name` should always be\n\t\t\t\t// passed later to avoid being overridden by util.MergeMaps()\n\t\t\t\t&serviceLabels,\n\t\t\t\t&map[string]string{\n\t\t\t\t\t\"service\": o.Name,\n\t\t\t\t},\n\t\t\t),\n\t\t},\n\t}\n\n\tfor _, c := range o.Containers {\n\t\t// We don't want to generate ingress if there are no ports to be mapped\n\t\tif len(c.Ports) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, p := range c.Ports {\n\t\t\tif p.Host == nil {\n\t\t\t\t// Not Ingress\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\thost := *p.Host\n\t\t\tvar rule *ext_v1beta1.IngressRule\n\t\t\tfor idx := range i.Spec.Rules {\n\t\t\t\tr := &i.Spec.Rules[idx]\n\t\t\t\tif r.Host == host {\n\t\t\t\t\trule = r\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif rule == nil {\n\t\t\t\trule = &ext_v1beta1.IngressRule{\n\t\t\t\t\tHost: host,\n\t\t\t\t\tIngressRuleValue: ext_v1beta1.IngressRuleValue{\n\t\t\t\t\t\tHTTP: &ext_v1beta1.HTTPIngressRuleValue{},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\ti.Spec.Rules = append(i.Spec.Rules, *rule)\n\t\t\t}\n\n\t\t\trule.HTTP.Paths = append(rule.HTTP.Paths, ext_v1beta1.HTTPIngressPath{\n\t\t\t\tPath: p.Path,\n\t\t\t\tBackend: ext_v1beta1.IngressBackend{\n\t\t\t\t\tServiceName: o.Name,\n\t\t\t\t\tServicePort: intstr.FromInt(p.Port.ServicePort),\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t}\n\n\tif len(i.Spec.Rules) > 0 {\n\t\tresult = append(result, 
i)\n\t}\n\n\treturn result, nil\n}", "func secondaryIngress(ctx context.Context, h *helper.H) cloudingressv1alpha1.ApplicationIngress {\n\t// first get the default ingresscontroller\n\tsecondaryIngress, exists, _ := appIngressExits(ctx, h, true, \"\")\n\tExpect(exists).To(BeTrue())\n\n\t// then update it to create a secondary ingress\n\tsecondaryIngress.Default = false\n\tsecondaryIngress.DNSName = \"app-e2e-\" + secondaryIngress.DNSName\n\n\treturn secondaryIngress\n}", "func Test_Controller_Resource_TCCPF_Template_Render(t *testing.T) {\n\ttestCases := []struct {\n\t\tname string\n\t\tctx context.Context\n\t\tcr infrastructurev1alpha3.AWSCluster\n\t\troute53Enabled bool\n\t}{\n\t\t{\n\t\t\tname: \"case 0: basic test\",\n\t\t\tctx: unittest.DefaultContext(),\n\t\t\tcr: unittest.DefaultCluster(),\n\t\t\troute53Enabled: true,\n\t\t},\n\t\t{\n\t\t\tname: \"case 1: without route 53 enabled\",\n\t\t\tctx: unittest.DefaultContext(),\n\t\t\tcr: unittest.DefaultCluster(),\n\t\t\troute53Enabled: false,\n\t\t},\n\t}\n\n\tfor i, tc := range testCases {\n\t\tt.Run(strconv.Itoa(i), func(t *testing.T) {\n\t\t\tvar err error\n\n\t\t\tk := unittest.FakeK8sClient()\n\n\t\t\tvar d *changedetection.TCCPF\n\t\t\t{\n\t\t\t\tc := changedetection.TCCPFConfig{\n\t\t\t\t\tLogger: microloggertest.New(),\n\t\t\t\t}\n\n\t\t\t\td, err = changedetection.NewTCCPF(c)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar e recorder.Interface\n\t\t\t{\n\t\t\t\tc := recorder.Config{\n\t\t\t\t\tK8sClient: k,\n\n\t\t\t\t\tComponent: \"dummy\",\n\t\t\t\t}\n\n\t\t\t\te = recorder.New(c)\n\t\t\t}\n\n\t\t\tvar h *cphostedzone.HostedZone\n\t\t\t{\n\t\t\t\tc := cphostedzone.Config{\n\t\t\t\t\tLogger: microloggertest.New(),\n\n\t\t\t\t\tRoute53Enabled: false,\n\t\t\t\t}\n\n\t\t\t\th, err = cphostedzone.New(c)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar r *Resource\n\t\t\t{\n\t\t\t\tc := Config{\n\t\t\t\t\tDetection: d,\n\t\t\t\t\tEvent: e,\n\t\t\t\t\tHostedZone: h,\n\t\t\t\t\tLogger: microloggertest.New(),\n\n\t\t\t\t\tInstallationName: \"dummy\",\n\t\t\t\t\tRoute53Enabled: tc.route53Enabled,\n\t\t\t\t}\n\n\t\t\t\tr, err = New(c)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tparams, err := r.newTemplateParams(tc.ctx, tc.cr)\n\t\t\tif err != nil {\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ttemplateBody, err := template.Render(params)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\t_, err = yaml.YAMLToJSONStrict([]byte(templateBody))\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\tp := filepath.Join(\"testdata\", unittest.NormalizeFileName(tc.name)+\".golden\")\n\n\t\t\tif *update {\n\t\t\t\terr := os.WriteFile(p, []byte(templateBody), 0644) // nolint: gosec\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tgoldenFile, err := os.ReadFile(p)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\tif !bytes.Equal([]byte(templateBody), goldenFile) {\n\t\t\t\tt.Fatalf(\"\\n\\n%s\\n\", cmp.Diff(string(goldenFile), templateBody))\n\t\t\t}\n\t\t})\n\t}\n}", "func createIngressInClusters(kubeconfig, ingressFilename string, clusters []string) error {\n\tkubectlArgs := []string{\"kubectl\"}\n\tif kubeconfig != \"\" {\n\t\tkubectlArgs = append(kubectlArgs, fmt.Sprintf(\"--kubeconfig=%s\", kubeconfig))\n\t}\n\t// TODO(nikhiljindal): Validate and optionally add the gce-multi-cluster class annotation to the ingress YAML spec.\n\tcreateArgs := 
append(kubectlArgs, []string{\"create\", fmt.Sprintf(\"--filename=%s\", ingressFilename)}...)\n\tfor _, c := range clusters {\n\t\tfmt.Println(\"Creating ingress in context:\", c)\n\t\tcontextArgs := append(createArgs, fmt.Sprintf(\"--context=%s\", c))\n\t\toutput, err := runCommand(contextArgs)\n\t\tif err != nil {\n\t\t\t// TODO(nikhiljindal): Continue if this is an ingress already exists error.\n\t\t\tglog.V(2).Infof(\"error in running command: %s\", strings.Join(contextArgs, \" \"))\n\t\t\treturn fmt.Errorf(\"error in creating ingress in cluster %s: %s, output: %s\", c, err, output)\n\t\t}\n\t}\n\treturn nil\n}", "func (policy *PolicySvc) augmentPolicy(policyDoc *common.Policy) error {\n\t// Get info from topology service\n\tlog.Printf(\"Augmenting policy %s\", policyDoc.Name)\n\n\tif policyDoc.ExternalID != \"\" {\n\t\t// TODO\n\t\t// Important! This should really be done in policy agent.\n\t\t// Only done here as temporary measure.\n\t\texternalId := makeId(policyDoc.AppliedTo, policyDoc.Name)\n\t\tlog.Printf(\"Constructing internal policy name = %s\", externalId)\n\t\tpolicyDoc.ExternalID = externalId\n\t}\n\n\ttopoUrl, err := policy.client.GetServiceUrl(\"topology\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Query topology for data center information\n\t// TODO move this to root\n\tindex := common.IndexResponse{}\n\terr = policy.client.Get(topoUrl, &index)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdcURL := index.Links.FindByRel(\"datacenter\")\n\tdc := &common.Datacenter{}\n\terr = policy.client.Get(dcURL, dc)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"Policy server received datacenter information from topology service: %+v\\n\", dc)\n\tpolicyDoc.Datacenter = dc\n\n\tfor i, _ := range policyDoc.AppliedTo {\n\t\tendpoint := &policyDoc.AppliedTo[i]\n\t\terr = policy.augmentEndpoint(endpoint)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor j, _ := range policyDoc.Ingress {\n\t\tfor i, _ := range policyDoc.Ingress[j].Rules {\n\t\t\trule := &policyDoc.Ingress[j].Rules[i]\n\t\t\trule.Protocol = strings.ToUpper(rule.Protocol)\n\t\t}\n\n\t\tfor i, _ := range policyDoc.Ingress[j].Peers {\n\t\t\tendpoint := &policyDoc.Ingress[j].Peers[i]\n\t\t\terr = policy.augmentEndpoint(endpoint)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func add(mgr manager.Manager, r reconcile.Reconciler) error {\n\t// Create a new controller\n\tc, err := controller.New(\"ingress-controller\", mgr, controller.Options{Reconciler: r})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for changes to primary resource Ingress\n\terr = c.Watch(&source.Kind{Type: &networkingv1alpha1.Ingress{}}, &handler.EnqueueRequestForObject{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for changes to secondary resource Routes and requeue the\n\t// owner Ingress\n\terr = c.Watch(&source.Kind{Type: &routev1.Route{}}, &handler.EnqueueRequestForOwner{\n\t\tIsController: true,\n\t\tOwnerType: &networkingv1alpha1.Ingress{},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (ings *IngressServer) InitIngress(service *spec.Service, port uint32) error {\n\tings.mutex.Lock()\n\tdefer ings.mutex.Unlock()\n\n\tings.applicationPort = port\n\n\tif _, ok := ings.pipelines[service.IngressPipelineName()]; !ok {\n\t\tsuperSpec, err := service.SideCarIngressPipelineSpec(port)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tentity, err := ings.tc.CreateHTTPPipelineForSpec(ings.namespace, superSpec)\n\t\tif err != nil {\n\t\t\treturn 
fmt.Errorf(\"create http pipeline %s failed: %v\", superSpec.Name(), err)\n\t\t}\n\t\tings.pipelines[service.IngressPipelineName()] = entity\n\t}\n\n\tif ings.httpServer == nil {\n\t\tsuperSpec, err := service.SideCarIngressHTTPServerSpec()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tentity, err := ings.tc.CreateHTTPServerForSpec(ings.namespace, superSpec)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"create http server %s failed: %v\", superSpec.Name(), err)\n\t\t}\n\t\tings.httpServer = entity\n\t}\n\n\tif err := ings.inf.OnPartOfServiceSpec(service.Name, informer.AllParts, ings.reloadTraffic); err != nil {\n\t\t// Only return err when its type is not `AlreadyWatched`\n\t\tif err != informer.ErrAlreadyWatched {\n\t\t\tlogger.Errorf(\"add ingress spec watching service: %s failed: %v\", service.Name, err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func addIngress(lbc *LoadBalancerController, ing *extensions.Ingress, pm *nodePortManager) {\n\tlbc.ctx.IngressInformer.GetIndexer().Add(ing)\n\tif pm == nil {\n\t\treturn\n\t}\n\tfor _, rule := range ing.Spec.Rules {\n\t\tfor _, path := range rule.HTTP.Paths {\n\t\t\tsvc := &api_v1.Service{\n\t\t\t\tObjectMeta: meta_v1.ObjectMeta{\n\t\t\t\t\tName: path.Backend.ServiceName,\n\t\t\t\t\tNamespace: ing.Namespace,\n\t\t\t\t},\n\t\t\t}\n\t\t\tvar svcPort api_v1.ServicePort\n\t\t\tswitch path.Backend.ServicePort.Type {\n\t\t\tcase intstr.Int:\n\t\t\t\tsvcPort = api_v1.ServicePort{Port: path.Backend.ServicePort.IntVal}\n\t\t\tdefault:\n\t\t\t\tsvcPort = api_v1.ServicePort{Name: path.Backend.ServicePort.StrVal}\n\t\t\t}\n\t\t\tsvcPort.NodePort = int32(pm.getNodePort(path.Backend.ServiceName))\n\t\t\tsvc.Spec.Ports = []api_v1.ServicePort{svcPort}\n\t\t\tlbc.ctx.ServiceInformer.GetIndexer().Add(svc)\n\t\t}\n\t}\n}", "func getIngressUrls(config *rest.Config, namespace string) url {\n\tclientset, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tingress, err := clientset.Extensions().Ingresses(namespace).List(metav1.ListOptions{})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar urls url\n\n\t//loop through all ingress items in namespace\n\tfor _, i := range ingress.Items {\n\n\t\t//default http protocol to http\n\t\thttpProto := \"http\"\n\t\t//loop through annotations for each ingress object\n\t\tfor k, v := range i.Annotations {\n\t\t\t//if ssl-redirect is set, only check https\n\t\t\tif k == \"ingress.kubernetes.io/ssl-redirect\" && v == \"true\" {\n\t\t\t\thttpProto = \"https\"\n\t\t\t} else if k == \"traefik.ingress.kubernetes.io/redirect-entry-point\" && v == \"https\" {\n\t\t\t\thttpProto = \"https\"\n\t\t\t} else if k == \"traefik.ingress.kubernetes.io/frontend-entry-points\" && v == \"https\" {\n\t\t\t\thttpProto = \"https\"\n\t\t\t}\n\t\t}\n\t\t//build list of https urls from ingress.Spec.TLS.hosts[]\n\t\t//if ingress.spec.tls.host does not have corresponding ingress.spec.rules.host it is ignored.\n\t\t//end result https://ingress.spec.tls.host/ingress.spec.rules.host.path\n\t\tfor _, t := range i.Spec.TLS {\n\t\t\t//if tls has no host, use rules[0] as the host\n\t\t\tif t.Hosts == nil {\n\t\t\t\tfor _, r := range i.Spec.Rules {\n\t\t\t\t\t//skip wild card hosts\n\t\t\t\t\tif !strings.Contains(r.Host, \"*\") {\n\t\t\t\t\t\tfor _, p := range r.HTTP.Paths {\n\t\t\t\t\t\t\turls = append(urls, \"https://\"+r.Host+p.Path)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfor _, h := range t.Hosts {\n\t\t\t\t\tfor _, r := range i.Spec.Rules {\n\t\t\t\t\t\tif r.Host == h 
{\n\t\t\t\t\t\t\t//skip wild card hosts\n\t\t\t\t\t\t\tif !strings.Contains(r.Host, \"*\") {\n\t\t\t\t\t\t\t\tfor _, p := range r.HTTP.Paths {\n\t\t\t\t\t\t\t\t\turls = append(urls, \"https://\"+h+p.Path)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t//if ssl redirect annotation isn't set build http urls from ingress.spce.rules.hosts[].paths[]\n\t\tif httpProto != \"https\" {\n\t\t\tfor _, r := range i.Spec.Rules {\n\t\t\t\t//skip wild card hosts\n\t\t\t\tif !strings.Contains(r.Host, \"*\") {\n\t\t\t\t\tfor _, p := range r.HTTP.Paths {\n\t\t\t\t\t\turls = append(urls, \"http://\"+r.Host+p.Path)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t//remove excluded urls in -customizefile file\n\turls = urls.removeExcluded(excluded)\n\turls = urls.addAdditional(additional)\n\treturn urls\n\n}", "func addAppIngress(ctx context.Context, h *helper.H, appIngressToAppend cloudingressv1alpha1.ApplicationIngress) {\n\tvar err error\n\n\tPublishingStrategyInstance, ps := getPublishingStrategy(ctx, h)\n\tPublishingStrategyInstance.Spec.ApplicationIngress = append(\n\t\tPublishingStrategyInstance.Spec.ApplicationIngress,\n\t\tappIngressToAppend,\n\t)\n\n\tps.Object, err = runtime.DefaultUnstructuredConverter.ToUnstructured(&PublishingStrategyInstance)\n\tExpect(err).NotTo(HaveOccurred())\n\n\t// Update the publishingstrategy\n\tps, err = h.Dynamic().\n\t\tResource(schema.GroupVersionResource{Group: \"cloudingress.managed.openshift.io\", Version: \"v1alpha1\", Resource: \"publishingstrategies\"}).\n\t\tNamespace(OperatorNamespace).\n\t\tUpdate(ctx, ps, metav1.UpdateOptions{})\n\tExpect(err).NotTo(HaveOccurred())\n}", "func ingressName(instance *v1alpha1.Nuxeo, nodeSet v1alpha1.NodeSet) string {\n\treturn instance.Name + \"-\" + nodeSet.Name + \"-ingress\"\n}", "func newIngressForCR(cr *operatorv1alpha1.Minecraft) *extensionsv1beta1.Ingress {\n\t// https://godoc.org/k8s.io/api/extensions/v1beta1\n labels := map[string]string{\n \"app\": cr.Name,\n \"version\": cr.Spec.Version,\n \"uela\": cr.Spec.Uela,\n }\n return &extensionsv1beta1.Ingress{\n ObjectMeta: metav1.ObjectMeta{\n Name: cr.Name + \"-service\",\n Namespace: cr.Namespace,\n Labels: labels,\n },\n // https://godoc.org/k8s.io/api/extensions/v1beta1#IngressSpec\n\t\tSpec: extensionsv1beta1.IngressSpec{\n\t\t\tBackend: &extensionsv1beta1.IngressBackend{\n\t\t\t\tServiceName: cr.Name + \"-service\",\n\t\t\t\tServicePort: intstr.FromString(\"minecraft\"),\n\t\t\t},\n\t // https://godoc.org/k8s.io/api/extensions/v1beta1#IngressRule\n\t\t\tRules: []extensionsv1beta1.IngressRule{\n\t\t\t\t{\n\t\t\t\t\tHost: cr.Spec.HostName,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func (r *AppReconciler) ingressForApp(app *cloudv1alpha1.App) (*netv1beta1.Ingress, error) {\n\tprojectName := AppProjectName(app)\n\tlabels := LabelsForApp(projectName, app.Name)\n\n\tport := appPortToExposeExternally(app)\n\n\tingr := &netv1beta1.Ingress{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: app.Name,\n\t\t\tNamespace: app.Namespace,\n\t\t\tLabels: labels,\n\t\t},\n\t\tSpec: netv1beta1.IngressSpec{\n\t\t\tRules: []netv1beta1.IngressRule{\n\t\t\t\tnetv1beta1.IngressRule{\n\t\t\t\t\tHost: appURLHost(app),\n\t\t\t\t\tIngressRuleValue: netv1beta1.IngressRuleValue{\n\t\t\t\t\t\tHTTP: &netv1beta1.HTTPIngressRuleValue{\n\t\t\t\t\t\t\tPaths: []netv1beta1.HTTPIngressPath{\n\t\t\t\t\t\t\t\tnetv1beta1.HTTPIngressPath{\n\t\t\t\t\t\t\t\t\tBackend: netv1beta1.IngressBackend{\n\t\t\t\t\t\t\t\t\t\tServiceName: 
app.Name,\n\t\t\t\t\t\t\t\t\t\tServicePort: intstr.FromInt(int(*port)),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// Set app instance as the owner and controller\n\terr := ctrl.SetControllerReference(app, ingr, r.Scheme)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ingr, nil\n}", "func IngressToGateway(key resource.VersionedKey, i *ingress.IngressSpec) resource.Entry {\n\tnamespace, name := key.FullName.InterpretAsNamespaceAndName()\n\n\tgateway := &v1alpha3.Gateway{\n\t\tSelector: model.IstioIngressWorkloadLabels,\n\t}\n\n\t// FIXME this is a temporary hack until all test templates are updated\n\t//for _, tls := range i.Spec.TLS {\n\tif len(i.TLS) > 0 {\n\t\ttls := i.TLS[0] // FIXME\n\t\t// TODO validation when multiple wildcard tls secrets are given\n\t\tif len(tls.Hosts) == 0 {\n\t\t\ttls.Hosts = []string{\"*\"}\n\t\t}\n\t\tgateway.Servers = append(gateway.Servers, &v1alpha3.Server{\n\t\t\tPort: &v1alpha3.Port{\n\t\t\t\tNumber: 443,\n\t\t\t\tProtocol: string(model.ProtocolHTTPS),\n\t\t\t\tName: fmt.Sprintf(\"https-443-i-%s-%s\", name, namespace),\n\t\t\t},\n\t\t\tHosts: tls.Hosts,\n\t\t\t// While we accept multiple certs, we expect them to be mounted in\n\t\t\t// /etc/istio/certs/namespace/secretname/tls.crt|tls.key\n\t\t\tTls: &v1alpha3.Server_TLSOptions{\n\t\t\t\tHttpsRedirect: false,\n\t\t\t\tMode: v1alpha3.Server_TLSOptions_SIMPLE,\n\t\t\t\t// TODO this is no longer valid for the new v2 stuff\n\t\t\t\tPrivateKey: path.Join(model.IngressCertsPath, model.IngressKeyFilename),\n\t\t\t\tServerCertificate: path.Join(model.IngressCertsPath, model.IngressCertFilename),\n\t\t\t\t// TODO: make sure this is mounted\n\t\t\t\tCaCertificates: path.Join(model.IngressCertsPath, model.RootCertFilename),\n\t\t\t},\n\t\t})\n\t}\n\n\tgateway.Servers = append(gateway.Servers, &v1alpha3.Server{\n\t\tPort: &v1alpha3.Port{\n\t\t\tNumber: 80,\n\t\t\tProtocol: string(model.ProtocolHTTP),\n\t\t\tName: fmt.Sprintf(\"http-80-i-%s-%s\", name, namespace),\n\t\t},\n\t\tHosts: []string{\"*\"},\n\t})\n\n\tnewName := name + \"-\" + model.IstioIngressGatewayName\n\tnewNamespace := model.IstioIngressNamespace\n\n\tgw := resource.Entry{\n\t\tID: resource.VersionedKey{\n\t\t\tKey: resource.Key{\n\t\t\t\tFullName: resource.FullNameFromNamespaceAndName(newNamespace, newName),\n\t\t\t\tTypeURL: metadata.VirtualService.TypeURL,\n\t\t\t},\n\t\t\tVersion: key.Version,\n\t\t\tCreateTime: key.CreateTime,\n\t\t},\n\t\tItem: gateway,\n\t}\n\n\treturn gw\n}", "func (t *tectonic) Generate(dependencies map[asset.Asset]*asset.State) (*asset.State, error) {\n\tic, err := installconfig.GetInstallConfig(t.installConfig, dependencies)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tingressContents := dependencies[t.ingressCertKey].Contents\n\ttemplateData := &tectonicTemplateData{\n\t\tIngressCaCert: base64.StdEncoding.EncodeToString(dependencies[t.kubeCA].Contents[certIndex].Data),\n\t\tIngressKind: \"haproxy-router\",\n\t\tIngressStatusPassword: ic.Admin.Password, // FIXME: generate a new random one instead?\n\t\tIngressTLSBundle: base64.StdEncoding.EncodeToString(bytes.Join([][]byte{ingressContents[certIndex].Data, ingressContents[keyIndex].Data}, []byte{})),\n\t\tIngressTLSCert: base64.StdEncoding.EncodeToString(ingressContents[certIndex].Data),\n\t\tIngressTLSKey: base64.StdEncoding.EncodeToString(ingressContents[keyIndex].Data),\n\t\tKubeAddonOperatorImage: 
\"quay.io/coreos/kube-addon-operator-dev:3b6952f5a1ba89bb32dd0630faddeaf2779c9a85\",\n\t\tKubeCoreOperatorImage: \"quay.io/coreos/kube-core-operator-dev:3b6952f5a1ba89bb32dd0630faddeaf2779c9a85\",\n\t\tPullSecret: base64.StdEncoding.EncodeToString([]byte(ic.PullSecret)),\n\t\tTectonicIngressControllerOperatorImage: \"quay.io/coreos/tectonic-ingress-controller-operator-dev:3b6952f5a1ba89bb32dd0630faddeaf2779c9a85\",\n\t\tTectonicVersion: \"1.8.4-tectonic.2\",\n\t}\n\n\tassetData := map[string][]byte{\n\t\t// template files\n\t\t\"secrets/ingress-tls.yaml\": applyTemplateData(secrets.IngressTLS, templateData),\n\t\t\"secrets/ca-cert.yaml\": applyTemplateData(secrets.CaCert, templateData),\n\t\t\"secrets/pull.json\": applyTemplateData(secrets.Pull, templateData),\n\t\t\"updater/operators/tectonic-ingress-controller-operator.yaml\": applyTemplateData(operators.TectonicIngressControllerOperator, templateData),\n\t\t\"updater/operators/kube-addon-operator.yaml\": applyTemplateData(operators.KubeAddonOperator, templateData),\n\t\t\"updater/operators/kube-core-operator.yaml\": applyTemplateData(operators.KubeCoreOperator, templateData),\n\t\t\"updater/app_versions/app-version-tectonic-cluster.yaml\": applyTemplateData(appversions.AppVersionTectonicCluster, templateData),\n\t\t\"ingress/pull.json\": applyTemplateData(ingress.Pull, templateData),\n\t\t\"ingress/cluster-config.yaml\": applyTemplateData(ingress.ClusterConfig, templateData),\n\n\t\t// constant files\n\t\t\"security/priviledged-scc-tectonic.yaml\": []byte(security.PriviledgedSccTectonic),\n\t\t\"rbac/role-admin.yaml\": []byte(rbac.RoleAdmin),\n\t\t\"rbac/binding-admin.yaml\": []byte(rbac.BindingAdmin),\n\t\t\"rbac/binding-discovery.yaml\": []byte(rbac.BindingDiscovery),\n\t\t\"rbac/role-user.yaml\": []byte(rbac.RoleUser),\n\t\t\"updater/migration-status-kind.yaml\": []byte(updater.MigrationStatusKind),\n\t\t\"updater/app_versions/app-version-kube-addon.yaml\": []byte(appversions.AppVersionKubeAddon),\n\t\t\"updater/app_versions/app-version-tectonic-ingress.yaml\": []byte(appversions.AppVersionTectonicIngress),\n\t\t\"updater/app_versions/app-version-kube-core.yaml\": []byte(appversions.AppVersionKubeCore),\n\t\t\"updater/app-version-kind.yaml\": []byte(updater.AppVersionKind),\n\t\t\"ingress/svc-account.yaml\": []byte(ingress.SvcAccount),\n\t}\n\n\tvar assetContents []asset.Content\n\tfor name, data := range assetData {\n\t\tassetContents = append(assetContents, asset.Content{\n\t\t\tName: filepath.Join(\"tectonic\", name),\n\t\t\tData: data,\n\t\t})\n\t}\n\n\treturn &asset.State{Contents: assetContents}, nil\n}", "func setIngressManagedAnnotation(rollouts []v1alpha1.Rollout, refResource validation.ReferencedResources) {\n\tfor _, rollout := range rollouts {\n\t\tfor i := range refResource.Ingresses {\n\t\t\tvar serviceName string\n\n\t\t\t// Basic Canary so ingress is only pointing a single service and so no linting is needed for this case.\n\t\t\tif rollout.Spec.Strategy.Canary == nil || rollout.Spec.Strategy.Canary.TrafficRouting == nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif rollout.Spec.Strategy.Canary.TrafficRouting.Nginx != nil {\n\t\t\t\tserviceName = rollout.Spec.Strategy.Canary.StableService\n\t\t\t} else if rollout.Spec.Strategy.Canary.TrafficRouting.ALB != nil {\n\t\t\t\tserviceName = rollout.Spec.Strategy.Canary.StableService\n\t\t\t\tif rollout.Spec.Strategy.Canary.TrafficRouting.ALB.RootService != \"\" {\n\t\t\t\t\tserviceName = rollout.Spec.Strategy.Canary.TrafficRouting.ALB.RootService\n\t\t\t\t}\n\t\t\t} else if 
rollout.Spec.Strategy.Canary.TrafficRouting.SMI != nil {\n\t\t\t\tserviceName = rollout.Spec.Strategy.Canary.TrafficRouting.SMI.RootService\n\t\t\t}\n\n\t\t\tif ingressutil.HasRuleWithService(&refResource.Ingresses[i], serviceName) {\n\t\t\t\tannotations := refResource.Ingresses[i].GetAnnotations()\n\t\t\t\tif annotations == nil {\n\t\t\t\t\tannotations = make(map[string]string)\n\t\t\t\t}\n\t\t\t\tannotations[v1alpha1.ManagedByRolloutsKey] = rollout.Name\n\t\t\t\trefResource.Ingresses[i].SetAnnotations(annotations)\n\t\t\t}\n\t\t}\n\t}\n}", "func Start(clientset *kubernetes.Clientset, stopCh <-chan struct{}) {\n\tglog.Info(\"Starting ingress initializer...\")\n\trestClient := clientset.ExtensionsV1beta1().RESTClient()\n\twatchlist := cache.NewListWatchFromClient(restClient, \"ingresses\", corev1.NamespaceAll, fields.Everything())\n\tincludeUninitializedWatchlist := &cache.ListWatch{\n\t\tListFunc: func(options metav1.ListOptions) (runtime.Object, error) {\n\t\t\toptions.IncludeUninitialized = true\n\t\t\treturn watchlist.List(options)\n\t\t},\n\t\tWatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {\n\t\t\toptions.IncludeUninitialized = true\n\t\t\treturn watchlist.Watch(options)\n\t\t},\n\t}\n\n\tresyncPeriod := 30 * time.Second\n\n\t_, controller := cache.NewInformer(includeUninitializedWatchlist, &extv1beta1.Ingress{}, resyncPeriod,\n\t\tcache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\terr := initializeIngress(obj.(*extv1beta1.Ingress), clientset)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t)\n\tgo controller.Run(stopCh)\n}", "func (c Controller) create(client Client, template *Template, nameSpace, instanceID string, deploy *Payload) (*Dispatched, error) {\n\tvar (\n\t\tdispatched = &Dispatched{}\n\t\tstatusKey = StatusKey(instanceID, \"provision\")\n\t)\n\tfor _, ob := range template.Objects {\n\t\tswitch ob.(type) {\n\t\tcase *dc.DeploymentConfig:\n\t\t\tdeployment := ob.(*dc.DeploymentConfig)\n\t\t\tdeployed, err := client.CreateDeployConfigInNamespace(nameSpace, deployment)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdispatched.DeploymentName = deployed.Name\n\t\t\tdispatched.DeploymentLabels = deployed.Labels\n\t\t\tc.statusPublisher.Publish(statusKey, configInProgress, \"deployment created \"+deployed.Name)\n\t\tcase *k8api.Service:\n\t\t\tif _, err := client.CreateServiceInNamespace(nameSpace, ob.(*k8api.Service)); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tc.statusPublisher.Publish(statusKey, configInProgress, \" created service definition \")\n\t\tcase *route.Route:\n\t\t\tr, err := client.CreateRouteInNamespace(nameSpace, ob.(*route.Route))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdispatched.Route = r\n\t\t\tc.statusPublisher.Publish(statusKey, configInProgress, \" created route definition \")\n\t\tcase *image.ImageStream:\n\t\t\tif _, err := client.CreateImageStream(nameSpace, ob.(*image.ImageStream)); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tc.statusPublisher.Publish(statusKey, configInProgress, \" created imageStream definition \")\n\t\tcase *bc.BuildConfig:\n\t\t\tbConfig := ob.(*bc.BuildConfig)\n\t\t\tif _, err := client.CreateBuildConfigInNamespace(nameSpace, bConfig); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tc.statusPublisher.Publish(statusKey, configInProgress, \" created buildConfig definition \")\n\t\tcase *k8api.Secret:\n\t\t\tif _, err := client.CreateSecretInNamespace(nameSpace, 
ob.(*k8api.Secret)); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tc.statusPublisher.Publish(statusKey, configInProgress, \" created secret definition \")\n\t\tcase *k8api.PersistentVolumeClaim:\n\t\t\tif _, err := client.CreatePersistentVolumeClaim(nameSpace, ob.(*k8api.PersistentVolumeClaim)); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tc.statusPublisher.Publish(statusKey, configInProgress, \" created PersistentVolumeClaim definition \")\n\t\tcase *k8api.Pod:\n\t\t\tif _, err := client.CreatePod(nameSpace, ob.(*k8api.Pod)); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tc.statusPublisher.Publish(statusKey, configInProgress, \" created Pod definition \")\n\t\tcase *k8api.ConfigMap:\n\t\t\tfmt.Println(\"creating config map\")\n\t\t\tif _, err := client.CreateConfigMap(nameSpace, ob.(*k8api.ConfigMap)); err != nil {\n\t\t\t\tfmt.Println(\"creating config map\", err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tc.statusPublisher.Publish(statusKey, configInProgress, \" created ConfigMap definition \")\n\t\t}\n\t}\n\treturn dispatched, nil\n}", "func (s *k8sStore) syncIngress(ing *networking.Ingress) {\n\tkey := MetaNamespaceKey(ing)\n\tklog.V(3).Infof(\"updating annotations information for ingress %v\", key)\n\tif !IsValid(ing) {\n\t\treturn\n\t}\n\tcopyIng := &networking.Ingress{}\n\ting.ObjectMeta.DeepCopyInto(&copyIng.ObjectMeta)\n\ting.Spec.DeepCopyInto(&copyIng.Spec)\n\ting.Status.DeepCopyInto(&copyIng.Status)\n\n\tfor ri, rule := range copyIng.Spec.Rules {\n\t\tif rule.HTTP == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor pi, path := range rule.HTTP.Paths {\n\t\t\tif path.Path == \"\" {\n\t\t\t\tcopyIng.Spec.Rules[ri].HTTP.Paths[pi].Path = \"/\"\n\t\t\t}\n\t\t}\n\t}\n\n\tSetDefaultALBPathType(copyIng)\n\n\terr := s.listers.IngressWithAnnotation.Update(&Ingress{\n\t\tIngress: *copyIng,\n\t})\n\tif err != nil {\n\t\tklog.Error(err)\n\t}\n}", "func CreateIngress(name string) *extv1beta1.Ingress {\n\treturn &extv1beta1.Ingress{\n\t\tTypeMeta: genTypeMeta(gvk.Ingress),\n\t\tObjectMeta: genObjectMeta(name, true),\n\t\tSpec: extv1beta1.IngressSpec{\n\t\t\tBackend: &extv1beta1.IngressBackend{\n\t\t\t\tServiceName: \"app\",\n\t\t\t\tServicePort: intstr.FromInt(80),\n\t\t\t},\n\t\t},\n\t}\n}", "func createIngressInClusters(ing *v1beta1.Ingress, clients map[string]kubeclient.Interface) ([]string, error) {\n\tvar err error\n\tvar clusters []string\n\tfor cluster, client := range clients {\n\t\tglog.V(4).Infof(\"Creating Ingress in cluster: %v...\", cluster)\n\t\tclusters = append(clusters, cluster)\n\t\tglog.V(3).Infof(\"Using namespace %s for ingress %s\", ing.Namespace, ing.Name)\n\t\tactualIng, createErr := client.Extensions().Ingresses(ing.Namespace).Create(ing)\n\t\tglog.V(2).Infof(\"Ingress Create returned: err:%v. 
Actual Ingress:%+v\", createErr, actualIng)\n\t\tif createErr != nil {\n\t\t\tif errors.IsAlreadyExists(createErr) {\n\t\t\t\tfmt.Println(\"Ingress already exists; moving on.\")\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\terr = multierror.Append(err, fmt.Errorf(\"Error in creating ingress in cluster %s: %s\", cluster, createErr))\n\t\t\t}\n\t\t}\n\t\tfmt.Println(\"Created Ingress for cluster:\", cluster)\n\t}\n\treturn clusters, err\n}", "func (r *NuxeoReconciler) defaultIngress(instance *v1alpha1.Nuxeo, access v1alpha1.NuxeoAccess, forcePassthrough bool,\n\tingressName string, nodeSet v1alpha1.NodeSet) (*v1beta1.Ingress, error) {\n\tconst nginxPassthroughAnnotation = \"nginx.ingress.kubernetes.io/ssl-passthrough\"\n\ttargetPort := intstr.IntOrString{\n\t\tType: intstr.String,\n\t\tStrVal: \"web\",\n\t}\n\tif access.TargetPort != (intstr.IntOrString{}) {\n\t\ttargetPort = access.TargetPort\n\t}\n\tingress := v1beta1.Ingress{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: ingressName,\n\t\t\tNamespace: instance.Namespace,\n\t\t},\n\t\tSpec: v1beta1.IngressSpec{\n\t\t\tRules: []v1beta1.IngressRule{{\n\t\t\t\tHost: access.Hostname,\n\t\t\t\tIngressRuleValue: v1beta1.IngressRuleValue{\n\t\t\t\t\tHTTP: &v1beta1.HTTPIngressRuleValue{\n\t\t\t\t\t\tPaths: []v1beta1.HTTPIngressPath{{\n\t\t\t\t\t\t\tBackend: v1beta1.IngressBackend{\n\t\t\t\t\t\t\t\tServiceName: serviceName(instance, nodeSet),\n\t\t\t\t\t\t\t\tServicePort: targetPort,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t}\n\tif access.Termination != \"\" || forcePassthrough {\n\t\tif access.Termination != \"\" && access.Termination != routev1.TLSTerminationPassthrough &&\n\t\t\taccess.Termination != routev1.TLSTerminationEdge {\n\t\t\treturn nil, fmt.Errorf(\"only passthrough and edge termination are supported\")\n\t\t}\n\t\tingress.Spec.TLS = []v1beta1.IngressTLS{{\n\t\t\tHosts: []string{access.Hostname},\n\t\t}}\n\t\tif access.Termination == routev1.TLSTerminationPassthrough || forcePassthrough {\n\t\t\tingress.ObjectMeta.Annotations = map[string]string{nginxPassthroughAnnotation: \"true\"}\n\t\t} else {\n\t\t\t// the Ingress will terminate TLS\n\t\t\tif access.TLSSecret == \"\" {\n\t\t\t\treturn nil, fmt.Errorf(\"the Ingress was configured for TLS termination but no secret was provided\")\n\t\t\t}\n\t\t\t// secret needs keys 'tls.crt' and 'tls.key' and cert must have CN=<access.Hostname>\n\t\t\tingress.Spec.TLS[0].SecretName = access.TLSSecret\n\t\t}\n\t}\n\t_ = controllerutil.SetControllerReference(instance, &ingress, r.Scheme)\n\treturn &ingress, nil\n}", "func labelsForEnvoyIngress(crName, eLName string) map[string]string {\n\treturn map[string]string{v1beta1.AppLabelKey: \"envoyingress\", \"eListenerName\": eLName, v1beta1.KafkaCRLabelKey: crName}\n}", "func (ing FakeIngress) UpdateIngress() (*networking.Ingress, error) {\n\n\t//check if resource already exists\n\tingress, err := KubeClient.NetworkingV1().Ingresses(ing.Namespace).Get(context.TODO(), ing.Name, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//increment resource version\n\tnewIngress := ing.IngressMultiPath() //Maybe we should replace Ingress() with IngressMultiPath() completely\n\trv, _ := strconv.Atoi(ingress.ResourceVersion)\n\tnewIngress.ResourceVersion = strconv.Itoa(rv + 1)\n\n\t//update ingress resource\n\tupdatedIngress, err := KubeClient.NetworkingV1().Ingresses(newIngress.Namespace).Update(context.TODO(), newIngress, metav1.UpdateOptions{})\n\treturn updatedIngress, err\n}", "func 
createIngressLW(kubeClient *kclient.ExtensionsClient) *kcache.ListWatch {\n\treturn kcache.NewListWatchFromClient(kubeClient, \"ingresses\", kapi.NamespaceAll, kSelector.Everything())\n}", "func (r *reconciler) routeToIngressController(context context.Context, obj client.Object) []reconcile.Request {\n\tvar requests []reconcile.Request\n\t// Cast the received object into Route object.\n\troute := obj.(*routev1.Route)\n\n\t// Create the NamespacedName for the Route.\n\trouteNamespacedName := types.NamespacedName{\n\t\tNamespace: route.Namespace,\n\t\tName: route.Name,\n\t}\n\n\t// Create a set of current Ingresses of the Route to easily retrieve them.\n\tcurrentRouteIngresses := sets.NewString()\n\n\t// Iterate through the related Route's Ingresses.\n\tfor _, ri := range route.Status.Ingress {\n\t\t// Check if the Route was admitted by the RouteIngress.\n\t\tfor _, cond := range ri.Conditions {\n\t\t\tif cond.Type == routev1.RouteAdmitted && cond.Status == corev1.ConditionTrue {\n\t\t\t\tlog.Info(\"queueing ingresscontroller\", \"name\", ri.RouterName)\n\t\t\t\t// Create a reconcile.Request for the router named in the RouteIngress.\n\t\t\t\trequest := reconcile.Request{\n\t\t\t\t\tNamespacedName: types.NamespacedName{\n\t\t\t\t\t\tName: ri.RouterName,\n\t\t\t\t\t\tNamespace: r.namespace,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\trequests = append(requests, request)\n\n\t\t\t\t// Add the Router Name to the currentIngressSet.\n\t\t\t\tcurrentRouteIngresses.Insert(ri.RouterName)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Get the previous set of Ingresses of the Route.\n\tpreviousRouteIngresses := r.routeToIngresses[routeNamespacedName]\n\n\t// Iterate through the previousRouteIngresses.\n\tfor routerName := range previousRouteIngresses {\n\t\t// Check if the currentRouteIngresses contains the Router Name. If it does not,\n\t\t// then the Ingress was removed from the Route Status. 
The reconcile loop is needed\n\t\t// to be run for the corresponding Ingress Controller.\n\t\tif !currentRouteIngresses.Has(routerName) {\n\t\t\tlog.Info(\"queueing ingresscontroller\", \"name\", routerName)\n\t\t\t// Create a reconcile.Request for the router named in the RouteIngress.\n\t\t\trequest := reconcile.Request{\n\t\t\t\tNamespacedName: types.NamespacedName{\n\t\t\t\t\tName: routerName,\n\t\t\t\t\tNamespace: r.namespace,\n\t\t\t\t},\n\t\t\t}\n\t\t\trequests = append(requests, request)\n\t\t}\n\t}\n\n\t// Map the currentRouteIngresses to Route's NamespacedName.\n\tr.routeToIngresses[routeNamespacedName] = currentRouteIngresses\n\n\treturn requests\n}", "func (l *EC2) Apply(r types.ResourceWithLabels) {\n\tlabels := l.Get()\n\tfor k, v := range r.GetStaticLabels() {\n\t\tlabels[k] = v\n\t}\n\tr.SetStaticLabels(labels)\n}", "func newIngress(hostRules map[string]utils.FakeIngressRuleValueMap) *extensions.Ingress {\n\tret := &extensions.Ingress{\n\t\tTypeMeta: meta_v1.TypeMeta{\n\t\t\tKind: \"Ingress\",\n\t\t\tAPIVersion: \"extensions/v1beta1\",\n\t\t},\n\t\tObjectMeta: meta_v1.ObjectMeta{\n\t\t\tName: fmt.Sprintf(\"%v\", uuid.NewUUID()),\n\t\t\tNamespace: \"default\",\n\t\t},\n\t\tSpec: extensions.IngressSpec{\n\t\t\tBackend: &extensions.IngressBackend{\n\t\t\t\tServiceName: defaultBackendName(testClusterName),\n\t\t\t\tServicePort: testBackendPort,\n\t\t\t},\n\t\t\tRules: toIngressRules(hostRules),\n\t\t},\n\t\tStatus: extensions.IngressStatus{\n\t\t\tLoadBalancer: api_v1.LoadBalancerStatus{\n\t\t\t\tIngress: []api_v1.LoadBalancerIngress{\n\t\t\t\t\t{IP: testIPManager.ip()},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tret.SelfLink = fmt.Sprintf(\"%s/%s\", ret.Namespace, ret.Name)\n\treturn ret\n}", "func (op *Operator) initIngressCRDWatcher() cache.Controller {\n\tlw := &cache.ListWatch{\n\t\tListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {\n\t\t\treturn op.VoyagerClient.Ingresses(apiv1.NamespaceAll).List(metav1.ListOptions{})\n\t\t},\n\t\tWatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {\n\t\t\treturn op.VoyagerClient.Ingresses(apiv1.NamespaceAll).Watch(metav1.ListOptions{})\n\t\t},\n\t}\n\t_, informer := cache.NewInformer(lw,\n\t\t&tapi.Ingress{},\n\t\top.Opt.ResyncPeriod,\n\t\tcache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\tif engress, ok := obj.(*tapi.Ingress); ok {\n\t\t\t\t\tlog.Infof(\"%s %s@%s added\", engress.GroupVersionKind(), engress.Name, engress.Namespace)\n\t\t\t\t\tif !engress.ShouldHandleIngress(op.Opt.IngressClass) {\n\t\t\t\t\t\tlog.Infof(\"%s %s@%s does not match ingress class\", engress.GroupVersionKind(), engress.Name, engress.Namespace)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tif err := engress.IsValid(op.Opt.CloudProvider); err != nil {\n\t\t\t\t\t\top.recorder.Eventf(\n\t\t\t\t\t\t\tengress.ObjectReference(),\n\t\t\t\t\t\t\tapiv1.EventTypeWarning,\n\t\t\t\t\t\t\teventer.EventReasonIngressInvalid,\n\t\t\t\t\t\t\t\"Reason: %s\",\n\t\t\t\t\t\t\terr.Error(),\n\t\t\t\t\t\t)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\top.AddEngress(engress)\n\t\t\t\t}\n\t\t\t},\n\t\t\tUpdateFunc: func(old, new interface{}) {\n\t\t\t\toldEngress, ok := old.(*tapi.Ingress)\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.Errorln(errors.New(\"Invalid Ingress object\").Err())\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tnewEngress, ok := new.(*tapi.Ingress)\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.Errorln(errors.New(\"Invalid Ingress object\").Err())\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif changed, _ := oldEngress.HasChanged(*newEngress); 
!changed {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tlog.Infof(\"%s %s@%s has changed\", newEngress.GroupVersionKind(), newEngress.Name, newEngress.Namespace)\n\t\t\t\tif err := newEngress.IsValid(op.Opt.CloudProvider); err != nil {\n\t\t\t\t\top.recorder.Eventf(\n\t\t\t\t\t\tnewEngress.ObjectReference(),\n\t\t\t\t\t\tapiv1.EventTypeWarning,\n\t\t\t\t\t\teventer.EventReasonIngressInvalid,\n\t\t\t\t\t\t\"Reason: %s\",\n\t\t\t\t\t\terr.Error(),\n\t\t\t\t\t)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\top.UpdateEngress(oldEngress, newEngress)\n\t\t\t},\n\t\t\tDeleteFunc: func(obj interface{}) {\n\t\t\t\tif engress, ok := obj.(*tapi.Ingress); ok {\n\t\t\t\t\tlog.Infof(\"%s %s@%s deleted\", engress.GroupVersionKind(), engress.Name, engress.Namespace)\n\t\t\t\t\tif !engress.ShouldHandleIngress(op.Opt.IngressClass) {\n\t\t\t\t\t\tlog.Infof(\"%s %s@%s does not match ingress class\", engress.GroupVersionKind(), engress.Name, engress.Namespace)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\top.DeleteEngress(engress)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t)\n\treturn informer\n}", "func (cac *EnvironmentServiceConfigController) Configure(client Client, deploymentName, namespace string) error {\n\t//cloudapp deployment config should be in place at this point, but check\n\tvar configurationStatus = ConfigurationStatus{Started: time.Now(), Log: []string{\"starting configuration for service \" + deploymentName}, Status: configInProgress}\n\tkey := namespace + \"/\" + deploymentName\n\tcac.StatusPublisher.Publish(key, configurationStatus)\n\tvar statusUpdate = func(message, status string) {\n\t\tconfigurationStatus.Status = status\n\t\tconfigurationStatus.Log = append(configurationStatus.Log, message)\n\t\tcac.StatusPublisher.Publish(key, configurationStatus)\n\t}\n\t// ensure we have the latest DeploymentConfig\n\tdeployment, err := client.GetDeploymentConfigByName(namespace, deploymentName)\n\tif err != nil {\n\t\tstatusUpdate(\"unexpected error retrieving DeploymentConfig\"+err.Error(), configError)\n\t\treturn err\n\t}\n\tif deployment == nil {\n\t\tstatusUpdate(\"could not find DeploymentConfig for \"+deploymentName, configError)\n\t\treturn errors.New(\"could not find DeploymentConfig for \" + deploymentName)\n\t}\n\t//find the deployed services\n\tservices, err := client.FindDeploymentConfigsByLabel(namespace, map[string]string{\"rhmap/type\": \"environmentService\"})\n\tif err != nil {\n\t\tstatusUpdate(\"failed to retrieve environment Service dcs during configuration of \"+deployment.Name+\" \"+err.Error(), \"error\")\n\t\treturn err\n\t}\n\terrs := []string{}\n\t//configure for any environment services already deployed\n\tfor _, s := range services {\n\t\tserviceName := s.Labels[\"rhmap/name\"]\n\t\tc := cac.ConfigurationFactory.Factory(serviceName)\n\t\t_, err := c.Configure(client, deployment, namespace)\n\t\tif err != nil {\n\t\t\terrs = append(errs, err.Error())\n\t\t}\n\t}\n\tif _, err := client.UpdateDeployConfigInNamespace(namespace, deployment); err != nil {\n\t\treturn errors.Wrap(err, \"failed to update deployment after configuring it \")\n\t}\n\t//TODO given we have a status updater do we really need to return errors from the configuration handlers\n\tif len(errs) > 0 {\n\t\treturn errors.New(\"some services failed to configure: \" + strings.Join(errs, \" : \"))\n\t}\n\treturn nil\n}", "func (h *Handler) ApplyFromFile(filename string) (sa *corev1.ServiceAccount, err error) {\n\tsa, err = h.CreateFromFile(filename)\n\tif k8serrors.IsAlreadyExists(err) { // if serviceaccount already exist, update 
it.\n\t\tsa, err = h.UpdateFromFile(filename)\n\t}\n\treturn\n}", "func (h *Handler) importCluster(config *eksv1.EKSClusterConfig, awsSVCs *awsServices) (*eksv1.EKSClusterConfig, error) {\n\tif awsSVCs == nil {\n\t\treturn config, fmt.Errorf(\"aws services not initialized\")\n\t}\n\n\tclusterState, err := awsservices.GetClusterState(&awsservices.GetClusterStatusOpts{\n\t\tEKSService: awsSVCs.eks,\n\t\tConfig: config,\n\t})\n\tif err != nil {\n\t\treturn config, err\n\t}\n\n\tif err := h.createCASecret(config, clusterState); err != nil {\n\t\tif !apierrors.IsAlreadyExists(err) {\n\t\t\treturn config, err\n\t\t}\n\t}\n\n\tlaunchTemplatesOutput, err := awsSVCs.ec2.DescribeLaunchTemplates(&ec2.DescribeLaunchTemplatesInput{\n\t\tLaunchTemplateNames: []*string{aws.String(fmt.Sprintf(awsservices.LaunchTemplateNameFormat, config.Spec.DisplayName))},\n\t})\n\tif err == nil && len(launchTemplatesOutput.LaunchTemplates) > 0 {\n\t\tconfig.Status.ManagedLaunchTemplateID = aws.StringValue(launchTemplatesOutput.LaunchTemplates[0].LaunchTemplateId)\n\t}\n\n\tconfig.Status.Subnets = aws.StringValueSlice(clusterState.Cluster.ResourcesVpcConfig.SubnetIds)\n\tconfig.Status.SecurityGroups = aws.StringValueSlice(clusterState.Cluster.ResourcesVpcConfig.SecurityGroupIds)\n\tconfig.Status.Phase = eksConfigActivePhase\n\treturn h.eksCC.UpdateStatus(config)\n}", "func verifyExternalIngressController(t *testing.T, name types.NamespacedName, hostname, address string) {\n\tt.Helper()\n\techoPod := buildEchoPod(name.Name, name.Namespace)\n\tif err := kclient.Create(context.TODO(), echoPod); err != nil {\n\t\tt.Fatalf(\"failed to create pod %s/%s: %v\", echoPod.Namespace, echoPod.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoPod); err != nil {\n\t\t\tt.Fatalf(\"failed to delete pod %s/%s: %v\", echoPod.Namespace, echoPod.Name, err)\n\t\t}\n\t}()\n\n\techoService := buildEchoService(echoPod.Name, echoPod.Namespace, echoPod.ObjectMeta.Labels)\n\tif err := kclient.Create(context.TODO(), echoService); err != nil {\n\t\tt.Fatalf(\"failed to create service %s/%s: %v\", echoService.Namespace, echoService.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoService); err != nil {\n\t\t\tt.Fatalf(\"failed to delete service %s/%s: %v\", echoService.Namespace, echoService.Name, err)\n\t\t}\n\t}()\n\n\techoRoute := buildRouteWithHost(echoPod.Name, echoPod.Namespace, echoService.Name, hostname)\n\tif err := kclient.Create(context.TODO(), echoRoute); err != nil {\n\t\tt.Fatalf(\"failed to create route %s/%s: %v\", echoRoute.Namespace, echoRoute.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoRoute); err != nil {\n\t\t\tt.Fatalf(\"failed to delete route %s/%s: %v\", echoRoute.Namespace, echoRoute.Name, err)\n\t\t}\n\t}()\n\n\t// If we have a DNS as an external IP address, make sure we can resolve it before moving on.\n\t// This just limits the number of \"could not resolve host\" errors which can be confusing.\n\tif net.ParseIP(address) == nil {\n\t\tif err := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {\n\t\t\t_, err := net.LookupIP(address)\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"waiting for loadbalancer domain %s to resolve...\", address)\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\treturn true, nil\n\t\t}); err != nil {\n\t\t\tt.Fatalf(\"loadbalancer domain %s was unable to resolve:\", address)\n\t\t}\n\t}\n\n\treq, err := http.NewRequest(http.MethodGet, fmt.Sprintf(\"http://%s\", address), nil)\n\tif 
err != nil {\n\t\tt.Fatalf(\"failed to build client request: %v\", err)\n\t}\n\t// we use HOST header to map to the domain associated on the ingresscontroller.\n\t// This ensures our http call is routed to the correct router.\n\treq.Host = hostname\n\n\thttpClient := http.Client{Timeout: 5 * time.Second}\n\terr = waitForHTTPClientCondition(t, &httpClient, req, 10*time.Second, 10*time.Minute, func(r *http.Response) bool {\n\t\tif r.StatusCode == http.StatusOK {\n\t\t\tt.Logf(\"verified connectivity with workload with req %v and response %v\", req.URL, r.StatusCode)\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to verify connectivity with workload with reqURL %s using external client: %v\", req.URL, err)\n\t}\n}", "func (e *esearch) ApplyIndexTemplate(indexType string) error {\n\n\t// Remove the existing index template (if exists)\n\tdeleteTemplateRepsonse, err := e.client.IndexDeleteTemplate(e.indexName(indexType)).Do(e.ctx)\n\tif elastic.IsNotFound(err) {\n\t\t// We're good\n\t} else if err != nil {\n\t\treturn fmt.Errorf(\"Failed to remove Elasticsearch template '%s' error: %v\", e.indexName(indexType), err)\n\t} else if !deleteTemplateRepsonse.Acknowledged {\n\t\treturn fmt.Errorf(\"Failed to receive Elasticsearch delete %s template response\", indexType)\n\t}\n\n\t// Load the index mapping\n\tvar mapping = make(map[string]interface{})\n\tmappingFile := config.GetString(\"elasticsearch.\" + indexType + \".template_file\")\n\n\t// Get mapping file\n\tvar rawMapping []byte\n\tif mappingFile == \"\" {\n\t\tmappingFile = \"embedded\"\n\t\trawMapping, err = embed.Asset(\"template-6-\" + indexType + \".json\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Could not retrieve embedded mapping file: %v\", err)\n\t\t}\n\t} else {\n\t\t// Get the default mapping from the mapping file\n\t\trawMapping, err = ioutil.ReadFile(mappingFile)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Could not retrieve mapping from %s error: %s\", mappingFile, err)\n\t\t}\n\t}\n\n\t// Copy the mapping structure to a map we can modify\n\terr = json.Unmarshal(rawMapping, &mapping)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not parse mapping JSON from %s error %s\", mappingFile, err)\n\t}\n\n\t// Update the default mapping settings based on passed in options\n\tsettings := mapping[\"settings\"].(map[string]interface{})\n\tsettings[\"number_of_shards\"] = config.GetInt(\"elasticsearch.\" + indexType + \".index_shards\")\n\tsettings[\"number_of_replicas\"] = config.GetInt(\"elasticsearch.\" + indexType + \".index_replicas\")\n\tsettings[\"refresh_interval\"] = config.GetString(\"elasticsearch.\" + indexType + \".refresh_interval\")\n\n\t// Create an index template\n\tmapping[\"index_patterns\"] = e.indexName(indexType) + \"-*\"\n\n\t// Create the new index template\n\tcreateTemplate, err := e.client.IndexPutTemplate(e.indexName(indexType)).BodyJson(mapping).Do(e.ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to create Elasticsearch %s template: %v\", indexType, err)\n\t}\n\tif !createTemplate.Acknowledged {\n\t\treturn fmt.Errorf(\"Failed to receive acknowledgement that Elasticsearch %s template was created\", indexType)\n\t}\n\n\treturn nil\n\n}", "func (bc *ReconcileJenkinsInstance) newIngress(instanceName types.NamespacedName) (*v1beta1.Ingress, error) {\n\texists := false\n\n\tjenkinsInstance, err := bc.getJenkinsInstance(instanceName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif jenkinsInstance.Spec.Ingress == nil {\n\t\treturn nil, 
nil\n\t}\n\n\tingress, err := bc.getIngress(instanceName)\n\n\t// If the ingress doesn't exist, we'll create it\n\tif err != nil {\n\t\tif !errors.IsNotFound(err) {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\t// If the Ingress is not controlled by this JenkinsInstance resource, we should log\n\t\t// a warning to the event recorder and ret\n\t\tif !metav1.IsControlledBy(ingress, jenkinsInstance) {\n\t\t\tmsg := fmt.Sprintf(MessageResourceExists, ingress.GetName())\n\t\t\tbc.Event(jenkinsInstance, corev1.EventTypeWarning, ErrResourceExists, msg)\n\t\t\treturn ingress, fmt.Errorf(msg)\n\t\t}\n\n\t\texists = true\n\t}\n\n\tlabels := map[string]string{\n\t\t\"app\": \"jenkinsci\",\n\t\t\"controller\": jenkinsInstance.GetName(),\n\t\t\"component\": string(jenkinsInstance.UID),\n\t}\n\n\tserviceName := jenkinsInstance.GetName()\n\tif jenkinsInstance.Spec.Service != nil && jenkinsInstance.Spec.Service.Name != \"\" {\n\t\tserviceName = jenkinsInstance.Spec.Service.Name\n\t}\n\tif jenkinsInstance.Spec.Ingress.Service != \"\" {\n\t\tserviceName = jenkinsInstance.Spec.Ingress.Service\n\t}\n\n\tingressPath := jenkinsInstance.Spec.Ingress.Path\n\tif ingressPath == \"\" {\n\t\tingressPath = \"/\"\n\t}\n\n\tif exists {\n\t\tingressCopy := ingress.DeepCopy()\n\t\tingressCopy.Labels = labels\n\t\tingressCopy.Spec.TLS = []v1beta1.IngressTLS{\n\t\t\t{\n\t\t\t\tSecretName: jenkinsInstance.Spec.Ingress.TlsSecret,\n\t\t\t\tHosts: []string{\n\t\t\t\t\tutil.GetJenkinsLocationHost(jenkinsInstance),\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tingressCopy.Spec.Rules = []v1beta1.IngressRule{\n\t\t\t{\n\t\t\t\tHost: util.GetJenkinsLocationHost(jenkinsInstance),\n\t\t\t\tIngressRuleValue: v1beta1.IngressRuleValue{\n\t\t\t\t\tHTTP: &v1beta1.HTTPIngressRuleValue{\n\t\t\t\t\t\tPaths: []v1beta1.HTTPIngressPath{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tPath: ingressPath,\n\t\t\t\t\t\t\t\tBackend: v1beta1.IngressBackend{\n\t\t\t\t\t\t\t\t\tServiceName: serviceName,\n\t\t\t\t\t\t\t\t\tServicePort: intstr.IntOrString{\n\t\t\t\t\t\t\t\t\t\tType: intstr.Int,\n\t\t\t\t\t\t\t\t\t\tIntVal: JenkinsMasterPort,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tif reflect.DeepEqual(ingressCopy.Spec, ingress.Spec) {\n\t\t\treturn ingress, nil\n\t\t}\n\n\t\tglog.Info(\"updating ingress\")\n\t\terr = bc.Client.Update(context.TODO(), ingressCopy)\n\t\treturn ingress, err\n\n\t} else {\n\n\t\tingress = &v1beta1.Ingress{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: jenkinsInstance.GetName(),\n\t\t\t\tNamespace: jenkinsInstance.GetNamespace(),\n\t\t\t\tLabels: labels,\n\t\t\t\tAnnotations: jenkinsInstance.Spec.Ingress.Annotations,\n\t\t\t},\n\t\t\tSpec: v1beta1.IngressSpec{\n\t\t\t\tTLS: []v1beta1.IngressTLS{\n\t\t\t\t\t{\n\t\t\t\t\t\tSecretName: jenkinsInstance.Spec.Ingress.TlsSecret,\n\t\t\t\t\t\tHosts: []string{\n\t\t\t\t\t\t\tutil.GetJenkinsLocationHost(jenkinsInstance),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tRules: []v1beta1.IngressRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tHost: util.GetJenkinsLocationHost(jenkinsInstance),\n\t\t\t\t\t\tIngressRuleValue: v1beta1.IngressRuleValue{\n\t\t\t\t\t\t\tHTTP: &v1beta1.HTTPIngressRuleValue{\n\t\t\t\t\t\t\t\tPaths: []v1beta1.HTTPIngressPath{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tPath: ingressPath,\n\t\t\t\t\t\t\t\t\t\tBackend: v1beta1.IngressBackend{\n\t\t\t\t\t\t\t\t\t\t\tServiceName: serviceName,\n\t\t\t\t\t\t\t\t\t\t\tServicePort: intstr.IntOrString{\n\t\t\t\t\t\t\t\t\t\t\t\tType: 
intstr.Int,\n\t\t\t\t\t\t\t\t\t\t\t\tIntVal: JenkinsMasterPort,\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\terr = controllerutil.SetControllerReference(jenkinsInstance, ingress, bc.scheme)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\terr = bc.Client.Create(context.TODO(), ingress)\n\t\treturn ingress, err\n\t}\n}", "func (r *Reconciler) PatchIngress(ns, name string, pt types.PatchType, data []byte, subresources ...string) (v1alpha1.IngressAccessor, error) {\n\treturn r.ServingClientSet.NetworkingV1alpha1().ClusterIngresses().Patch(name, pt, data, subresources...)\n}", "func normalizeForK8sVMASScalingUp(templateMap map[string]interface{}) error {\n\tif err := normalizeMasterResourcesForScaling(templateMap); err != nil {\n\t\treturn err\n\t}\n\trtIndex := -1\n\tnsgIndex := -1\n\tresources := templateMap[resourcesFieldName].([]interface{})\n\tfor index, resource := range resources {\n\t\tresourceMap, ok := resource.(map[string]interface{})\n\t\tif !ok {\n\t\t\tklog.Warning(\"Template improperly formatted for resource\")\n\t\t\tcontinue\n\t\t}\n\n\t\tresourceType, ok := resourceMap[typeFieldName].(string)\n\t\tif ok && resourceType == nsgResourceType {\n\t\t\tif nsgIndex != -1 {\n\t\t\t\terr := fmt.Errorf(\"found 2 resources with type %s in the template. There should only be 1\", nsgResourceType)\n\t\t\t\tklog.Errorf(err.Error())\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tnsgIndex = index\n\t\t}\n\t\tif ok && resourceType == rtResourceType {\n\t\t\tif rtIndex != -1 {\n\t\t\t\terr := fmt.Errorf(\"found 2 resources with type %s in the template. There should only be 1\", rtResourceType)\n\t\t\t\tklog.Warningf(err.Error())\n\t\t\t\treturn err\n\t\t\t}\n\t\t\trtIndex = index\n\t\t}\n\n\t\tdependencies, ok := resourceMap[dependsOnFieldName].([]interface{})\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor dIndex := len(dependencies) - 1; dIndex >= 0; dIndex-- {\n\t\t\tdependency := dependencies[dIndex].(string)\n\t\t\tif strings.Contains(dependency, nsgResourceType) || strings.Contains(dependency, nsgID) ||\n\t\t\t\tstrings.Contains(dependency, rtResourceType) || strings.Contains(dependency, rtID) {\n\t\t\t\tdependencies = append(dependencies[:dIndex], dependencies[dIndex+1:]...)\n\t\t\t}\n\t\t}\n\n\t\tif len(dependencies) > 0 {\n\t\t\tresourceMap[dependsOnFieldName] = dependencies\n\t\t} else {\n\t\t\tdelete(resourceMap, dependsOnFieldName)\n\t\t}\n\t}\n\n\tindexesToRemove := []int{}\n\tif nsgIndex == -1 {\n\t\terr := fmt.Errorf(\"found no resources with type %s in the template. 
There should have been 1\", nsgResourceType)\n\t\tklog.Errorf(err.Error())\n\t\treturn err\n\t}\n\tif rtIndex == -1 {\n\t\tklog.Infof(\"Found no resources with type %s in the template.\", rtResourceType)\n\t} else {\n\t\tindexesToRemove = append(indexesToRemove, rtIndex)\n\t}\n\tindexesToRemove = append(indexesToRemove, nsgIndex)\n\ttemplateMap[resourcesFieldName] = removeIndexesFromArray(resources, indexesToRemove)\n\n\treturn nil\n}", "func (m *MeshReconciler) configureIstioCR(istio *v1beta1.Istio, config Config) {\n\tenabled := true\n\tdisabled := false\n\tmaxReplicas := int32(1)\n\n\tlabels := istio.GetLabels()\n\tif labels == nil {\n\t\tlabels = make(map[string]string, 0)\n\t}\n\tlabels[clusterIDLabel] = strconv.FormatUint(uint64(m.Master.GetID()), 10)\n\tlabels[cloudLabel] = m.Master.GetCloud()\n\tlabels[distributionLabel] = m.Master.GetDistribution()\n\tistio.SetLabels(labels)\n\n\tistio.Spec.Gateways.IngressConfig.Ports = []corev1.ServicePort{\n\t\t{Name: \"status-port\", Port: 15020, TargetPort: intstr.FromInt(15020)},\n\t\t{Name: \"http2\", Port: 80, TargetPort: intstr.FromInt(80)},\n\t\t{Name: \"https\", Port: 443, TargetPort: intstr.FromInt(443)},\n\t\t{Name: \"tls\", Port: 15443, TargetPort: intstr.FromInt(15443)},\n\t\t{Name: \"tcp-als-tls\", Port: 50600, TargetPort: intstr.FromInt(50600)},\n\t\t{Name: \"tcp-zipkin-tls\", Port: 59411, TargetPort: intstr.FromInt(59411)},\n\t}\n\n\tistio.Spec.MTLS = nil\n\tif config.EnableMTLS {\n\t\tistio.Spec.MeshPolicy.MTLSMode = \"PERMISSIVE\"\n\t} else {\n\t\tistio.Spec.MeshPolicy.MTLSMode = \"DISABLED\"\n\t}\n\tistio.Spec.AutoMTLS = &enabled\n\tistio.Spec.AutoInjectionNamespaces = config.AutoSidecarInjectNamespaces\n\tistio.Spec.Version = istioVersion\n\tistio.Spec.ImagePullPolicy = corev1.PullAlways\n\tistio.Spec.Gateways.IngressConfig.Enabled = &enabled\n\tistio.Spec.Gateways.IngressConfig.MaxReplicas = &maxReplicas\n\tistio.Spec.Gateways.EgressConfig.Enabled = &enabled\n\tistio.Spec.Gateways.EgressConfig.MaxReplicas = &maxReplicas\n\tistio.Spec.Pilot.Enabled = &disabled\n\tistio.Spec.Pilot.Image = &m.Configuration.internalConfig.Istio.PilotImage\n\tistio.Spec.Pilot.MaxReplicas = &maxReplicas\n\tistio.Spec.Mixer.Enabled = &disabled\n\tistio.Spec.Mixer.MultiClusterSupport = &enabled\n\tistio.Spec.Telemetry.Enabled = &disabled\n\tistio.Spec.Policy.Enabled = &disabled\n\tistio.Spec.Galley.Enabled = &disabled\n\tistio.Spec.Citadel.Enabled = &disabled\n\tistio.Spec.Istiod.Enabled = &enabled\n\tistio.Spec.Istiod.MultiClusterSupport = &enabled\n\tistio.Spec.Mixer.Image = &m.Configuration.internalConfig.Istio.MixerImage\n\tistio.Spec.Mixer.MaxReplicas = &maxReplicas\n\tistio.Spec.SidecarInjector.Enabled = &disabled\n\tistio.Spec.SidecarInjector.Image = &m.Configuration.internalConfig.Istio.SidecarInjectorImage\n\tistio.Spec.SidecarInjector.RewriteAppHTTPProbe = true\n\tistio.Spec.SidecarInjector.InjectedContainerAdditionalEnvVars = []corev1.EnvVar{\n\t\t{\n\t\t\tName: \"ISTIO_METAJSON_PLATFORM_METADATA\",\n\t\t\tValue: `{\"PLATFORM_METADATA\":{\"cluster_id\":\"master\"}}`,\n\t\t},\n\t}\n\tistio.Spec.Tracing.Enabled = &enabled\n\tistio.Spec.Tracing.Zipkin = v1beta1.ZipkinConfiguration{\n\t\tAddress: fmt.Sprintf(\"%s:%d\", zipkinHost, zipkinPort),\n\t\tTLSSettings: &v1beta1.TLSSettings{\n\t\t\tMode: \"ISTIO_MUTUAL\",\n\t\t},\n\t}\n\n\tistio.Spec.Proxy.Image = m.Configuration.internalConfig.Istio.ProxyImage\n\tistio.Spec.Proxy.EnvoyAccessLogService = v1beta1.EnvoyServiceCommonConfiguration{\n\t\tEnabled: &enabled,\n\t\tHost: alsHost,\n\t\tPort: 
alsPort,\n\t\tTLSSettings: &v1beta1.TLSSettings{\n\t\t\tMode: \"ISTIO_MUTUAL\",\n\t\t},\n\t\tTCPKeepalive: &v1beta1.TCPKeepalive{\n\t\t\tInterval: \"10s\",\n\t\t\tProbes: 3,\n\t\t\tTime: \"10s\",\n\t\t},\n\t}\n\tistio.Spec.Proxy.UseMetadataExchangeFilter = &enabled\n\tistio.Spec.JWTPolicy = \"first-party-jwt\"\n\tistio.Spec.ControlPlaneSecurityEnabled = enabled\n\tistio.Spec.MixerlessTelemetry = &v1beta1.MixerlessTelemetryConfiguration{\n\t\tEnabled: &enabled,\n\t}\n\n\tif len(m.Remotes) > 0 {\n\t\tistio.Spec.Gateways.IngressConfig.Labels = map[string]string{\"istio.banzaicloud.io/mesh-expansion\": \"true\"}\n\t\tistio.Spec.MeshExpansion = &enabled\n\t\tistio.Spec.MeshPolicy.MTLSMode = \"PERMISSIVE\"\n\t} else {\n\t\tistio.Spec.Gateways.IngressConfig.Labels = nil\n\t}\n\n\tif config.BypassEgressTraffic {\n\t\tistio.Spec.OutboundTrafficPolicy = v1beta1.OutboundTrafficPolicyConfiguration{\n\t\t\tMode: \"ALLOW_ANY\",\n\t\t}\n\t} else {\n\t\tistio.Spec.OutboundTrafficPolicy = v1beta1.OutboundTrafficPolicyConfiguration{\n\t\t\tMode: \"REGISTRY_ONLY\",\n\t\t}\n\t}\n}", "func BootstrapRules(p Proxy, inHost, inPort string, extEndpoint *url.URL, endpointSlice bool) error {\n\t// TODO (sbezverk) Consider adding ip address validation\n\textHost, extPort, _ := net.SplitHostPort(extEndpoint.Host)\n\tepn, _ := strconv.Atoi(extPort)\n\te32p := int32(epn)\n\tipn, _ := strconv.Atoi(inPort)\n\ti32p := int32(ipn)\n\tipFamily, _ := getIPFamily(inHost)\n\n\tsvc := v1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"kubernetes\",\n\t\t\tNamespace: \"default\",\n\t\t},\n\t\tSpec: v1.ServiceSpec{\n\t\t\tIPFamily: &ipFamily,\n\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tName: extEndpoint.Scheme,\n\t\t\t\t\tProtocol: v1.ProtocolTCP,\n\t\t\t\t\t// API Server's internal port number\n\t\t\t\t\tPort: i32p,\n\t\t\t\t\tTargetPort: intstr.FromString(inPort),\n\t\t\t\t},\n\t\t\t},\n\t\t\tType: v1.ServiceTypeClusterIP,\n\t\t\tClusterIP: inHost,\n\t\t},\n\t}\n\tp.AddService(&svc)\n\tif endpointSlice {\n\t\tready := true\n\t\tname := extEndpoint.Scheme\n\t\tproto := v1.ProtocolTCP\n\t\t// Todo (sbezverk) must be passed as a parameter\n\t\tlabel := map[string]string{\n\t\t\tdiscovery.LabelServiceName: \"kubernetes\",\n\t\t}\n\t\tepsl := discovery.EndpointSlice{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"kubernetes\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tLabels: label,\n\t\t\t},\n\t\t\tEndpoints: []discovery.Endpoint{\n\t\t\t\t{\n\t\t\t\t\t// External API server IP\n\t\t\t\t\tAddresses: []string{extHost},\n\t\t\t\t\tConditions: discovery.EndpointConditions{\n\t\t\t\t\t\tReady: &ready,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tPorts: []discovery.EndpointPort{\n\t\t\t\t{\n\t\t\t\t\tName: &name,\n\t\t\t\t\tProtocol: &proto,\n\t\t\t\t\t// External API server ports it listens on\n\t\t\t\t\tPort: &e32p,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tepsl.AddressType = discovery.AddressTypeIPv4\n\t\tif ipFamily == v1.IPv6Protocol {\n\t\t\tepsl.AddressType = discovery.AddressTypeIPv6\n\t\t}\n\t\tp.AddEndpointSlice(&epsl)\n\t} else {\n\t\tendpoint := v1.Endpoints{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"kubernetes\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t},\n\t\t\tSubsets: []v1.EndpointSubset{\n\t\t\t\t{\n\t\t\t\t\tAddresses: []v1.EndpointAddress{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP: extHost,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tPorts: []v1.EndpointPort{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: extEndpoint.Scheme,\n\t\t\t\t\t\t\tProtocol: v1.ProtocolTCP,\n\t\t\t\t\t\t\tPort: 
e32p,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tp.AddEndpoints(&endpoint)\n\t}\n\n\treturn nil\n}", "func newIngressController(name, defaultCertificateSecretName, domain string) operatorv1.IngressController {\n\tingresscontroller := operatorv1.IngressController{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t},\n\t\tStatus: operatorv1.IngressControllerStatus{\n\t\t\tDomain: domain,\n\t\t},\n\t}\n\tif len(defaultCertificateSecretName) != 0 {\n\t\tingresscontroller.Spec.DefaultCertificate = &corev1.LocalObjectReference{Name: defaultCertificateSecretName}\n\t}\n\treturn ingresscontroller\n}", "func verifyInternalIngressController(t *testing.T, name types.NamespacedName, hostname, address, image string) {\n\tkubeConfig, err := config.GetConfig()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to get kube config: %v\", err)\n\t}\n\tclient, err := kubernetes.NewForConfig(kubeConfig)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create kube client: %v\", err)\n\t}\n\n\techoPod := buildEchoPod(name.Name, name.Namespace)\n\tif err := kclient.Create(context.TODO(), echoPod); err != nil {\n\t\tt.Fatalf(\"failed to create pod %s/%s: %v\", echoPod.Namespace, echoPod.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoPod); err != nil {\n\t\t\tt.Fatalf(\"failed to delete pod %s/%s: %v\", echoPod.Namespace, echoPod.Name, err)\n\t\t}\n\t}()\n\n\techoService := buildEchoService(echoPod.Name, echoPod.Namespace, echoPod.ObjectMeta.Labels)\n\tif err := kclient.Create(context.TODO(), echoService); err != nil {\n\t\tt.Fatalf(\"failed to create service %s/%s: %v\", echoService.Namespace, echoService.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoService); err != nil {\n\t\t\tt.Fatalf(\"failed to delete service %s/%s: %v\", echoService.Namespace, echoService.Name, err)\n\t\t}\n\t}()\n\n\techoRoute := buildRouteWithHost(echoPod.Name, echoPod.Namespace, echoService.Name, hostname)\n\tif err := kclient.Create(context.TODO(), echoRoute); err != nil {\n\t\tt.Fatalf(\"failed to create route %s/%s: %v\", echoRoute.Namespace, echoRoute.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoRoute); err != nil {\n\t\t\tt.Fatalf(\"failed to delete route %s/%s: %v\", echoRoute.Namespace, echoRoute.Name, err)\n\t\t}\n\t}()\n\n\textraArgs := []string{\n\t\t\"--header\", \"HOST:\" + echoRoute.Spec.Host,\n\t\t\"-v\",\n\t\t\"--retry-delay\", \"20\",\n\t\t\"--max-time\", \"10\",\n\t}\n\tclientPodName := types.NamespacedName{Namespace: name.Namespace, Name: \"curl-\" + name.Name}\n\tclientPodSpec := buildCurlPod(clientPodName.Name, clientPodName.Namespace, image, address, echoRoute.Spec.Host, extraArgs...)\n\tclientPod := clientPodSpec.DeepCopy()\n\tif err := kclient.Create(context.TODO(), clientPod); err != nil {\n\t\tt.Fatalf(\"failed to create pod %q: %v\", clientPodName, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), clientPod); err != nil {\n\t\t\tif errors.IsNotFound(err) {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Fatalf(\"failed to delete pod %q: %v\", clientPodName, err)\n\t\t}\n\t}()\n\n\tvar curlPodLogs string\n\terr = wait.PollImmediate(10*time.Second, 10*time.Minute, func() (bool, error) {\n\t\tif err := kclient.Get(context.TODO(), clientPodName, clientPod); err != nil {\n\t\t\tt.Logf(\"error getting client pod %q: %v, retrying...\", clientPodName, err)\n\t\t\treturn false, nil\n\t\t}\n\t\t// First check if client curl pod is still starting or not running.\n\t\tif 
clientPod.Status.Phase == corev1.PodPending {\n\t\t\tt.Logf(\"waiting for client pod %q to start\", clientPodName)\n\t\t\treturn false, nil\n\t\t}\n\t\treadCloser, err := client.CoreV1().Pods(clientPod.Namespace).GetLogs(clientPod.Name, &corev1.PodLogOptions{\n\t\t\tContainer: \"curl\",\n\t\t\tFollow: false,\n\t\t}).Stream(context.TODO())\n\t\tif err != nil {\n\t\t\tt.Logf(\"failed to read output from pod %s: %v\", clientPod.Name, err)\n\t\t\treturn false, nil\n\t\t}\n\t\tscanner := bufio.NewScanner(readCloser)\n\t\tdefer func() {\n\t\t\tif err := readCloser.Close(); err != nil {\n\t\t\t\tt.Errorf(\"failed to close reader for pod %s: %v\", clientPod.Name, err)\n\t\t\t}\n\t\t}()\n\t\tcurlPodLogs = \"\"\n\t\tfor scanner.Scan() {\n\t\t\tline := scanner.Text()\n\t\t\tcurlPodLogs += line + \"\\n\"\n\t\t\tif strings.Contains(line, \"HTTP/1.0 200 OK\") {\n\t\t\t\tt.Logf(\"verified connectivity with workload with address: %s with response %s\", address, line)\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t}\n\t\t// If failed or succeeded, the pod is stopped, but didn't provide us 200 response, let's try again.\n\t\tif clientPod.Status.Phase == corev1.PodFailed || clientPod.Status.Phase == corev1.PodSucceeded {\n\t\t\tt.Logf(\"client pod %q has stopped...restarting. Curl Pod Logs:\\n%s\", clientPodName, curlPodLogs)\n\t\t\tif err := kclient.Delete(context.TODO(), clientPod); err != nil && !errors.IsNotFound(err) {\n\t\t\t\tt.Fatalf(\"failed to delete pod %q: %v\", clientPodName, err)\n\t\t\t}\n\t\t\t// Wait for deletion to prevent a race condition. Use PollInfinite since we are already in a Poll.\n\t\t\twait.PollInfinite(5*time.Second, func() (bool, error) {\n\t\t\t\terr = kclient.Get(context.TODO(), clientPodName, clientPod)\n\t\t\t\tif !errors.IsNotFound(err) {\n\t\t\t\t\tt.Logf(\"waiting for %q to be deleted\", clientPodName)\n\t\t\t\t\treturn false, nil\n\t\t\t\t}\n\t\t\t\treturn true, nil\n\t\t\t})\n\t\t\tclientPod = clientPodSpec.DeepCopy()\n\t\t\tif err := kclient.Create(context.TODO(), clientPod); err != nil {\n\t\t\t\tt.Fatalf(\"failed to create pod %q: %v\", clientPodName, err)\n\t\t\t}\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to verify connectivity with workload with address: %s using internal curl client. 
Curl Pod Logs:\\n%s\", address, curlPodLogs)\n\t}\n}", "func ingressControllerExists(ctx context.Context, h *helper.H, ingressControllerName string, shouldexist bool) {\n\t_, err := h.Dynamic().\n\t\tResource(schema.GroupVersionResource{Group: \"operator.openshift.io\", Version: \"v1\", Resource: \"ingresscontrollers\"}).\n\t\tNamespace(\"openshift-ingress-operator\").\n\t\tGet(ctx, ingressControllerName, metav1.GetOptions{})\n\tif shouldexist {\n\t\tExpect(err).NotTo(HaveOccurred())\n\t} else {\n\t\tExpect(err).Should(MatchError(fmt.Sprintf(\"ingresscontrollers.operator.openshift.io \\\"%v\\\" not found\", ingressControllerName)))\n\t}\n}", "func (r *reconciler) resourceToClusterIngressConfig(ctx context.Context, o client.Object) []reconcile.Request {\n\treturn []reconcile.Request{\n\t\t{\n\t\t\tNamespacedName: operatorcontroller.IngressClusterConfigName(),\n\t\t},\n\t}\n}", "func NewALBIngressFromIngress(o *NewALBIngressFromIngressOptions) *ALBIngress {\n\tvar err error\n\n\t// Create newIngress ALBIngress object holding the resource details and some cluster information.\n\tnewIngress := NewALBIngress(&NewALBIngressOptions{\n\t\tNamespace: o.Ingress.GetNamespace(),\n\t\tName: o.Ingress.Name,\n\t\tClusterName: o.ClusterName,\n\t\tALBNamePrefix: o.ALBNamePrefix,\n\t\tRecorder: o.Recorder,\n\t\tIngress: o.Ingress,\n\t})\n\n\tif o.ExistingIngress != nil {\n\t\t// Acquire a lock to prevent race condition if existing ingress's state is currently being synced\n\t\t// with Amazon..\n\t\tnewIngress = o.ExistingIngress\n\t\tnewIngress.lock.Lock()\n\t\tdefer newIngress.lock.Unlock()\n\t\t// reattach k8s ingress as if assembly happened through aws sync, it may be missing.\n\t\tnewIngress.ingress = o.Ingress\n\t\t// Ensure all desired state is removed from the copied ingress. The desired state of each\n\t\t// component will be generated later in this function.\n\t\tnewIngress.StripDesiredState()\n\t\tnewIngress.valid = false\n\t}\n\n\t// Load up the ingress with our current annotations.\n\tnewIngress.annotations, err = annotations.ParseAnnotations(o.Ingress.Annotations, o.ClusterName, o.Ingress.Namespace, o.Ingress.Name)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Error parsing annotations: %s\", err.Error())\n\t\tnewIngress.Reconciled = false\n\t\tnewIngress.Eventf(api.EventTypeWarning, \"ERROR\", msg)\n\t\tnewIngress.logger.Errorf(msg)\n\t\treturn newIngress\n\t}\n\n\t// If annotation set is nil, its because it was cached as an invalid set before. 
Stop processing\n\t// and return nil.\n\tif newIngress.annotations == nil {\n\t\tmsg := fmt.Sprintf(\"Skipping processing due to a history of bad annotations\")\n\t\tnewIngress.Eventf(api.EventTypeWarning, \"ERROR\", msg)\n\t\tnewIngress.logger.Debugf(msg)\n\t\treturn newIngress\n\t}\n\n\t// Assemble the load balancer\n\tnewIngress.LoadBalancer = loadbalancer.NewDesiredLoadBalancer(&loadbalancer.NewDesiredLoadBalancerOptions{\n\t\tALBNamePrefix: o.ALBNamePrefix,\n\t\tNamespace: o.Ingress.GetNamespace(),\n\t\tExistingLoadBalancer: newIngress.LoadBalancer,\n\t\tIngressName: o.Ingress.Name,\n\t\tLogger: newIngress.logger,\n\t\tAnnotations: newIngress.annotations,\n\t\tTags: newIngress.Tags(),\n\t})\n\n\t// Assemble the target groups\n\tnewIngress.LoadBalancer.TargetGroups, err = targetgroups.NewDesiredTargetGroups(&targetgroups.NewDesiredTargetGroupsOptions{\n\t\tIngress: o.Ingress,\n\t\tLoadBalancerID: newIngress.LoadBalancer.ID,\n\t\tExistingTargetGroups: newIngress.LoadBalancer.TargetGroups,\n\t\tAnnotations: newIngress.annotations,\n\t\tALBNamePrefix: o.ALBNamePrefix,\n\t\tNamespace: o.Ingress.GetNamespace(),\n\t\tTags: newIngress.Tags(),\n\t\tLogger: newIngress.logger,\n\t\tGetServiceNodePort: o.GetServiceNodePort,\n\t\tGetNodes: o.GetNodes,\n\t})\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Error instantiating target groups: %s\", err.Error())\n\t\tnewIngress.Eventf(api.EventTypeWarning, \"ERROR\", msg)\n\t\tnewIngress.logger.Errorf(msg)\n\t\tnewIngress.Reconciled = false\n\t\treturn newIngress\n\t}\n\n\t// Assemble the listeners\n\tnewIngress.LoadBalancer.Listeners, err = listeners.NewDesiredListeners(&listeners.NewDesiredListenersOptions{\n\t\tIngress: o.Ingress,\n\t\tListeners: newIngress.LoadBalancer.Listeners,\n\t\tAnnotations: newIngress.annotations,\n\t\tLogger: newIngress.logger,\n\t})\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Error instantiating listeners: %s\", err.Error())\n\t\tnewIngress.Reconciled = false\n\t\tnewIngress.Eventf(api.EventTypeWarning, \"ERROR\", msg)\n\t\tnewIngress.logger.Errorf(msg)\n\t\treturn newIngress\n\t}\n\n\tnewIngress.valid = true\n\treturn newIngress\n}", "func newDNSController(kubeClient *client.Client,\n\tresyncPeriod time.Duration,\n\tprovider providers.DNSProvider,\n\twatchNamespace string,\n\tpublishServices []string) (*dnsController, error) {\n\n\teventBroadcaster := record.NewBroadcaster()\n\teventBroadcaster.StartLogging(glog.Infof)\n\teventBroadcaster.StartRecordingToSink(kubeClient.Events(\"\"))\n\n\tlbc := dnsController{\n\t\tprovider: provider,\n\t\tpublishServices: publishServices,\n\t\tclient: kubeClient,\n\t\tstopCh: make(chan struct{}),\n\t\trecorder: eventBroadcaster.NewRecorder(api.EventSource{Component: \"loadbalancer-controller\"}),\n\t}\n\n\tlbc.syncQueue = NewTaskQueue(lbc.sync)\n\n\tingEventHandler := framework.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\taddIng := obj.(*extensions.Ingress)\n\t\t\tlbc.recorder.Eventf(addIng, api.EventTypeNormal, \"CREATE\", fmt.Sprintf(\"%s/%s\", addIng.Namespace, addIng.Name))\n\t\t\tlbc.syncQueue.enqueue(obj)\n\t\t},\n\t\tDeleteFunc: func(obj interface{}) {\n\t\t\tupIng := obj.(*extensions.Ingress)\n\t\t\tlbc.recorder.Eventf(upIng, api.EventTypeNormal, \"DELETE\", fmt.Sprintf(\"%s/%s\", upIng.Namespace, upIng.Name))\n\t\t\tlbc.syncQueue.enqueue(obj)\n\t\t},\n\t\tUpdateFunc: func(old, cur interface{}) {\n\t\t\tif !reflect.DeepEqual(old, cur) {\n\t\t\t\tupIng := cur.(*extensions.Ingress)\n\t\t\t\tlbc.recorder.Eventf(upIng, api.EventTypeNormal, \"UPDATE\", 
fmt.Sprintf(\"%s/%s\", upIng.Namespace, upIng.Name))\n\t\t\t\tlbc.syncQueue.enqueue(cur)\n\t\t\t}\n\t\t},\n\t}\n\n\tlbc.ingLister.Store, lbc.ingController = framework.NewInformer(\n\t\t&cache.ListWatch{\n\t\t\tListFunc: ingressListFunc(lbc.client, watchNamespace),\n\t\t\tWatchFunc: ingressWatchFunc(lbc.client, watchNamespace),\n\t\t},\n\t\t&extensions.Ingress{}, resyncPeriod, ingEventHandler)\n\n\tlbc.svcLister.Store, lbc.svcController = framework.NewInformer(\n\t\t&cache.ListWatch{\n\t\t\tListFunc: serviceListFunc(lbc.client, watchNamespace),\n\t\t\tWatchFunc: serviceWatchFunc(lbc.client, watchNamespace),\n\t\t},\n\t\t&api.Service{}, resyncPeriod, framework.ResourceEventHandlerFuncs{})\n\n\treturn &lbc, nil\n}", "func New(\n\tnamespace string,\n\tresyncPeriod time.Duration,\n\tclient clientset.Interface,\n\tupdateCh *channels.RingChannel,\n\tdisableCatchAll bool) Storer {\n\n\tstore := &k8sStore{\n\t\tinformers: &Informer{},\n\t\tlisters: &Lister{},\n\t\tupdateCh: updateCh,\n\t\tsyncSecretMu: &sync.Mutex{},\n\t\tbackendConfigMu: &sync.RWMutex{},\n\t\tsecretIngressMap: NewObjectRefMap(),\n\t}\n\n\teventBroadcaster := record.NewBroadcaster()\n\teventBroadcaster.StartLogging(klog.Infof)\n\teventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{\n\t\tInterface: client.CoreV1().Events(namespace),\n\t})\n\trecorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{\n\t\tComponent: \"alb-ingress-controller\",\n\t})\n\n\tstore.listers.IngressWithAnnotation.Store = cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)\n\t// create informers factory, enable and assign required informers\n\tinfFactory := informers.NewSharedInformerFactoryWithOptions(client, resyncPeriod,\n\t\tinformers.WithNamespace(namespace),\n\t)\n\n\tstore.informers.Ingress = infFactory.Networking().V1().Ingresses().Informer()\n\tstore.listers.Ingress.Store = store.informers.Ingress.GetStore()\n\n\tstore.informers.Endpoint = infFactory.Core().V1().Endpoints().Informer()\n\tstore.listers.Endpoint.Store = store.informers.Endpoint.GetStore()\n\n\tstore.informers.Service = infFactory.Core().V1().Services().Informer()\n\tstore.listers.Service.Store = store.informers.Service.GetStore()\n\n\tstore.informers.Node = infFactory.Core().V1().Nodes().Informer()\n\tstore.listers.Node.Store = store.informers.Node.GetStore()\n\n\tstore.informers.Pod = infFactory.Core().V1().Pods().Informer()\n\tstore.listers.Pod.Store = store.informers.Pod.GetStore()\n\n\tingDeleteHandler := func(obj interface{}) {\n\t\ting, ok := toIngress(obj)\n\t\tif !ok {\n\t\t\t// If we reached here it means the ingress was deleted but its final state is unrecorded.\n\t\t\ttombstone, ok := obj.(cache.DeletedFinalStateUnknown)\n\t\t\tif !ok {\n\t\t\t\tklog.ErrorS(nil, \"Error obtaining object from tombstone\", \"key\", obj)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ting, ok = tombstone.Obj.(*networking.Ingress)\n\t\t\tif !ok {\n\t\t\t\tklog.Errorf(\"Tombstone contained object that is not an Ingress: %#v\", obj)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tif !IsValid(ing) {\n\t\t\treturn\n\t\t}\n\n\t\tif isCatchAllIngress(ing.Spec) && disableCatchAll {\n\t\t\tklog.InfoS(\"Ignoring delete for catch-all because of --disable-catch-all\", \"ingress\", klog.KObj(ing))\n\t\t\treturn\n\t\t}\n\n\t\t//store.listers.IngressWithAnnotation.Delete(ing)\n\n\t\tkey := MetaNamespaceKey(ing)\n\t\tstore.secretIngressMap.Delete(key)\n\n\t\tupdateCh.In() <- helper.Event{\n\t\t\tType: helper.IngressDeleteEvent,\n\t\t\tObj: obj,\n\t\t}\n\t}\n\n\tingEventHandler := 
cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\ting, ok := toIngress(obj)\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !IsValid(ing) {\n\t\t\t\tingressClass, _ := annotations.GetStringAnnotation(IngressKey, ing)\n\t\t\t\tklog.InfoS(\"Ignoring ingress\", \"ingress\", klog.KObj(ing), \"kubernetes.io/ingress.class\", ingressClass, \"ingressClassName\", pointer.StringDeref(ing.Spec.IngressClassName, \"\"))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif isCatchAllIngress(ing.Spec) && disableCatchAll {\n\t\t\t\tklog.InfoS(\"Ignoring add for catch-all ingress because of --disable-catch-all\", \"ingress\", klog.KObj(ing))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trecorder.Eventf(ing, corev1.EventTypeNormal, \"Sync\", \"Scheduled for sync\")\n\n\t\t\tstore.syncIngress(ing)\n\n\t\t\tupdateCh.In() <- helper.Event{\n\t\t\t\tType: helper.CreateEvent,\n\t\t\t\tObj: obj,\n\t\t\t}\n\t\t},\n\t\tDeleteFunc: ingDeleteHandler,\n\t\tUpdateFunc: func(old, cur interface{}) {\n\t\t\toldIng, ok := toIngress(old)\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcurIng, ok := toIngress(cur)\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tvalidOld := IsValid(oldIng)\n\t\t\tvalidCur := IsValid(curIng)\n\t\t\tif !validOld && validCur {\n\t\t\t\tif isCatchAllIngress(curIng.Spec) && disableCatchAll {\n\t\t\t\t\tklog.InfoS(\"ignoring update for catch-all ingress because of --disable-catch-all\", \"ingress\", klog.KObj(curIng))\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tklog.InfoS(\"creating ingress\", \"ingress\", klog.KObj(curIng), \"class\", IngressKey)\n\t\t\t\trecorder.Eventf(curIng, corev1.EventTypeNormal, \"Sync\", \"Scheduled for sync\")\n\t\t\t} else if validOld && !validCur {\n\t\t\t\tklog.InfoS(\"removing ingress\", \"ingress\", klog.KObj(curIng), \"class\", IngressKey)\n\t\t\t\tingDeleteHandler(old)\n\t\t\t\treturn\n\t\t\t} else if validCur && !reflect.DeepEqual(old, cur) {\n\t\t\t\tif isCatchAllIngress(curIng.Spec) && disableCatchAll {\n\t\t\t\t\tklog.InfoS(\"ignoring update for catch-all ingress and delete old one because of --disable-catch-all\", \"ingress\", klog.KObj(curIng))\n\t\t\t\t\tingDeleteHandler(old)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\trecorder.Eventf(curIng, corev1.EventTypeNormal, \"Sync\", \"Scheduled for sync\")\n\t\t\t} else {\n\t\t\t\tklog.V(3).InfoS(\"No changes on ingress. 
Skipping update\", \"ingress\", klog.KObj(curIng))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tstore.syncIngress(curIng)\n\n\t\t\tupdateCh.In() <- helper.Event{\n\t\t\t\tType: helper.UpdateEvent,\n\t\t\t\tObj: cur,\n\t\t\t}\n\t\t},\n\t}\n\n\tepEventHandler := cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\tep1 := obj.(*corev1.Endpoints)\n\t\t\tkey := MetaNamespaceKey(ep1)\n\t\t\tsvc, exist, err := store.listers.Service.GetByKey(key)\n\t\t\tif err != nil {\n\t\t\t\tklog.Error(err, \"get service GetByKey by endpoint failed\", \"endpoint\", ep1)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !exist {\n\t\t\t\tklog.Warningf(\"epEventHandler %s\", key)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ts := svc.(*corev1.Service)\n\n\t\t\tklog.Info(\"controller: endpoint add event\",\n\t\t\t\tutil.NamespacedName(ep1).String())\n\t\t\tupdateCh.In() <- helper.Event{\n\t\t\t\tType: helper.EndPointEvent,\n\t\t\t\tObj: s,\n\t\t\t}\n\t\t},\n\t\tDeleteFunc: func(obj interface{}) {\n\t\t\tep1 := obj.(*corev1.Endpoints)\n\t\t\tkey := MetaNamespaceKey(ep1)\n\t\t\tsvc, exist, err := store.listers.Service.GetByKey(key)\n\t\t\tif err != nil {\n\t\t\t\tklog.Error(err, \"DeleteFunc get service GetByKey by endpoint failed\", \"endpoint\", ep1)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !exist {\n\t\t\t\tklog.Warningf(\"DeleteFunc epEventHandler %s\", key)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ts := svc.(*corev1.Service)\n\n\t\t\tklog.Info(\"controller: endpoint delete event\",\n\t\t\t\tutil.NamespacedName(ep1).String())\n\t\t\tupdateCh.In() <- helper.Event{\n\t\t\t\tType: helper.EndPointEvent,\n\t\t\t\tObj: s,\n\t\t\t}\n\t\t},\n\t\tUpdateFunc: func(old, cur interface{}) {\n\t\t\tep1 := old.(*corev1.Endpoints)\n\t\t\tep2 := cur.(*corev1.Endpoints)\n\t\t\tif !reflect.DeepEqual(ep1.Subsets, ep2.Subsets) {\n\t\t\t\tkey := MetaNamespaceKey(ep1)\n\t\t\t\tsvc, exist, err := store.listers.Service.GetByKey(key)\n\t\t\t\tif err != nil {\n\t\t\t\t\tklog.Error(err, \"UpdateFunc get service GetByKey by endpoint failed\", \"endpoint\", ep1)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif !exist {\n\t\t\t\t\tklog.Warningf(\"UpdateFunc epEventHandler %s\", key)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\ts := svc.(*corev1.Service)\n\n\t\t\t\tklog.Info(\"controller: endpoint update event\",\n\t\t\t\t\tutil.NamespacedName(ep1).String())\n\t\t\t\tupdateCh.In() <- helper.Event{\n\t\t\t\t\tType: helper.EndPointEvent,\n\t\t\t\t\tObj: s,\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t}\n\tpodEventHandler := cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\terr := store.listers.Pod.Add(obj)\n\t\t\tif err != nil {\n\t\t\t\tklog.Error(err, \"Pod Add failed\")\n\t\t\t\treturn\n\t\t\t}\n\t\t},\n\t\tDeleteFunc: func(obj interface{}) {\n\t\t\t_ = store.listers.Pod.Delete(obj)\n\t\t},\n\t\tUpdateFunc: func(old, cur interface{}) {\n\t\t},\n\t}\n\tnodeEventHandler := cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\tserviceList := store.listers.Service.List()\n\t\t\tfor _, v := range serviceList {\n\t\t\t\tsvc := v.(*corev1.Service)\n\t\t\t\tklog.Info(\"node change: enqueue service\", util.Key(svc))\n\t\t\t\tupdateCh.In() <- helper.Event{\n\t\t\t\t\tType: helper.NodeEvent,\n\t\t\t\t\tObj: svc,\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t\tUpdateFunc: func(old, cur interface{}) {\n\t\t\tnodeOld := old.(*corev1.Node)\n\t\t\tnodeNew := cur.(*corev1.Node)\n\n\t\t\tif !reflect.DeepEqual(nodeOld.Labels, nodeNew.Labels) {\n\t\t\t\tserviceList := store.listers.Service.List()\n\t\t\t\tfor _, v := range serviceList {\n\t\t\t\t\tsvc := 
v.(*corev1.Service)\n\t\t\t\t\tklog.Info(\"node change: enqueue service\", util.Key(svc))\n\t\t\t\t\tupdateCh.In() <- helper.Event{\n\t\t\t\t\t\tType: helper.NodeEvent,\n\t\t\t\t\t\tObj: svc,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\n\t\tDeleteFunc: func(obj interface{}) {\n\t\t\tserviceList := store.listers.Service.List()\n\t\t\tfor _, v := range serviceList {\n\t\t\t\tsvc := v.(*corev1.Service)\n\t\t\t\tklog.Info(\"node change: enqueue service\", util.Key(svc))\n\t\t\t\tupdateCh.In() <- helper.Event{\n\t\t\t\t\tType: helper.NodeEvent,\n\t\t\t\t\tObj: svc,\n\t\t\t\t}\n\t\t\t}\n\n\t\t},\n\t}\n\n\tserviceHandler := cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\tcurSvc := obj.(*corev1.Service)\n\t\t\tstore.enqueueImpactedIngresses(updateCh, curSvc)\n\t\t},\n\t\tUpdateFunc: func(old, cur interface{}) {\n\t\t\t// update the server group\n\t\t\toldSvc := old.(*corev1.Service)\n\t\t\tcurSvc := cur.(*corev1.Service)\n\n\t\t\tif reflect.DeepEqual(oldSvc, curSvc) {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tupdateCh.In() <- helper.Event{\n\t\t\t\tType: helper.ServiceEvent,\n\t\t\t\tObj: cur,\n\t\t\t}\n\t\t},\n\t\tDeleteFunc: func(obj interface{}) {\n\t\t\t// ingress refer service to delete\n\t\t\tcurSvc := obj.(*corev1.Service)\n\t\t\tstore.enqueueImpactedIngresses(updateCh, curSvc)\n\t\t},\n\t}\n\n\t_, _ = store.informers.Ingress.AddEventHandler(ingEventHandler)\n\t_, _ = store.informers.Endpoint.AddEventHandler(epEventHandler)\n\t_, _ = store.informers.Pod.AddEventHandler(podEventHandler)\n\t_, _ = store.informers.Service.AddEventHandler(serviceHandler)\n\t_, _ = store.informers.Node.AddEventHandler(nodeEventHandler)\n\treturn store\n}", "func add(mgr manager.Manager, r reconcile.Reconciler) error {\n\t// Create a new controller\n\tc, err := controller.New(\"elasticsearchtemplate-controller\", mgr, controller.Options{Reconciler: r})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for changes to primary resource ElasticSearchTemplate\n\terr = c.Watch(&source.Kind{Type: &xov1alpha1.ElasticSearchTemplate{}}, &handler.EnqueueRequestForObject{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func Validate(ingress *networkingv1.Ingress) error {\n\tif supportsTLS(ingress) && containsWildcard(ingress.Spec.TLS[0].Hosts[0]) {\n\t\treturn errors.Errorf(\"ingress TLS host %q contains wildcards\", ingress.Spec.TLS[0].Hosts[0])\n\t}\n\n\tif len(ingress.Spec.Rules) == 0 {\n\t\treturn errors.New(\"ingress does not have any rules\")\n\t}\n\n\tif containsWildcard(ingress.Spec.Rules[0].Host) {\n\t\treturn errors.Errorf(\"ingress host %q contains wildcards\", ingress.Spec.Rules[0].Host)\n\t}\n\n\treturn nil\n}", "func NewIngressController(interval time.Duration, staticAddress string) (*IngressController, error) {\n\tconfig, err := rest.InClusterConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcontroller := &IngressController{\n\t\tInterface: client,\n\t\tinterval: interval,\n\t\tstaticAddress: staticAddress,\n\t}\n\n\treturn controller, nil\n}", "func (a *applyConfigController) Begin() {\n\tnowApp := a.manager.store.GetAppService(a.appService.ServiceID)\n\tnowConfigMaps := nowApp.GetConfigMaps()\n\tnewConfigMaps := a.appService.GetConfigMaps()\n\tvar nowConfigMapMaps = make(map[string]*corev1.ConfigMap, len(nowConfigMaps))\n\tfor i, now := range nowConfigMaps {\n\t\tnowConfigMapMaps[now.Name] = nowConfigMaps[i]\n\t}\n\tfor _, new := range newConfigMaps {\n\t\tif 
nowConfig, ok := nowConfigMapMaps[new.Name]; ok {\n\t\t\tnew.UID = nowConfig.UID\n\t\t\tnewc, err := a.manager.client.CoreV1().ConfigMaps(nowApp.TenantID).Update(context.Background(), new, metav1.UpdateOptions{})\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"update config map failure %s\", err.Error())\n\t\t\t}\n\t\t\tnowApp.SetConfigMap(newc)\n\t\t\tnowConfigMapMaps[new.Name] = nil\n\t\t\tlogrus.Debugf(\"update configmap %s for service %s\", new.Name, a.appService.ServiceID)\n\t\t} else {\n\t\t\tnewc, err := a.manager.client.CoreV1().ConfigMaps(nowApp.TenantID).Create(context.Background(), new, metav1.CreateOptions{})\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"update config map failure %s\", err.Error())\n\t\t\t}\n\t\t\tnowApp.SetConfigMap(newc)\n\t\t\tlogrus.Debugf(\"create configmap %s for service %s\", new.Name, a.appService.ServiceID)\n\t\t}\n\t}\n\tfor name, handle := range nowConfigMapMaps {\n\t\tif handle != nil {\n\t\t\tif err := a.manager.client.CoreV1().ConfigMaps(nowApp.TenantID).Delete(context.Background(), name, metav1.DeleteOptions{}); err != nil {\n\t\t\t\tlogrus.Errorf(\"delete config map failure %s\", err.Error())\n\t\t\t}\n\t\t\tlogrus.Debugf(\"delete configmap %s for service %s\", name, a.appService.ServiceID)\n\t\t}\n\t}\n\ta.manager.callback(a.controllerID, nil)\n}", "func (c *Cluster) handleIngressEvent(event interface{}, action watch.EventType) {\n\teventObj, ok := event.(*v1beta1extensionsapi.Ingress)\n\tif !ok {\n\t\tif action != watch.Error {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"cluster\": c.config.Name,\n\t\t\t}).Error(\"Got event in ingress handler which contains no ingress\")\n\t\t} else {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"cluster\": c.config.Name,\n\t\t\t\t\"event\": event,\n\t\t\t}).Error(\"Some other error\")\n\t\t}\n\t\treturn\n\t}\n\tc.latestIngressVersion = eventObj.ResourceVersion\n\tswitch action {\n\tcase watch.Deleted:\n\t\tevent := state.IngressChange{\n\t\t\tIngress: state.K8RouterIngress{\n\t\t\t\tName: eventObj.Namespace + \"-\" + eventObj.Name,\n\t\t\t\tHosts: []string{},\n\t\t\t},\n\t\t\tCreated: false,\n\t\t}\n\t\tdelete(c.knownIngresses, event.Ingress.Name)\n\t\tc.ingressEvents <- event\n\tcase watch.Modified, watch.Added:\n\t\tobj := state.K8RouterIngress{\n\t\t\tName: eventObj.Namespace + \"-\" + eventObj.Name,\n\t\t\tHosts: []string{},\n\t\t}\n\t\tfor _, rule := range eventObj.Spec.Rules {\n\t\t\tobj.Hosts = append(obj.Hosts, rule.Host)\n\t\t}\n\t\tmyEvent := state.IngressChange{\n\t\t\tIngress: obj,\n\t\t\tCreated: false,\n\t\t}\n\t\tval, ok := c.knownIngresses[obj.Name]\n\t\tisEquivalent := ok && state.IsIngressEquivalent(&obj, &val)\n\t\tif action == watch.Modified && !isEquivalent {\n\t\t\tc.ingressEvents <- myEvent\n\t\t}\n\t\tif !isEquivalent {\n\t\t\tmyEvent.Created = true\n\t\t\tc.ingressEvents <- myEvent\n\t\t}\n\t\tc.knownIngresses[obj.Name] = obj\n\t}\n}", "func (sr *serviceRepository) ListIngress(n string) ([]resource.Service, error) {\n\tingressList, err := sr.kubernetes.NetworkingV1().Ingresses(n).List(context.Background(), metav1.ListOptions{})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar services []resource.Service\n\n\tfor _, ing := range ingressList.Items {\n\n\t\tfor _, rules := range ing.Spec.Rules {\n\t\t\tfor _, path := range rules.HTTP.Paths {\n\t\t\t\tsvc := resource.Service{\n\t\t\t\t\tName: path.Backend.Service.Name,\n\t\t\t\t\tAddr: rules.Host,\n\t\t\t\t\tPorts: []resource.Port{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tPort: 
path.Backend.Service.Port.Number,\n\t\t\t\t\t\t\tExposedPort: 80,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\tservices = append(services, svc)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn services, nil\n\n}", "func (c *Client) Create(ctx context.Context, obj *netv1beta.Ingress, opts ...client.CreateOption) error {\n\tif c.extensionAPI {\n\t\textIngressList := c.fromNetToExt(*obj)\n\t\treturn c.client.Create(ctx, &extIngressList, opts...)\n\t}\n\treturn c.client.Create(ctx, obj, opts...)\n}", "func (i *Ingress) Deploy(res api.DeployerResources) error {\n\t_, err := res.KubeClient.ExtensionsV1beta1().Ingresses(i.Namespace).Create(i.Ingress)\n\treturn err\n}", "func (op *Operator) initIngressCRDWatcher() cache.Controller {\n\tlw := &cache.ListWatch{\n\t\tListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {\n\t\t\treturn op.VoyagerClient.Ingresses(op.Opt.WatchNamespace()).List(metav1.ListOptions{})\n\t\t},\n\t\tWatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {\n\t\t\treturn op.VoyagerClient.Ingresses(op.Opt.WatchNamespace()).Watch(metav1.ListOptions{})\n\t\t},\n\t}\n\t_, informer := cache.NewInformer(lw,\n\t\t&api.Ingress{},\n\t\top.Opt.ResyncPeriod,\n\t\tcache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\tctx := etx.Background()\n\t\t\t\tlogger := log.New(ctx)\n\t\t\t\tif engress, ok := obj.(*api.Ingress); ok {\n\t\t\t\t\tengress.Migrate()\n\t\t\t\t\tlogger.Infof(\"%s %s@%s added\", engress.APISchema(), engress.Name, engress.Namespace)\n\t\t\t\t\tif !engress.ShouldHandleIngress(op.Opt.IngressClass) {\n\t\t\t\t\t\tlogger.Infof(\"%s %s@%s does not match ingress class\", engress.APISchema(), engress.Name, engress.Namespace)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tif err := engress.IsValid(op.Opt.CloudProvider); err != nil {\n\t\t\t\t\t\top.recorder.Eventf(\n\t\t\t\t\t\t\tengress.ObjectReference(),\n\t\t\t\t\t\t\tcore.EventTypeWarning,\n\t\t\t\t\t\t\teventer.EventReasonIngressInvalid,\n\t\t\t\t\t\t\t\"Reason: %s\",\n\t\t\t\t\t\t\terr.Error(),\n\t\t\t\t\t\t)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\top.AddEngress(ctx, engress)\n\t\t\t\t}\n\t\t\t},\n\t\t\tUpdateFunc: func(old, new interface{}) {\n\t\t\t\tctx := etx.Background()\n\t\t\t\tlogger := log.New(ctx)\n\t\t\t\toldEngress, ok := old.(*api.Ingress)\n\t\t\t\tif !ok {\n\t\t\t\t\tlogger.Errorln(\"Invalid Ingress object\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\toldEngress.Migrate()\n\t\t\t\tnewEngress, ok := new.(*api.Ingress)\n\t\t\t\tif !ok {\n\t\t\t\t\tlogger.Errorln(\"Invalid Ingress object\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tnewEngress.Migrate()\n\t\t\t\tif changed, _ := oldEngress.HasChanged(*newEngress); !changed {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdiff := cmp.Diff(oldEngress, newEngress,\n\t\t\t\t\tcmp.Comparer(func(x, y resource.Quantity) bool {\n\t\t\t\t\t\treturn x.Cmp(y) == 0\n\t\t\t\t\t}),\n\t\t\t\t\tcmp.Comparer(func(x, y *metav1.Time) bool {\n\t\t\t\t\t\tif x == nil && y == nil {\n\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif x != nil && y != nil {\n\t\t\t\t\t\t\treturn x.Time.Equal(y.Time)\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}))\n\t\t\t\tlogger.Infof(\"%s %s@%s has changed. 
Diff: %s\", newEngress.APISchema(), newEngress.Name, newEngress.Namespace, diff)\n\t\t\t\tif err := newEngress.IsValid(op.Opt.CloudProvider); err != nil {\n\t\t\t\t\top.recorder.Eventf(\n\t\t\t\t\t\tnewEngress.ObjectReference(),\n\t\t\t\t\t\tcore.EventTypeWarning,\n\t\t\t\t\t\teventer.EventReasonIngressInvalid,\n\t\t\t\t\t\t\"Reason: %s\",\n\t\t\t\t\t\terr.Error(),\n\t\t\t\t\t)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\top.UpdateEngress(ctx, oldEngress, newEngress)\n\t\t\t},\n\t\t\tDeleteFunc: func(obj interface{}) {\n\t\t\t\tif engress, ok := obj.(*api.Ingress); ok {\n\t\t\t\t\tengress.Migrate()\n\t\t\t\t\tctx := etx.Background()\n\t\t\t\t\tlogger := log.New(ctx)\n\t\t\t\t\tlogger.Infof(\"%s %s@%s deleted\", engress.APISchema(), engress.Name, engress.Namespace)\n\t\t\t\t\tif !engress.ShouldHandleIngress(op.Opt.IngressClass) {\n\t\t\t\t\t\tlogger.Infof(\"%s %s@%s does not match ingress class\", engress.APISchema(), engress.Name, engress.Namespace)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\top.DeleteEngress(ctx, engress)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t)\n\treturn informer\n}", "func (r *reconciler) secretToIngressController(ctx context.Context, o client.Object) []reconcile.Request {\n\tvar (\n\t\trequests []reconcile.Request\n\t\tlist operatorv1.IngressControllerList\n\t\tlistOpts = client.MatchingFields(map[string]string{\n\t\t\t\"defaultCertificateName\": o.GetName(),\n\t\t})\n\t\tingressConfig configv1.Ingress\n\t)\n\tif err := r.cache.List(ctx, &list, listOpts); err != nil {\n\t\tlog.Error(err, \"failed to list ingresscontrollers for secret\", \"secret\", o.GetName())\n\t\treturn requests\n\t}\n\tif err := r.cache.Get(ctx, controller.IngressClusterConfigName(), &ingressConfig); err != nil {\n\t\tlog.Error(err, \"failed to get ingresses.config.openshift.io\", \"name\", controller.IngressClusterConfigName())\n\t\treturn requests\n\t}\n\tfor _, ic := range list.Items {\n\t\tif ic.Status.Domain != ingressConfig.Spec.Domain {\n\t\t\tcontinue\n\t\t}\n\t\tlog.Info(\"queueing ingresscontroller\", \"name\", ic.Name)\n\t\trequest := reconcile.Request{\n\t\t\tNamespacedName: types.NamespacedName{\n\t\t\t\tNamespace: ic.Namespace,\n\t\t\t\tName: ic.Name,\n\t\t\t},\n\t\t}\n\t\trequests = append(requests, request)\n\t}\n\treturn requests\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpNeighborsNeighborAfiSafisAfiSafiApplyPolicy(ctx context.Context, name string, identifier string, protocolName string, neighborAddress string, afiSafiName string, frinxOpenconfigRoutingPolicyApplypolicygroupApplyPolicyBodyParam FrinxOpenconfigRoutingPolicyApplypolicygroupApplyPolicyRequest1, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:neighbors/frinx-openconfig-network-instance:neighbor/{neighbor-address}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:apply-policy/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"neighbor-address\"+\"}\", fmt.Sprintf(\"%v\", neighborAddress), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigRoutingPolicyApplypolicygroupApplyPolicyBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (h *Handler) ApplyFromBytes(data []byte) (sa *corev1.ServiceAccount, err error) {\n\tsa, err = h.CreateFromBytes(data)\n\tif k8serrors.IsAlreadyExists(err) {\n\t\tsa, err = h.UpdateFromBytes(data)\n\t}\n\treturn\n}", "func main() {\n\tc := framework.TemplateProcessor{\n\t\tTemplateData: &API{},\n\t\tPatchTemplates: []framework.PatchTemplate{&framework.ResourcePatchTemplate{\n\t\t\tSelector: &framework.Selector{\n\t\t\t\tKinds: []string{\"Deployment\"},\n\t\t\t\tNames: 
[]string{\"controller-manager\"},\n\t\t\t},\n\t\t\tTemplates: parser.TemplateStrings(`\nspec:\n replicas: {{.Replicas}}\n`),\n\t\t}},\n\t}\n\n\tcmd := command.Build(&c, command.StandaloneEnabled, false)\n\tif err := cmd.Execute(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\tos.Exit(1)\n\t}\n}", "func add(mgr manager.Manager, r reconcile.Reconciler) error {\n\t// Create a new controller\n\tc, err := controller.New(\"apischeme-controller\", mgr, controller.Options{Reconciler: r})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for changes to primary resource APIScheme\n\terr = c.Watch(&source.Kind{Type: &cloudingressv1alpha1.APIScheme{}}, &handler.EnqueueRequestForObject{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func IntoResourceFile(p *Params, in io.Reader, out io.Writer) error {\n\treader := yamlDecoder.NewYAMLReader(bufio.NewReaderSize(in, 4096))\n\tfor {\n\t\traw, err := reader.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tkinds := map[string]struct {\n\t\t\ttyp interface{}\n\t\t\tinject func(typ interface{}) error\n\t\t}{\n\t\t\t\"Job\": {\n\t\t\t\ttyp: &batch.Job{},\n\t\t\t\tinject: func(typ interface{}) error {\n\t\t\t\t\treturn injectIntoPodTemplateSpec(p, &((typ.(*batch.Job)).Spec.Template))\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"DaemonSet\": {\n\t\t\t\ttyp: &v1beta1.DaemonSet{},\n\t\t\t\tinject: func(typ interface{}) error {\n\t\t\t\t\treturn injectIntoPodTemplateSpec(p, &((typ.(*v1beta1.DaemonSet)).Spec.Template))\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"ReplicaSet\": {\n\t\t\t\ttyp: &v1beta1.ReplicaSet{},\n\t\t\t\tinject: func(typ interface{}) error {\n\t\t\t\t\treturn injectIntoPodTemplateSpec(p, &((typ.(*v1beta1.ReplicaSet)).Spec.Template))\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"Deployment\": {\n\t\t\t\ttyp: &v1beta1.Deployment{},\n\t\t\t\tinject: func(typ interface{}) error {\n\t\t\t\t\treturn injectIntoPodTemplateSpec(p, &((typ.(*v1beta1.Deployment)).Spec.Template))\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"ReplicationController\": {\n\t\t\t\ttyp: &v1.ReplicationController{},\n\t\t\t\tinject: func(typ interface{}) error {\n\t\t\t\t\treturn injectIntoPodTemplateSpec(p, ((typ.(*v1.ReplicationController)).Spec.Template))\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"StatefulSet\": {\n\t\t\t\ttyp: &appsv1beta1.StatefulSet{},\n\t\t\t\tinject: func(typ interface{}) error {\n\t\t\t\t\treturn injectIntoPodTemplateSpec(p, &((typ.(*appsv1beta1.StatefulSet)).Spec.Template))\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tvar updated []byte\n\t\tvar meta metav1.TypeMeta\n\t\tif err = yaml.Unmarshal(raw, &meta); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif kind, ok := kinds[meta.Kind]; ok {\n\t\t\tif err = yaml.Unmarshal(raw, kind.typ); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err = kind.inject(kind.typ); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif updated, err = yaml.Marshal(kind.typ); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tupdated = raw // unchanged\n\t\t}\n\n\t\tif _, err = out.Write(updated); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err = fmt.Fprint(out, \"---\\n\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func main() {\n\tctx := context.Background()\n\t// Set up the Cloud Trace exporter.\n\texporter, err := cloudtrace.New()\n\tif err != nil {\n\t\tlog.Fatalf(\"cloudtrace.New: %v\", err)\n\t}\n\t// Identify your application using resource detection.\n\tres, err := resource.New(ctx,\n\t\t// Use the GCP resource detector to detect information about the GKE 
Cluster.\n\t\tresource.WithDetectors(gcp.NewDetector()),\n\t\tresource.WithTelemetrySDK(),\n\t)\n\tif err != nil {\n\t\tlog.Fatalf(\"resource.New: %v\", err)\n\t}\n\ttp := trace.NewTracerProvider(\n\t\ttrace.WithBatcher(exporter),\n\t\ttrace.WithResource(res),\n\t)\n\t// Set the global TracerProvider which is used by otelhttp to record spans.\n\totel.SetTracerProvider(tp)\n\t// Flush any pending spans on shutdown.\n\tdefer tp.ForceFlush(ctx)\n\n\t// Set the global Propagators which is used by otelhttp to propagate\n\t// context using the w3c traceparent and baggage formats.\n\totel.SetTextMapPropagator(autoprop.NewTextMapPropagator())\n\n\t// Handle incoming request.\n\tr := mux.NewRouter()\n\tr.HandleFunc(\"/\", mainHandler)\n\tvar handler http.Handler = r\n\n\t// Use otelhttp to create spans and extract context for incoming http\n\t// requests.\n\thandler = otelhttp.NewHandler(handler, \"server\")\n\tlog.Fatal(http.ListenAndServe(fmt.Sprintf(\":%v\", os.Getenv(\"PORT\")), handler))\n}", "func NewNGINXController(config *Configuration, fs file.Filesystem) *NGINXController {\n\teventBroadcaster := record.NewBroadcaster()\n\teventBroadcaster.StartLogging(glog.Infof)\n\teventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{\n\t\tInterface: config.Client.CoreV1().Events(config.Namespace),\n\t})\n\n\tn := &NGINXController{\n\t\tcfg: config,\n\t\tsyncRateLimiter: flowcontrol.NewTokenBucketRateLimiter(config.SyncRateLimit, 1),\n\n\t\trecorder: eventBroadcaster.NewRecorder(scheme.Scheme, apiv1.EventSource{\n\t\t\tComponent: \"kong-ingress-controller\",\n\t\t}),\n\n\t\tstopCh: make(chan struct{}),\n\t\tupdateCh: channels.NewRingChannel(1024),\n\n\t\tstopLock: &sync.Mutex{},\n\n\t\tfileSystem: fs,\n\n\t\t// create an empty configuration.\n\t\trunningConfig: &ingress.Configuration{},\n\t}\n\n\tn.store = store.New(\n\t\tconfig.EnableSSLChainCompletion,\n\t\tconfig.Namespace,\n\t\t\"\",\n\t\t\"\",\n\t\t\"\",\n\t\t\"\",\n\t\tconfig.ResyncPeriod,\n\t\tconfig.Client,\n\t\tfs,\n\t\tn.updateCh)\n\n\tn.syncQueue = task.NewTaskQueue(n.syncIngress)\n\n\tif config.UpdateStatus {\n\t\tn.syncStatus = status.NewStatusSyncer(status.Config{\n\t\t\tClient: config.Client,\n\t\t\tPublishService: config.PublishService,\n\t\t\tPublishStatusAddress: config.PublishStatusAddress,\n\t\t\tIngressLister: n.store,\n\t\t\tElectionID: config.ElectionID,\n\t\t\tIngressClass: class.IngressClass,\n\t\t\tDefaultIngressClass: class.DefaultClass,\n\t\t\tUpdateStatusOnShutdown: config.UpdateStatusOnShutdown,\n\t\t\tUseNodeInternalIP: config.UseNodeInternalIP,\n\t\t})\n\t} else {\n\t\tglog.Warning(\"Update of ingress status is disabled (flag --update-status=false was specified)\")\n\t}\n\n\treturn n\n}", "func TestIngressNoUpdate(t *testing.T) {\n\tingrNoUpdate := &networkingv1.Ingress{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: \"red-ns\",\n\t\t\tName: \"testingr-noupdate\",\n\t\t},\n\t\tSpec: networkingv1.IngressSpec{\n\t\t\tDefaultBackend: &networkingv1.IngressBackend{\n\t\t\t\tService: &networkingv1.IngressServiceBackend{\n\t\t\t\t\tName: \"testsvc\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\t_, err := kubeClient.NetworkingV1().Ingresses(\"red-ns\").Create(context.TODO(), ingrNoUpdate, metav1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"error in adding Ingress: %v\", err)\n\t}\n\twaitAndverify(t, \"Ingress/red-ns/testingr-noupdate\")\n\n\tingrNoUpdate.Status = networkingv1.IngressStatus{\n\t\tLoadBalancer: corev1.LoadBalancerStatus{\n\t\t\tIngress: []corev1.LoadBalancerIngress{\n\t\t\t\t{\n\t\t\t\t\tIP: 
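// A hedged sketch of what a handler behind the otelhttp wrapper above might do to
// record its own child span. otel.Tracer and Start/End are the real
// go.opentelemetry.io/otel API; the handler body and tracer name are assumptions,
// since mainHandler's body is not shown above.
func exampleHandler(w http.ResponseWriter, r *http.Request) {
	// r.Context() already carries the span created by otelhttp.NewHandler.
	ctx, span := otel.Tracer("example/server").Start(r.Context(), "exampleHandler")
	defer span.End()
	_ = ctx // pass ctx onward so downstream calls join the same trace
	fmt.Fprintln(w, "ok")
}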
\"1.1.1.1\",\n\t\t\t\t\tHostname: \"testingr.avi.internal\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tingrNoUpdate.ResourceVersion = \"2\"\n\t_, err = kubeClient.NetworkingV1().Ingresses(\"red-ns\").Update(context.TODO(), ingrNoUpdate, metav1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"error in updating Ingress: %v\", err)\n\t}\n\n\tingrNoUpdate.Status = networkingv1.IngressStatus{\n\t\tLoadBalancer: corev1.LoadBalancerStatus{\n\t\t\tIngress: []corev1.LoadBalancerIngress{\n\t\t\t\t{\n\t\t\t\t\tIP: \"1.1.1.1\",\n\t\t\t\t\tHostname: \"testingr.avi.internal\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tIP: \"2.3.4.5\",\n\t\t\t\t\tHostname: \"testingr2.avi.internal\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tingrNoUpdate.ResourceVersion = \"3\"\n\t_, err = kubeClient.NetworkingV1().Ingresses(\"red-ns\").Update(context.TODO(), ingrNoUpdate, metav1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"error in updating Ingress: %v\", err)\n\t}\n\n\twaitAndverify(t, \"\")\n}", "func NewIngressController(\n\tlog *logrus.Logger,\n\tingClass string,\n\tkubeClient kubernetes.Interface,\n\tvc *varnish.Controller,\n\tinfFactory informers.SharedInformerFactory,\n\tvcrInfFactory vcr_informers.SharedInformerFactory,\n) (*IngressController, error) {\n\n\tingc := IngressController{\n\t\tlog: log,\n\t\tclient: kubeClient,\n\t\tstopCh: make(chan struct{}),\n\t\tvController: vc,\n\t}\n\n\tInitMetrics()\n\n\teventBroadcaster := record.NewBroadcaster()\n\teventBroadcaster.StartLogging(ingc.log.Printf)\n\teventBroadcaster.StartRecordingToSink(&core_v1.EventSinkImpl{\n\t\tInterface: ingc.client.CoreV1().Events(\"\"),\n\t})\n\tevtScheme := runtime.NewScheme()\n\tif err := api_v1.AddToScheme(evtScheme); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := extensions.AddToScheme(evtScheme); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := vcr_v1alpha1.AddToScheme(evtScheme); err != nil {\n\t\treturn nil, err\n\t}\n\tingc.recorder = eventBroadcaster.NewRecorder(evtScheme,\n\t\tapi_v1.EventSource{Component: \"varnish-ingress-controller\"})\n\n\tingc.informers = &infrmrs{\n\t\ting: infFactory.Extensions().V1beta1().Ingresses().Informer(),\n\t\tsvc: infFactory.Core().V1().Services().Informer(),\n\t\tendp: infFactory.Core().V1().Endpoints().Informer(),\n\t\tsecr: infFactory.Core().V1().Secrets().Informer(),\n\t\tvcfg: vcrInfFactory.Ingress().V1alpha1().VarnishConfigs().\n\t\t\tInformer(),\n\t\tbcfg: vcrInfFactory.Ingress().V1alpha1().BackendConfigs().\n\t\t\tInformer(),\n\t}\n\n\tevtFuncs := cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: ingc.addObj,\n\t\tDeleteFunc: ingc.deleteObj,\n\t\tUpdateFunc: ingc.updateObj,\n\t}\n\n\tingc.informers.ing.AddEventHandler(evtFuncs)\n\tingc.informers.svc.AddEventHandler(evtFuncs)\n\tingc.informers.endp.AddEventHandler(evtFuncs)\n\tingc.informers.secr.AddEventHandler(evtFuncs)\n\tingc.informers.vcfg.AddEventHandler(evtFuncs)\n\tingc.informers.bcfg.AddEventHandler(evtFuncs)\n\n\tingc.listers = &Listers{\n\t\ting: infFactory.Extensions().V1beta1().Ingresses().Lister(),\n\t\tsvc: infFactory.Core().V1().Services().Lister(),\n\t\tendp: infFactory.Core().V1().Endpoints().Lister(),\n\t\tsecr: infFactory.Core().V1().Secrets().Lister(),\n\t\tvcfg: vcrInfFactory.Ingress().V1alpha1().VarnishConfigs().\n\t\t\tLister(),\n\t\tbcfg: vcrInfFactory.Ingress().V1alpha1().BackendConfigs().\n\t\t\tLister(),\n\t}\n\n\tingc.nsQs = NewNamespaceQueues(ingc.log, ingClass, ingc.vController,\n\t\tingc.listers, ingc.client, ingc.recorder)\n\n\treturn &ingc, nil\n}", "func IngressToVirtualService(key resource.VersionedKey, 
i *ingress.IngressSpec, domainSuffix string, ingressByHost map[string]resource.Entry) {\n\t// Ingress allows a single host - if missing '*' is assumed\n\t// We need to merge all rules with a particular host across\n\t// all ingresses, and return a separate VirtualService for each\n\t// host.\n\n\tnamespace, name := key.FullName.InterpretAsNamespaceAndName()\n\tfor _, rule := range i.Rules {\n\t\tif rule.HTTP == nil {\n\t\t\tscope.Infof(\"invalid ingress rule %s:%s for host %q, no paths defined\", namespace, name, rule.Host)\n\t\t\tcontinue\n\t\t}\n\n\t\thost := rule.Host\n\t\tnamePrefix := strings.Replace(host, \".\", \"-\", -1)\n\t\tif host == \"\" {\n\t\t\thost = \"*\"\n\t\t}\n\t\tvirtualService := &v1alpha3.VirtualService{\n\t\t\tHosts: []string{host},\n\t\t\tGateways: []string{model.IstioIngressGatewayName},\n\t\t}\n\n\t\thttpRoutes := []*v1alpha3.HTTPRoute{}\n\t\tfor _, path := range rule.HTTP.Paths {\n\t\t\thttpMatch := &v1alpha3.HTTPMatchRequest{\n\t\t\t\tUri: createStringMatch(path.Path),\n\t\t\t}\n\n\t\t\thttpRoute := ingressBackendToHTTPRoute(&path.Backend, namespace, domainSuffix)\n\t\t\tif httpRoute == nil {\n\t\t\t\tscope.Infof(\"invalid ingress rule %s:%s for host %q, no backend defined for path\", namespace, name, rule.Host)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\thttpRoute.Match = []*v1alpha3.HTTPMatchRequest{httpMatch}\n\t\t\thttpRoutes = append(httpRoutes, httpRoute)\n\t\t}\n\n\t\tvirtualService.Http = httpRoutes\n\n\t\tnewName := namePrefix + \"-\" + name + \"-\" + model.IstioIngressGatewayName\n\t\tnewNamespace := model.IstioIngressNamespace\n\n\t\told, f := ingressByHost[host]\n\t\tif f {\n\t\t\tvs := old.Item.(*v1alpha3.VirtualService)\n\t\t\tvs.Http = append(vs.Http, httpRoutes...)\n\t\t} else {\n\t\t\tingressByHost[host] = resource.Entry{\n\t\t\t\tID: resource.VersionedKey{\n\t\t\t\t\tKey: resource.Key{\n\t\t\t\t\t\tFullName: resource.FullNameFromNamespaceAndName(newNamespace, newName),\n\t\t\t\t\t\tTypeURL: metadata.VirtualService.TypeURL,\n\t\t\t\t\t},\n\t\t\t\t\tVersion: key.Version,\n\t\t\t\t\tCreateTime: key.CreateTime,\n\t\t\t\t},\n\t\t\t\tItem: virtualService,\n\t\t\t}\n\t\t}\n\t}\n\n\t// Matches * and \"/\". 
Currently not supported - would conflict\n\t// with any other explicit VirtualService.\n\tif i.Backend != nil {\n\t\tscope.Infof(\"Ignore default wildcard ingress, use VirtualService %s:%s\",\n\t\t\tnamespace, name)\n\t}\n}", "func templateDeploy(cmd *cobra.Command, args []string) {\n\t//Check deploy template file.\n\tif len(args) <= 0 || utils.IsFileExist(args[0]) == false {\n\t\tfmt.Fprintf(os.Stderr, \"the deploy template file is required, %s\\n\", \"see https://github.com/Huawei/containerops/singular for more detail.\")\n\t\tos.Exit(1)\n\t}\n\n\ttemplate := args[0]\n\td := new(objects.Deployment)\n\n\t//Read template file and parse.\n\tif err := d.ParseFromFile(template, output); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"parse deploy template error: %s\\n\", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\t//Set private key file path.\n\tif privateKey != \"\" {\n\t\td.Tools.SSH.Private, d.Tools.SSH.Public = privateKey, publicKey\n\t}\n\n\t//The integrity checking of deploy template.\n\tif err := d.Check(); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"parse deploy template error: %s\\n\", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\t//Set log and error io.Writer\n\tvar logWriters io.Writer\n\n\t//Generate stdout/stderr io.Writer\n\tstdoutFile, _ := os.Create(path.Join(d.Config, \"deploy.log\"))\n\tdefer stdoutFile.Close()\n\n\t//Using MultiWriter log and error.\n\tif verbose == true {\n\t\tlogWriters = io.MultiWriter(stdoutFile, os.Stdout)\n\t} else {\n\t\tlogWriters = io.MultiWriter(stdoutFile)\n\t}\n\n\t//Deploy cloud native stack\n\tif err := module.DeployInfraStacks(d, db, logWriters, timestamp); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\t//Delete droplets\n\tif del == true {\n\t\tif err := module.DeleteInfraStacks(d, logWriters, timestamp); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}", "func (b *Backend) enableAutoScaling(ctx context.Context) error {\n\tvar err error\n\tsvc := applicationautoscaling.New(b.session)\n\n\t// Define scaling targets. Defines minimum and maximum {read,write} capacity.\n\t_, err = svc.RegisterScalableTarget(&applicationautoscaling.RegisterScalableTargetInput{\n\t\tMinCapacity: aws.Int64(int64(b.Config.ReadMinCapacity)),\n\t\tMaxCapacity: aws.Int64(int64(b.Config.ReadMaxCapacity)),\n\t\tResourceId: aws.String(fmt.Sprintf(\"%v/%v\", resourcePrefix, b.TableName)),\n\t\tScalableDimension: aws.String(applicationautoscaling.ScalableDimensionDynamodbTableReadCapacityUnits),\n\t\tServiceNamespace: aws.String(applicationautoscaling.ServiceNamespaceDynamodb),\n\t})\n\tif err != nil {\n\t\treturn convertError(err)\n\t}\n\t_, err = svc.RegisterScalableTarget(&applicationautoscaling.RegisterScalableTargetInput{\n\t\tMinCapacity: aws.Int64(int64(b.Config.WriteMinCapacity)),\n\t\tMaxCapacity: aws.Int64(int64(b.Config.WriteMaxCapacity)),\n\t\tResourceId: aws.String(fmt.Sprintf(\"%v/%v\", resourcePrefix, b.TableName)),\n\t\tScalableDimension: aws.String(applicationautoscaling.ScalableDimensionDynamodbTableWriteCapacityUnits),\n\t\tServiceNamespace: aws.String(applicationautoscaling.ServiceNamespaceDynamodb),\n\t})\n\tif err != nil {\n\t\treturn convertError(err)\n\t}\n\n\t// Define scaling policy. 
Defines the ratio of {read,write} consumed capacity to\n\t// provisioned capacity DynamoDB will try and maintain.\n\t_, err = svc.PutScalingPolicy(&applicationautoscaling.PutScalingPolicyInput{\n\t\tPolicyName: aws.String(fmt.Sprintf(\"%v-%v\", b.TableName, readScalingPolicySuffix)),\n\t\tPolicyType: aws.String(applicationautoscaling.PolicyTypeTargetTrackingScaling),\n\t\tResourceId: aws.String(fmt.Sprintf(\"%v/%v\", resourcePrefix, b.TableName)),\n\t\tScalableDimension: aws.String(applicationautoscaling.ScalableDimensionDynamodbTableReadCapacityUnits),\n\t\tServiceNamespace: aws.String(applicationautoscaling.ServiceNamespaceDynamodb),\n\t\tTargetTrackingScalingPolicyConfiguration: &applicationautoscaling.TargetTrackingScalingPolicyConfiguration{\n\t\t\tPredefinedMetricSpecification: &applicationautoscaling.PredefinedMetricSpecification{\n\t\t\t\tPredefinedMetricType: aws.String(applicationautoscaling.MetricTypeDynamoDbreadCapacityUtilization),\n\t\t\t},\n\t\t\tTargetValue: aws.Float64(b.Config.ReadTargetValue),\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn convertError(err)\n\t}\n\t_, err = svc.PutScalingPolicy(&applicationautoscaling.PutScalingPolicyInput{\n\t\tPolicyName: aws.String(fmt.Sprintf(\"%v-%v\", b.TableName, writeScalingPolicySuffix)),\n\t\tPolicyType: aws.String(applicationautoscaling.PolicyTypeTargetTrackingScaling),\n\t\tResourceId: aws.String(fmt.Sprintf(\"%v/%v\", resourcePrefix, b.TableName)),\n\t\tScalableDimension: aws.String(applicationautoscaling.ScalableDimensionDynamodbTableWriteCapacityUnits),\n\t\tServiceNamespace: aws.String(applicationautoscaling.ServiceNamespaceDynamodb),\n\t\tTargetTrackingScalingPolicyConfiguration: &applicationautoscaling.TargetTrackingScalingPolicyConfiguration{\n\t\t\tPredefinedMetricSpecification: &applicationautoscaling.PredefinedMetricSpecification{\n\t\t\t\tPredefinedMetricType: aws.String(applicationautoscaling.MetricTypeDynamoDbwriteCapacityUtilization),\n\t\t\t},\n\t\t\tTargetValue: aws.Float64(b.Config.WriteTargetValue),\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn convertError(err)\n\t}\n\n\treturn nil\n}", "func removeAppIngress(ctx context.Context, h *helper.H, index int) {\n\tvar err error\n\n\tPublishingStrategyInstance, ps := getPublishingStrategy(ctx, h)\n\n\t// remove application ingress at index `index`\n\tappIngressList := PublishingStrategyInstance.Spec.ApplicationIngress\n\tPublishingStrategyInstance.Spec.ApplicationIngress = append(appIngressList[:index], appIngressList[index+1:]...)\n\n\tps.Object, err = runtime.DefaultUnstructuredConverter.ToUnstructured(&PublishingStrategyInstance)\n\tExpect(err).NotTo(HaveOccurred())\n\n\t// Update the publishingstrategy\n\tps, err = h.Dynamic().\n\t\tResource(schema.GroupVersionResource{Group: \"cloudingress.managed.openshift.io\", Version: \"v1alpha1\", Resource: \"publishingstrategies\"}).\n\t\tNamespace(OperatorNamespace).\n\t\tUpdate(ctx, ps, metav1.UpdateOptions{})\n\tExpect(err).NotTo(HaveOccurred())\n}", "func Ingress(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintf(w, \"Ingress request to simple-service successful!\")\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpPeerGroupsPeerGroupAfiSafisAfiSafiApplyPolicy(ctx context.Context, name string, identifier string, protocolName string, peerGroupName string, afiSafiName string, frinxOpenconfigRoutingPolicyApplypolicygroupApplyPolicyBodyParam FrinxOpenconfigRoutingPolicyApplypolicygroupApplyPolicyRequest3, nodeId string) 
(*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:peer-groups/frinx-openconfig-network-instance:peer-group/{peer-group-name}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:apply-policy/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"peer-group-name\"+\"}\", fmt.Sprintf(\"%v\", peerGroupName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigRoutingPolicyApplypolicygroupApplyPolicyBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func Configure(p *config.Provider) {\n\tp.AddResourceConfigurator(\"aws_instance\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\t\tr.ExternalName = 
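// A hedged usage sketch for the generated RESTCONF methods above: they return only
// (*http.Response, error), and on a non-2xx status the error is the
// GenericSwaggerError built in the method body. The client value, argument values
// and helper name are illustrative, not part of the generated code.
func applyPeerGroupPolicy(ctx context.Context, api *FrinxOpenconfigNetworkInstanceApiService, body FrinxOpenconfigRoutingPolicyApplypolicygroupApplyPolicyRequest3) error {
	resp, err := api.PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpPeerGroupsPeerGroupAfiSafisAfiSafiApplyPolicy(ctx, "default", "BGP", "bgp", "PG-1", "IPV4-UNICAST", body, "node-1")
	if err != nil {
		return err // a GenericSwaggerError here carries the raw response body
	}
	_ = resp // the generated method has already read and closed resp.Body
	return nil
}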
config.IdentifierFromProvider\n\t\tr.References[\"subnet_id\"] = config.Reference{\n\t\t\tType: \"Subnet\",\n\t\t}\n\t\tr.References[\"vpc_security_group_ids\"] = config.Reference{\n\t\t\tType: \"SecurityGroup\",\n\t\t\tRefFieldName: \"SecurityGroupIdRefs\",\n\t\t\tSelectorFieldName: \"SecurityGroupIdSelector\",\n\t\t}\n\t\tr.References[\"security_groups\"] = config.Reference{\n\t\t\tType: \"SecurityGroup\",\n\n\t\t\tRefFieldName: \"SecurityGroupRefs\",\n\t\t\tSelectorFieldName: \"SecurityGroupSelector\",\n\t\t}\n\t\tr.References[\"root_block_device.kms_key_id\"] = config.Reference{\n\t\t\tType: \"github.com/crossplane-contrib/provider-jet-aws/apis/kms/v1alpha2.Key\",\n\t\t}\n\t\tr.References[\"network_interface.network_interface_id\"] = config.Reference{\n\t\t\tType: \"NetworkInterface\",\n\t\t}\n\t\tr.References[\"ebs_block_device.kms_key_id\"] = config.Reference{\n\t\t\tType: \"github.com/crossplane-contrib/provider-jet-aws/apis/kms/v1alpha2.Key\",\n\t\t}\n\t\tr.LateInitializer = config.LateInitializer{\n\t\t\t// NOTE(muvaf): These are ignored because they conflict with each other.\n\t\t\t// See the following for more details: https://github.com/crossplane/terrajet/issues/107\n\t\t\tIgnoredFields: []string{\n\t\t\t\t\"subnet_id\",\n\t\t\t\t\"network_interface\",\n\t\t\t\t\"private_ip\",\n\t\t\t\t\"source_dest_check\",\n\t\t\t},\n\t\t}\n\t})\n\tp.AddResourceConfigurator(\"aws_eip\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\t\tr.ExternalName = config.IdentifierFromProvider\n\t\tr.References[\"instance\"] = config.Reference{\n\t\t\tType: \"Instance\",\n\t\t}\n\t\tr.References[\"network_interface\"] = config.Reference{\n\t\t\tType: \"NetworkInterface\",\n\t\t}\n\t\tr.UseAsync = true\n\t})\n\n\tp.AddResourceConfigurator(\"aws_ec2_transit_gateway\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\t\tr.ExternalName = config.IdentifierFromProvider\n\t})\n\n\tp.AddResourceConfigurator(\"aws_ec2_transit_gateway_route\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\t\tr.ExternalName = config.IdentifierFromProvider\n\t\tr.References[\"transit_gateway_attachment_id\"] = config.Reference{\n\t\t\tType: \"TransitGatewayVPCAttachment\",\n\t\t}\n\t\tr.References[\"transit_gateway_route_table_id\"] = config.Reference{\n\t\t\tType: \"TransitGatewayRouteTable\",\n\t\t}\n\t})\n\n\tp.AddResourceConfigurator(\"aws_ec2_transit_gateway_route_table\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\t\tr.ExternalName = config.IdentifierFromProvider\n\t\tr.References[\"transit_gateway_id\"] = config.Reference{\n\t\t\tType: \"TransitGateway\",\n\t\t}\n\t})\n\n\tp.AddResourceConfigurator(\"aws_ec2_transit_gateway_route_table_association\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\t\tr.ExternalName = config.IdentifierFromProvider\n\t\tr.References[\"transit_gateway_attachment_id\"] = config.Reference{\n\t\t\tType: \"TransitGatewayVPCAttachment\",\n\t\t}\n\t\tr.References[\"transit_gateway_route_table_id\"] = config.Reference{\n\t\t\tType: \"TransitGatewayRouteTable\",\n\t\t}\n\t})\n\n\tp.AddResourceConfigurator(\"aws_ec2_transit_gateway_vpc_attachment\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\t\tr.ExternalName = config.IdentifierFromProvider\n\t\tr.References[\"subnet_ids\"] = config.Reference{\n\t\t\tType: \"Subnet\",\n\t\t\tRefFieldName: \"SubnetIdRefs\",\n\t\t\tSelectorFieldName: \"SubnetIdSelector\",\n\t\t}\n\t\tr.References[\"transit_gateway_id\"] = 
config.Reference{\n\t\t\tType: \"TransitGateway\",\n\t\t}\n\t})\n\n\tp.AddResourceConfigurator(\"aws_ec2_transit_gateway_vpc_attachment_accepter\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\t\tr.ExternalName = config.IdentifierFromProvider\n\t\tr.References[\"transit_gateway_attachment_id\"] = config.Reference{\n\t\t\tType: \"TransitGatewayVPCAttachment\",\n\t\t}\n\t})\n\n\tp.AddResourceConfigurator(\"aws_launch_template\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\t\tr.ExternalName = config.IdentifierFromProvider\n\t\tr.References[\"security_group_names\"] = config.Reference{\n\t\t\tType: \"SecurityGroup\",\n\t\t\tRefFieldName: \"SecurityGroupNameRefs\",\n\t\t\tSelectorFieldName: \"SecurityGroupNameSelector\",\n\t\t}\n\t\tr.References[\"vpc_security_group_ids\"] = config.Reference{\n\t\t\tType: \"SecurityGroup\",\n\n\t\t\tRefFieldName: \"VpcSecurityGroupIdRefs\",\n\t\t\tSelectorFieldName: \"VpcSecurityGroupIdSelector\",\n\t\t}\n\t\tr.References[\"block_device_mappings.ebs.kms_key_id\"] = config.Reference{\n\t\t\tType: \"github.com/crossplane-contrib/provider-jet-aws/apis/kms/v1alpha2.Key\",\n\t\t}\n\t\tr.References[\"iam_instance_profile.arn\"] = config.Reference{\n\t\t\tType: \"github.com/crossplane-contrib/provider-jet-aws/apis/iam/v1alpha2.InstanceProfile\",\n\t\t\tExtractor: common.PathARNExtractor,\n\t\t}\n\t\tr.References[\"iam_instance_profile.name\"] = config.Reference{\n\t\t\tType: \"github.com/crossplane-contrib/provider-jet-aws/apis/iam/v1alpha2.InstanceProfile\",\n\t\t}\n\t\tr.References[\"network_interfaces.network_interface_id\"] = config.Reference{\n\t\t\tType: \"NetworkInterface\",\n\t\t}\n\t\tr.References[\"network_interfaces.security_groups\"] = config.Reference{\n\t\t\tType: \"SecurityGroup\",\n\t\t\tRefFieldName: \"SecurityGroupRefs\",\n\t\t\tSelectorFieldName: \"SecurityGroupSelector\",\n\t\t}\n\t\tr.References[\"network_interfaces.subnet_id\"] = config.Reference{\n\t\t\tType: \"Subnet\",\n\t\t}\n\t})\n\n\tp.AddResourceConfigurator(\"aws_vpc\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\t\tr.ExternalName = config.IdentifierFromProvider\n\t})\n\n\tp.AddResourceConfigurator(\"aws_vpc_endpoint\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\t\tr.ExternalName = config.IdentifierFromProvider\n\t\tr.References[\"subnet_ids\"] = config.Reference{\n\t\t\tType: \"Subnet\",\n\t\t\tRefFieldName: \"SubnetIdRefs\",\n\t\t\tSelectorFieldName: \"SubnetIdSelector\",\n\t\t}\n\t\tr.References[\"security_group_ids\"] = config.Reference{\n\t\t\tType: \"SecurityGroup\",\n\t\t\tRefFieldName: \"SecurityGroupIdRefs\",\n\t\t\tSelectorFieldName: \"SecurityGroupIdSelector\",\n\t\t}\n\t\tr.References[\"route_table_ids\"] = config.Reference{\n\t\t\tType: \"RouteTable\",\n\t\t\tRefFieldName: \"RouteTableIdRefs\",\n\t\t\tSelectorFieldName: \"RouteTableIdSelector\",\n\t\t}\n\t})\n\n\tp.AddResourceConfigurator(\"aws_subnet\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\t\tr.ExternalName = config.IdentifierFromProvider\n\t\tr.LateInitializer = config.LateInitializer{\n\t\t\t// NOTE(muvaf): Conflicts with AvailabilityZone. 
See the following\n\t\t\t// for more details: https://github.com/crossplane/terrajet/issues/107\n\t\t\tIgnoredFields: []string{\n\t\t\t\t\"availability_zone_id\",\n\t\t\t},\n\t\t}\n\t})\n\n\tp.AddResourceConfigurator(\"aws_network_interface\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\t\tr.ExternalName = config.IdentifierFromProvider\n\t\tr.References[\"subnet_id\"] = config.Reference{\n\t\t\tType: \"Subnet\",\n\t\t}\n\t\tr.References[\"security_groups\"] = config.Reference{\n\t\t\tType: \"SecurityGroup\",\n\t\t\tRefFieldName: \"SecurityGroupRefs\",\n\t\t\tSelectorFieldName: \"SecurityGroupSelector\",\n\t\t}\n\t\tr.References[\"attachment.instance\"] = config.Reference{\n\t\t\tType: \"Instance\",\n\t\t}\n\t\tr.LateInitializer = config.LateInitializer{\n\t\t\tIgnoredFields: []string{\n\t\t\t\t\"interface_type\",\n\t\t\t},\n\t\t}\n\t})\n\n\tp.AddResourceConfigurator(\"aws_security_group\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\t\tr.ExternalName = config.IdentifierFromProvider\n\t\t// Managed by SecurityGroupRule resource.\n\t\tif s, ok := r.TerraformResource.Schema[\"ingress\"]; ok {\n\t\t\ts.Optional = false\n\t\t\ts.Computed = true\n\t\t}\n\t\t// Managed by SecurityGroupRule resource.\n\t\tif s, ok := r.TerraformResource.Schema[\"egress\"]; ok {\n\t\t\ts.Optional = false\n\t\t\ts.Computed = true\n\t\t}\n\t\tr.References[\"egress.security_groups\"] = config.Reference{\n\t\t\tType: \"SecurityGroup\",\n\t\t\tRefFieldName: \"SecurityGroupRefs\",\n\t\t\tSelectorFieldName: \"SecurityGroupSelector\",\n\t\t}\n\t\tr.References[\"ingress.security_groups\"] = config.Reference{\n\t\t\tType: \"SecurityGroup\",\n\t\t\tRefFieldName: \"SecurityGroupRefs\",\n\t\t\tSelectorFieldName: \"SecurityGroupSelector\",\n\t\t}\n\t})\n\n\tp.AddResourceConfigurator(\"aws_security_group_rule\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\t\tr.ExternalName = config.IdentifierFromProvider\n\t\tr.References[\"security_group_id\"] = config.Reference{\n\t\t\tType: \"SecurityGroup\",\n\t\t}\n\t\tr.References[\"source_security_group_id\"] = config.Reference{\n\t\t\tType: \"SecurityGroup\",\n\t\t}\n\t})\n\n\tp.AddResourceConfigurator(\"aws_vpc_ipv4_cidr_block_association\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\t\tr.ExternalName = config.IdentifierFromProvider\n\t})\n\n\tp.AddResourceConfigurator(\"aws_vpc_peering_connection\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\t\tr.ExternalName = config.IdentifierFromProvider\n\t\tr.References[\"peer_vpc_id\"] = config.Reference{\n\t\t\tType: \"VPC\",\n\t\t}\n\t})\n\n\tp.AddResourceConfigurator(\"aws_route\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\t\tr.ExternalName = config.IdentifierFromProvider\n\t\tr.References[\"route_table_id\"] = config.Reference{\n\t\t\tType: \"RouteTable\",\n\t\t}\n\t\tr.References[\"gateway_id\"] = config.Reference{\n\t\t\tType: \"InternetGateway\",\n\t\t}\n\t\tr.References[\"instance_id\"] = config.Reference{\n\t\t\tType: \"Instance\",\n\t\t}\n\t\tr.References[\"network_interface_id\"] = config.Reference{\n\t\t\tType: \"NetworkInterface\",\n\t\t}\n\t\tr.References[\"transit_gateway_id\"] = config.Reference{\n\t\t\tType: \"TransitGateway\",\n\t\t}\n\t\tr.References[\"vpc_peering_connection_id\"] = config.Reference{\n\t\t\tType: \"VPCPeeringConnection\",\n\t\t}\n\t\tr.References[\"vpc_endpoint_id\"] = config.Reference{\n\t\t\tType: \"VPCEndpoint\",\n\t\t}\n\t\tr.UseAsync = 
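// A sketch of the configurator pattern repeated throughout Configure above, applied
// to a hypothetical "aws_example" resource: pin the API version, take the external
// name from the provider, declare one cross-resource reference, and reconcile
// asynchronously. The field names follow the terrajet config API used above; the
// resource itself is invented for illustration.
func configureExample(p *config.Provider) {
	p.AddResourceConfigurator("aws_example", func(r *config.Resource) {
		r.Version = common.VersionV1Alpha2
		r.ExternalName = config.IdentifierFromProvider
		r.References["subnet_id"] = config.Reference{
			Type: "Subnet",
		}
		r.UseAsync = true // long-running cloud operation
	})
}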
true\n\t})\n\n\tp.AddResourceConfigurator(\"aws_route_table\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\t\tr.ExternalName = config.IdentifierFromProvider\n\t\tr.References[\"route.vpc_peering_connection_id\"] = config.Reference{\n\t\t\tType: \"VPCPeeringConnection\",\n\t\t}\n\t\tr.References[\"route.vpc_endpoint_id\"] = config.Reference{\n\t\t\tType: \"VPCEndpoint\",\n\t\t}\n\t\tr.References[\"route.network_interface_id\"] = config.Reference{\n\t\t\tType: \"NetworkInterface\",\n\t\t}\n\t\tr.References[\"route.instance_id\"] = config.Reference{\n\t\t\tType: \"Instance\",\n\t\t}\n\t})\n\n\tp.AddResourceConfigurator(\"aws_route_table_association\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\t\tr.ExternalName = config.IdentifierFromProvider\n\t\tr.References[\"subnet_id\"] = config.Reference{\n\t\t\tType: \"Subnet\",\n\t\t}\n\t\tr.References[\"route_table_id\"] = config.Reference{\n\t\t\tType: \"RouteTable\",\n\t\t}\n\t})\n\n\tp.AddResourceConfigurator(\"aws_main_route_table_association\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\t\tr.ExternalName = config.IdentifierFromProvider\n\t\tr.References[\"route_table_id\"] = config.Reference{\n\t\t\tType: \"RouteTable\",\n\t\t}\n\t})\n\n\tp.AddResourceConfigurator(\"aws_ec2_transit_gateway_route_table_propagation\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\t\tr.ExternalName = config.IdentifierFromProvider\n\t\tr.References[\"transit_gateway_attachment_id\"] = config.Reference{\n\t\t\tType: \"TransitGatewayVPCAttachment\",\n\t\t}\n\t\tr.References[\"transit_gateway_route_table_id\"] = config.Reference{\n\t\t\tType: \"TransitGatewayRouteTable\",\n\t\t}\n\t})\n\n\tp.AddResourceConfigurator(\"aws_internet_gateway\", func(r *config.Resource) {\n\t\tr.Version = common.VersionV1Alpha2\n\t\tr.ExternalName = config.IdentifierFromProvider\n\t})\n}", "func (c Controller) Template(client Client, template, nameSpace string, payload *Payload) (*Dispatched, error) {\n\tvar (\n\t\tbuf bytes.Buffer\n\t\tdispatched *Dispatched\n\t\tinstanceID string\n\t\toperation = \"provision\"\n\t)\n\n\t// wrap up the logic for instansiating a build or not\n\tinstansiateBuild := func(service *Dispatched) (*Dispatched, error) {\n\n\t\tif template != templateCloudApp {\n\t\t\tdispatched.WatchURL = client.DeployLogURL(nameSpace, service.DeploymentName)\n\t\t\treturn dispatched, nil\n\t\t}\n\n\t\tbuild, err := client.InstantiateBuild(nameSpace, service.DeploymentName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif build == nil {\n\t\t\treturn nil, errors.New(\"no build returned from call to OSCP. 
Unable to continue\")\n\t\t}\n\t\tdispatched.WatchURL = client.BuildConfigLogURL(nameSpace, build.Name)\n\t\tdispatched.BuildURL = client.BuildURL(nameSpace, build.Name, payload.CloudAppGUID)\n\t\treturn dispatched, nil\n\t}\n\n\tif nameSpace == \"\" {\n\t\treturn nil, errors.New(\"an empty namespace cannot be provided\")\n\t}\n\tif err := payload.Validate(template); err != nil {\n\t\treturn nil, err\n\t}\n\tinstanceID = InstanceID(nameSpace, payload.ServiceName)\n\tstatusKey := StatusKey(instanceID, operation)\n\tif err := c.statusPublisher.Clear(statusKey); err != nil {\n\t\tc.Logger.Error(\"failed to clear status key \" + statusKey + \" continuing\")\n\t}\n\tif err := c.statusPublisher.Publish(statusKey, configInProgress, \"starting deployment of service \"+payload.ServiceName); err != nil {\n\t\tc.Logger.Error(\"failed to publish status key \" + statusKey + \" continuing\")\n\t}\n\ttpl, err := c.templateLoader.Load(template)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to load template \"+template+\": \")\n\t}\n\tif err := tpl.ExecuteTemplate(&buf, template, payload); err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to execute template: \")\n\t}\n\tosTemplate, err := c.TemplateDecoder.Decode(buf.Bytes())\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to decode into an os template: \")\n\t}\n\tsearchCrit := map[string]string{\"rhmap/name\": payload.ServiceName}\n\tif payload.CloudAppGUID != \"\" {\n\t\tsearchCrit = map[string]string{\"rhmap/guid\": payload.CloudAppGUID}\n\t}\n\tdcs, err := client.FindDeploymentConfigsByLabel(nameSpace, searchCrit)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error trying to find deployment config: \")\n\t}\n\tbc, err := client.FindBuildConfigByLabel(nameSpace, searchCrit)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error trying to find build config: \")\n\t}\n\t//check if already deployed\n\tif len(dcs) > 0 || (nil != bc && len(dcs) > 0) {\n\t\tif err := c.statusPublisher.Publish(statusKey, configInProgress, \"service already exists updating\"); err != nil {\n\t\t\tc.Logger.Error(\"failed to publish status key \" + statusKey + \" continuing \" + err.Error())\n\t\t}\n\t\tdispatched, err = c.update(client, &dcs[0], bc, osTemplate, nameSpace, instanceID, payload)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"Error updating deploy: \")\n\t\t}\n\t\tdispatched.InstanceID = instanceID\n\t\tdispatched.Operation = operation\n\t\tconfigurationDetails := &Configuration{Action: operation, DeploymentName: dispatched.DeploymentName, InstanceID: dispatched.InstanceID, NameSpace: nameSpace}\n\t\tc.ConfigurationController.Configure(client, configurationDetails)\n\t\treturn instansiateBuild(dispatched)\n\t}\n\tif err := c.statusPublisher.Publish(statusKey, configInProgress, \"service does not exist creating\"); err != nil {\n\t\tc.Logger.Error(\"failed to publish status key \" + statusKey + \" continuing \" + err.Error())\n\t}\n\t_, err = deployDependencyServices(c, client, osTemplate, nameSpace, payload)\n\tif err != nil {\n\t\tc.statusPublisher.Publish(statusKey, configError, err.Error())\n\t\treturn nil, err\n\t}\n\n\tdispatched, err = c.create(client, osTemplate, nameSpace, instanceID, payload)\n\tif err != nil {\n\t\tc.statusPublisher.Publish(statusKey, configError, err.Error())\n\t\treturn nil, err\n\t}\n\tdispatched.InstanceID = instanceID\n\tdispatched.Operation = operation\n\tconfigurationDetails := &Configuration{Action: operation, DeploymentName: dispatched.DeploymentName, InstanceID: 
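// A helper sketch: Template above repeatedly publishes a status update and only
// logs a failed publish instead of aborting. Factoring that pattern out would look
// roughly like this; the all-string signature is inferred from the call sites and
// is an assumption.
func (c Controller) publishStatus(key, state, msg string) {
	if err := c.statusPublisher.Publish(key, state, msg); err != nil {
		c.Logger.Error("failed to publish status key " + key + " continuing: " + err.Error())
	}
}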
dispatched.InstanceID, NameSpace: nameSpace}\n\tc.ConfigurationController.Configure(client, configurationDetails)\n\treturn instansiateBuild(dispatched)\n\n}", "func (t *TestSpec) CreateCiliumNetworkPolicy() (string, error) {\n\n\ttype rule map[string]interface{}\n\n\tspecs := []api.Rule{}\n\tvar err error\n\n\tingressMap := map[string]interface{}{}\n\tl4ingress := map[string]interface{}{}\n\tegressMap := map[string]interface{}{}\n\tl4egress := map[string]interface{}{}\n\n\tmetadata := []byte(`\n\t{\n\t \"apiVersion\": \"cilium.io/v2\",\n\t \"kind\": \"CiliumNetworkPolicy\",\n\t \"metadata\": {\n\t\t\"name\": \"%[1]s\",\n\t\t\"labels\": {\n\t\t\t\"test\": \"policygen\"\n\t\t}\n\t },\n\t \"specs\": %[2]s}`)\n\n\t//Create template\n\tswitch kind := t.l3.kind; kind {\n\tcase ingress:\n\t\terr = t.l3.SetTemplate(&ingressMap, t)\n\tcase egress:\n\t\terr = t.l3.SetTemplate(&egressMap, t)\n\t}\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tswitch kind := t.l4.kind; kind {\n\tcase ingress:\n\t\terr = t.l4.SetTemplate(&l4ingress, t)\n\tcase egress:\n\t\terr = t.l4.SetTemplate(&l4egress, t)\n\t}\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tswitch kind := t.l7.kind; kind {\n\tcase ingress:\n\t\terr = t.l7.SetTemplate(&l4ingress, t)\n\tcase egress:\n\t\terr = t.l7.SetTemplate(&l4egress, t)\n\t}\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(l4ingress) > 0 {\n\t\tingressMap[toPorts] = []rule{l4ingress}\n\t}\n\n\tif len(l4egress) > 0 {\n\t\tegressMap[toPorts] = []rule{l4egress}\n\t}\n\n\tif len(ingressMap) > 0 {\n\t\tvar ingressVal api.IngressRule\n\t\tjsonOut, err := json.Marshal(ingressMap)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\terr = json.Unmarshal(jsonOut, &ingressVal)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tspecs = append(specs, api.Rule{\n\t\t\tEndpointSelector: api.EndpointSelector{\n\t\t\t\tLabelSelector: &slim_metav1.LabelSelector{MatchLabels: map[string]string{\n\t\t\t\t\t\"id\": t.DestPod,\n\t\t\t\t}},\n\t\t\t},\n\t\t\tIngress: []api.IngressRule{ingressVal},\n\t\t\tEgress: []api.EgressRule{},\n\t\t})\n\t}\n\n\tif len(egressMap) > 0 {\n\t\tvar egressVal api.EgressRule\n\t\tjsonOut, err := json.Marshal(egressMap)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\terr = json.Unmarshal(jsonOut, &egressVal)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tspecs = append(specs, api.Rule{\n\t\t\tEndpointSelector: api.EndpointSelector{\n\t\t\t\tLabelSelector: &slim_metav1.LabelSelector{MatchLabels: map[string]string{\n\t\t\t\t\t\"id\": t.SrcPod,\n\t\t\t\t}},\n\t\t\t},\n\t\t\tIngress: []api.IngressRule{},\n\t\t\tEgress: []api.EgressRule{egressVal},\n\t\t})\n\t}\n\n\tif len(specs) == 0 {\n\t\treturn \"\", nil\n\t}\n\n\tjsonOutput, err := json.Marshal(specs)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn fmt.Sprintf(string(metadata), t.Prefix, jsonOutput), nil\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpNeighborsNeighborAfiSafisAfiSafiApplyPolicyConfig(ctx context.Context, name string, identifier string, protocolName string, neighborAddress string, afiSafiName string, frinxOpenconfigRoutingPolicyApplypolicygroupApplypolicyConfigBodyParam FrinxOpenconfigRoutingPolicyApplypolicygroupApplypolicyConfigRequest1, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create 
path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:neighbors/frinx-openconfig-network-instance:neighbor/{neighbor-address}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:apply-policy/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"neighbor-address\"+\"}\", fmt.Sprintf(\"%v\", neighborAddress), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigRoutingPolicyApplypolicygroupApplypolicyConfigBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (s *Stack) ApplyTemplate(c *stack.Credential) (*stack.Template, error) {\n\tcred, ok := c.Credential.(*Credential)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"credential is not of type do.Credential: %T\", c.Credential)\n\t}\n\n\tbootstrap, ok := c.Bootstrap.(*Bootstrap)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"bootstrap is not of type do.Bootstrap: %T\", 
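// A small sketch of the path templating the generated Frinx methods above repeat
// inline: every "{param}" placeholder is substituted with fmt.Sprintf("%v", value).
// The helper name and map form are illustrative.
func expandPath(tmpl string, params map[string]interface{}) string {
	for k, v := range params {
		tmpl = strings.Replace(tmpl, "{"+k+"}", fmt.Sprintf("%v", v), -1)
	}
	return tmpl
}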
c.Bootstrap)\n\t}\n\n\ttemplate := s.Builder.Template\n\ttemplate.Provider[\"digitalocean\"] = map[string]interface{}{\n\t\t\"token\": cred.AccessToken,\n\t}\n\n\tkeyID, err := strconv.Atoi(bootstrap.KeyID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdroplet, err := s.modifyDroplets(keyID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttemplate.Resource[\"digitalocean_droplet\"] = droplet\n\n\tif err := template.ShadowVariables(\"FORBIDDEN\", \"digitalocean_access_token\"); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := template.Flush(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tcontent, err := template.JsonOutput()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &stack.Template{\n\t\tContent: content,\n\t}, nil\n}", "func CreateIngressClass(namespace string, c kubernetes.Interface) (string, error) {\n\ticname := fmt.Sprintf(\"ic-%s\", namespace)\n\tvar err error\n\n\tic, err := c.NetworkingV1().IngressClasses().\n\t\tCreate(context.TODO(), &networkingv1.IngressClass{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: icname,\n\t\t\t},\n\t\t\tSpec: networkingv1.IngressClassSpec{\n\t\t\t\tController: k8s.IngressNGINXController,\n\t\t\t},\n\t\t}, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"unexpected error creating IngressClass %s: %v\", icname, err)\n\t}\n\n\t_, err = c.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{\n\t\tObjectMeta: metav1.ObjectMeta{Name: icname},\n\t\tRules: []rbacv1.PolicyRule{{\n\t\t\tAPIGroups: []string{\"networking.k8s.io\"},\n\t\t\tResources: []string{\"ingressclasses\"},\n\t\t\tVerbs: []string{\"get\", \"list\", \"watch\"},\n\t\t}},\n\t}, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"unexpected error creating IngressClass ClusterRole %s: %v\", icname, err)\n\t}\n\n\t_, err = c.RbacV1().ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: icname,\n\t\t},\n\t\tRoleRef: rbacv1.RoleRef{\n\t\t\tAPIGroup: \"rbac.authorization.k8s.io\",\n\t\t\tKind: \"ClusterRole\",\n\t\t\tName: icname,\n\t\t},\n\t\tSubjects: []rbacv1.Subject{\n\t\t\t{\n\t\t\t\tAPIGroup: \"\",\n\t\t\t\tKind: \"ServiceAccount\",\n\t\t\t\tNamespace: namespace,\n\t\t\t\tName: \"nginx-ingress\",\n\t\t\t},\n\t\t},\n\t}, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"unexpected error creating IngressClass ClusterRoleBinding %s: %v\", icname, err)\n\t}\n\treturn ic.Name, nil\n}", "func CreateOrUpdate(ctx context.Context, client *k8s.Client, req k8s.Resource, options ...k8s.Option) error {\n\tif err := client.Create(ctx, req, options...); err == nil {\n\t\treturn nil\n\t} else if !IsK8sAlreadyExists(err) {\n\t\treturn maskAny(err)\n\t}\n\t// Exists, update it\n\tif err := client.Update(ctx, req, options...); err != nil {\n\t\treturn maskAny(err)\n\t}\n\treturn nil\n}", "func populateAnnotations(clusterIdx int, depAnnotations map[string]string, svcNoTargetAnnotations map[string]string, svcTargetAnnotations map[string]string,\n\tstatefulSetAnnotations map[string]string, daemonSetAnnotations map[string]string) {\n\t// Define a static set of annotations to the deployment\n\tdepAnnotations[types.ProxyQualifier] = \"tcp\"\n\tdepAnnotations[types.AddressQualifier] = fmt.Sprintf(\"nginx-%d-dep-web\", clusterIdx)\n\tdepAnnotations[types.PortQualifier] = fmt.Sprintf(\"8080:8080,9090:8080\")\n\n\t// Set annotations to the service with no target address\n\tsvcNoTargetAnnotations[types.ProxyQualifier] = \"tcp\"\n\n\t// Set 
annotations to the service with target address\n\tsvcTargetAnnotations[types.ProxyQualifier] = \"http\"\n\tsvcTargetAnnotations[types.AddressQualifier] = fmt.Sprintf(\"nginx-%d-svc-exp-target\", clusterIdx)\n\n\t//set annotations on statefulset\n\tstatefulSetAnnotations[types.ProxyQualifier] = \"tcp\"\n\tstatefulSetAnnotations[types.AddressQualifier] = fmt.Sprintf(\"nginx-%d-ss-web\", clusterIdx)\n\n\t//set annotations on daemonset\n\tdaemonSetAnnotations[types.ProxyQualifier] = \"tcp\"\n\tdaemonSetAnnotations[types.AddressQualifier] = fmt.Sprintf(\"nginx-%d-ds-web\", clusterIdx)\n}", "func ConvertBindingsAndExposuresEgressIngress(mcs []istiomodel.Config, ci ClusterInfo) ([]istiomodel.Config, error) {\n\tout := make([]istiomodel.Config, 0)\n\n\tfor _, mc := range mcs {\n\t\tvar istio []istiomodel.Config\n\t\tvar err error\n\t\trsb, ok := mc.Spec.(*v1alpha1.RemoteServiceBinding)\n\t\tif ok {\n\t\t\tistio, err = convertRSB(mc, rsb, ci)\n\t\t}\n\t\tsep, ok := mc.Spec.(*v1alpha1.ServiceExpositionPolicy)\n\t\tif ok {\n\t\t\tistio, err = convertSEP(mc, sep)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn out, multierror.Prefix(err, \"Could not convert\")\n\t\t}\n\t\tout = append(out, istio...)\n\t}\n\n\treturn out, nil\n}", "func (r Resource) GetEndpoints(request *restful.Request, response *restful.Response) {\n\ttype element struct {\n\t\tType string `json:\"type\"`\n\t\tUrl string `json:\"url\"`\n\t}\n\tvar responses []element\n\trequestNamespace := utils.GetNamespace(request)\n\n\troute, err := r.RouteClient.RouteV1().Routes(requestNamespace).Get(tektonDashboardIngressName, metav1.GetOptions{})\n\tnoRuleError := \"no Route found labelled \" + tektonDashboardRouteName\n\tif err != nil || route == nil {\n\t\tlogging.Log.Infof(\"Unable to retrieve any routes: %s\", err)\n\t} else {\n\t\tif route.Spec.Host != \"\" { // For that rule, is there actually a host?\n\t\t\trouteHost := route.Spec.Host\n\t\t\tresponses = append(responses, element{\"Route\", routeHost})\n\t\t} else {\n\t\t\tlogging.Log.Error(noRuleError)\n\t\t}\n\t}\n\n\tingress, err := r.K8sClient.ExtensionsV1beta1().Ingresses(requestNamespace).Get(tektonDashboardIngressName, metav1.GetOptions{})\n\tnoRuleError = \"no Ingress rules found labelled \" + tektonDashboardIngressName\n\tif err != nil || ingress == nil {\n\t\tlogging.Log.Infof(\"Unable to retrieve any ingresses: %s\", err)\n\t} else {\n\t\tif len(ingress.Spec.Rules) > 0 { // Got more than zero entries?\n\t\t\tif ingress.Spec.Rules[0].Host != \"\" { // For that rule, is there actually a host?\n\t\t\t\tingressHost := ingress.Spec.Rules[0].Host\n\t\t\t\tresponses = append(responses, element{\"Ingress\", ingressHost})\n\t\t\t}\n\t\t} else {\n\t\t\tlogging.Log.Error(noRuleError)\n\t\t}\n\t}\n\n\tif len(responses) != 0 {\n\t\tresponse.WriteEntity(responses)\n\t} else {\n\t\tlogging.Log.Error(\"Unable to retrieve any Ingresses or Routes\")\n\t\tutils.RespondError(response, err, http.StatusInternalServerError)\n\t}\n}", "func registerTemplateAPIs(ws *restful.WebService) {\n\n\terr := filepath.Walk(DockerfilePath, walkDockerfiles)\n\n\tif err != nil {\n\t\tlog.WarnWithFields(\"error occurred while walking dockerfile path, \", log.Fields{\"path\": DockerfilePath, \"err\": err})\n\t}\n\n\terr = filepath.Walk(YamlPath, walkYamlfiles)\n\n\tif err != nil {\n\t\tlog.WarnWithFields(\"error occurred while walking yamlfile path, \", log.Fields{\"path\": YamlPath, \"err\": err})\n\t}\n\n\tws.Route(ws.GET(\"/templates/yamls\").\n\t\tTo(listYamlfiles).\n\t\tDoc(\"list all yaml 
templates\"))\n\n\tws.Route(ws.GET(\"/templates/yamls/{yamlfile}\").\n\t\tTo(getYamlfile).\n\t\tDoc(\"get one yaml template\").\n\t\tParam(ws.PathParameter(\"yamlfile\", \"yaml file name\").DataType(\"string\")))\n\n\tws.Route(ws.GET(\"/templates/dockerfiles\").\n\t\tTo(listDockerfiles).\n\t\tDoc(\"list all docekrfile templates\"))\n\n\tws.Route(ws.GET(\"/templates/dockerfiles/{dockerfile}\").\n\t\tTo(getDockerfile).\n\t\tDoc(\"get one docekrfile template\").\n\t\tParam(ws.PathParameter(\"dockerfile\", \"dockerfile name\").DataType(\"string\")))\n\n}", "func GenerateConfigFileValuesFromIngresses(ingresses []extensions_v1beta1.Ingress, serviceMap map[string]core_v1.Service) []ConfigFileValues {\n\tfiles := []ConfigFileValues{}\n\tfor _, ingress := range ingresses {\n\t\tingressName := fmt.Sprintf(\"%s-%s\", ingress.GetObjectMeta().GetNamespace(), ingress.GetObjectMeta().GetName())\n\t\tallowHTTP := false\n\n\t\tallowHTTPValue, exists := ingress.Annotations[\"kubernetes.io/ingress.allow-http\"]\n\t\tif exists {\n\t\t\tallowHTTP = allowHTTPValue == \"true\"\n\t\t}\n\n\t\tfor _, rule := range ingress.Spec.Rules {\n\t\t\tvalues := []ConfigValues{}\n\t\t\tfor _, path := range rule.HTTP.Paths {\n\t\t\t\tif path.Backend.ServicePort.Type == intstr.String {\n\t\t\t\t\tlog.Println(\"String port values are not yet supported\")\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tservice, ok := serviceMap[fmt.Sprintf(\"%s-%s\", ingress.GetObjectMeta().GetNamespace(), path.Backend.ServiceName)]\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.Printf(\"Service %s not found in namespace %s\\n\", path.Backend.ServiceName, ingress.GetObjectMeta().GetNamespace())\n\t\t\t\t}\n\t\t\t\tvalues = append(values, ConfigValues{\n\t\t\t\t\tPath: path.Path,\n\t\t\t\t\tHost: service.Spec.ClusterIP,\n\t\t\t\t\tPort: path.Backend.ServicePort.IntVal,\n\t\t\t\t})\n\t\t\t}\n\n\t\t\tfiles = append(files, ConfigFileValues{\n\t\t\t\tName: fmt.Sprintf(\"%s-%s\", ingressName, rule.Host),\n\t\t\t\tDomain: rule.Host,\n\t\t\t\tValues: values,\n\t\t\t\tAllowHTTP: allowHTTP,\n\t\t\t})\n\t\t}\n\t}\n\treturn files\n}", "func (data *TestData) createNPDenyAllIngress(key string, value string, name string) (*networkingv1.NetworkPolicy, error) {\n\tspec := &networkingv1.NetworkPolicySpec{\n\t\tPodSelector: metav1.LabelSelector{\n\t\t\tMatchLabels: map[string]string{\n\t\t\t\tkey: value,\n\t\t\t},\n\t\t},\n\t\tPolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},\n\t}\n\treturn data.createNetworkPolicy(name, spec)\n}", "func (a *FrinxOpenconfigNetworkInstanceApiService) PutFrinxOpenconfigNetworkInstanceNetworkInstancesNetworkInstanceProtocolsProtocolBgpPeerGroupsPeerGroupAfiSafisAfiSafiApplyPolicyConfig(ctx context.Context, name string, identifier string, protocolName string, peerGroupName string, afiSafiName string, frinxOpenconfigRoutingPolicyApplypolicygroupApplypolicyConfigBodyParam FrinxOpenconfigRoutingPolicyApplypolicygroupApplypolicyConfigRequest3, nodeId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\t\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/config/network-topology:network-topology/network-topology:topology/unified/network-topology:node/{node-id}/yang-ext:mount/frinx-openconfig-network-instance:network-instances/frinx-openconfig-network-instance:network-instance/{name}/frinx-openconfig-network-instance:protocols/frinx-openconfig-network-instance:protocol/{identifier}/{protocol-name}/frinx-openconfig-network-instance:bgp/frinx-openconfig-network-instance:peer-groups/frinx-openconfig-network-instance:peer-group/{peer-group-name}/frinx-openconfig-network-instance:afi-safis/frinx-openconfig-network-instance:afi-safi/{afi-safi-name}/frinx-openconfig-network-instance:apply-policy/frinx-openconfig-network-instance:config/\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", fmt.Sprintf(\"%v\", name), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"identifier\"+\"}\", fmt.Sprintf(\"%v\", identifier), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"protocol-name\"+\"}\", fmt.Sprintf(\"%v\", protocolName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"peer-group-name\"+\"}\", fmt.Sprintf(\"%v\", peerGroupName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"afi-safi-name\"+\"}\", fmt.Sprintf(\"%v\", afiSafiName), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"node-id\"+\"}\", fmt.Sprintf(\"%v\", nodeId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\", \"application/xml\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/xml\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &frinxOpenconfigRoutingPolicyApplypolicygroupApplypolicyConfigBodyParam\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\t\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (c *HAProxyController) updateHAProxy() {\n\tlogger.Trace(\"HAProxy config sync started\")\n\n\terr := c.Client.APIStartTransaction()\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn\n\t}\n\tdefer func() {\n\t\tc.Client.APIDisposeTransaction()\n\t}()\n\n\treload, restart := c.handleGlobalConfig()\n\n\tif route.CustomRoutes {\n\t\tlogger.Error(route.RoutesReset(c.Client))\n\t\troute.CustomRoutes = false\n\t}\n\n\tfor _, 
namespace := range c.Store.Namespaces {\n\t\tif !namespace.Relevant {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, ingress := range namespace.Ingresses {\n\t\t\tif ingress.Status == DELETED {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !c.igClassIsSupported(ingress) {\n\t\t\t\tlogger.Debugf(\"ingress '%s/%s' ignored: no matching IngressClass\", ingress.Namespace, ingress.Name)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif c.PublishService != nil {\n\t\t\t\tlogger.Error(c.k8s.UpdateIngressStatus(ingress, c.PublishService))\n\t\t\t}\n\t\t\tif ingress.DefaultBackend != nil {\n\t\t\t\tif r, errSvc := c.setDefaultService(ingress, []string{c.Cfg.FrontHTTP, c.Cfg.FrontHTTPS}); errSvc != nil {\n\t\t\t\t\tlogger.Errorf(\"Ingress '%s/%s': default backend: %s\", ingress.Namespace, ingress.Name, errSvc)\n\t\t\t\t} else {\n\t\t\t\t\treload = reload || r\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Ingress secrets\n\t\t\tlogger.Tracef(\"ingress '%s/%s': processing secrets...\", ingress.Namespace, ingress.Name)\n\t\t\tfor _, tls := range ingress.TLS {\n\t\t\t\tif tls.Status == store.DELETED {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tcrt, updated, _ := c.Cfg.Certificates.HandleTLSSecret(c.Store, haproxy.SecretCtx{\n\t\t\t\t\tDefaultNS: ingress.Namespace,\n\t\t\t\t\tSecretPath: tls.SecretName.Value,\n\t\t\t\t\tSecretType: haproxy.FT_CERT,\n\t\t\t\t})\n\t\t\t\tif crt != \"\" && updated {\n\t\t\t\t\treload = true\n\t\t\t\t\tlogger.Debugf(\"Secret '%s' in ingress '%s/%s' was updated, reload required\", tls.SecretName.Value, ingress.Namespace, ingress.Name)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Ingress annotations\n\t\t\tlogger.Tracef(\"ingress '%s/%s': processing annotations...\", ingress.Namespace, ingress.Name)\n\t\t\tif len(ingress.Rules) == 0 {\n\t\t\t\tlogger.Debugf(\"Ingress %s/%s: no rules defined\", ingress.Namespace, ingress.Name)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tc.handleIngressAnnotations(ingress)\n\t\t\t// Ingress rules\n\t\t\tlogger.Tracef(\"ingress '%s/%s': processing rules...\", ingress.Namespace, ingress.Name)\n\t\t\tfor _, rule := range ingress.Rules {\n\t\t\t\tfor _, path := range rule.Paths {\n\t\t\t\t\tif r, errIng := c.handleIngressPath(ingress, rule.Host, path); errIng != nil {\n\t\t\t\t\t\tlogger.Errorf(\"Ingress '%s/%s': %s\", ingress.Namespace, ingress.Name, errIng)\n\t\t\t\t\t} else {\n\t\t\t\t\t\treload = reload || r\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, handler := range c.UpdateHandlers {\n\t\tr, errHandler := handler.Update(c.Store, &c.Cfg, c.Client)\n\t\tlogger.Error(errHandler)\n\t\treload = reload || r\n\t}\n\n\terr = c.Client.APICommitTransaction()\n\tif err != nil {\n\t\tlogger.Error(\"unable to Sync HAProxy configuration !!\")\n\t\tlogger.Error(err)\n\t\tc.clean(true)\n\t\treturn\n\t}\n\tc.clean(false)\n\tif !c.ready {\n\t\tc.setToReady()\n\t}\n\tswitch {\n\tcase restart:\n\t\tif err = c.haproxyService(\"restart\"); err != nil {\n\t\t\tlogger.Error(err)\n\t\t} else {\n\t\t\tlogger.Info(\"HAProxy restarted\")\n\t\t}\n\tcase reload:\n\t\tif err = c.haproxyService(\"reload\"); err != nil {\n\t\t\tlogger.Error(err)\n\t\t} else {\n\t\t\tlogger.Info(\"HAProxy reloaded\")\n\t\t}\n\t}\n\n\tlogger.Trace(\"HAProxy config sync ended\")\n}", "func (r *ReconcileLegacyHeader) reconcileIngress(ctx context.Context, instance *operatorsv1alpha1.LegacyHeader, needToRequeue *bool) error {\n\treqLogger := log.WithValues(\"func\", \"reconcileIngress\", \"instance.Name\", instance.Name)\n\t// Define a new Ingress\n\tnewNavIngress := res.IngressForLegacyUI(instance)\n\t// Set instance as the owner and controller of the 
ingress\n\terr := controllerutil.SetControllerReference(instance, newNavIngress, r.scheme)\n\tif err != nil {\n\t\treqLogger.Error(err, \"Failed to set owner for Nav ingress\")\n\t\treturn nil\n\t}\n\terr = res.ReconcileIngress(ctx, r.client, instance.Namespace, res.LegacyReleaseName, newNavIngress, needToRequeue)\n\tif err != nil {\n\t\treturn err\n\t}\n\treqLogger.Info(\"got legacy header Ingress\")\n\n\treturn nil\n}", "func baseTemplate() *datamodel.NodeBootstrappingConfiguration {\n\tvar (\n\t\ttrueConst = true\n\t\tfalseConst = false\n\t)\n\treturn &datamodel.NodeBootstrappingConfiguration{\n\t\tContainerService: &datamodel.ContainerService{\n\t\t\tID: \"\",\n\t\t\tLocation: \"eastus\",\n\t\t\tName: \"\",\n\t\t\tPlan: nil,\n\t\t\tTags: map[string]string(nil),\n\t\t\tType: \"Microsoft.ContainerService/ManagedClusters\",\n\t\t\tProperties: &datamodel.Properties{\n\t\t\t\tClusterID: \"\",\n\t\t\t\tProvisioningState: \"\",\n\t\t\t\tOrchestratorProfile: &datamodel.OrchestratorProfile{\n\t\t\t\t\tOrchestratorType: \"Kubernetes\",\n\t\t\t\t\tOrchestratorVersion: \"1.26.0\",\n\t\t\t\t\tKubernetesConfig: &datamodel.KubernetesConfig{\n\t\t\t\t\t\tKubernetesImageBase: \"\",\n\t\t\t\t\t\tMCRKubernetesImageBase: \"\",\n\t\t\t\t\t\tClusterSubnet: \"\",\n\t\t\t\t\t\tNetworkPolicy: \"\",\n\t\t\t\t\t\tNetworkPlugin: \"kubenet\",\n\t\t\t\t\t\tNetworkMode: \"\",\n\t\t\t\t\t\tContainerRuntime: \"\",\n\t\t\t\t\t\tMaxPods: 0,\n\t\t\t\t\t\tDockerBridgeSubnet: \"\",\n\t\t\t\t\t\tDNSServiceIP: \"\",\n\t\t\t\t\t\tServiceCIDR: \"\",\n\t\t\t\t\t\tUseManagedIdentity: false,\n\t\t\t\t\t\tUserAssignedID: \"\",\n\t\t\t\t\t\tUserAssignedClientID: \"\",\n\t\t\t\t\t\tCustomHyperkubeImage: \"\",\n\t\t\t\t\t\tCustomKubeProxyImage: \"mcr.microsoft.com/oss/kubernetes/kube-proxy:v1.26.0.1\",\n\t\t\t\t\t\tCustomKubeBinaryURL: \"https://acs-mirror.azureedge.net/kubernetes/v1.26.0/binaries/kubernetes-node-linux-amd64.tar.gz\",\n\t\t\t\t\t\tMobyVersion: \"\",\n\t\t\t\t\t\tContainerdVersion: \"\",\n\t\t\t\t\t\tWindowsNodeBinariesURL: \"\",\n\t\t\t\t\t\tWindowsContainerdURL: \"\",\n\t\t\t\t\t\tWindowsSdnPluginURL: \"\",\n\t\t\t\t\t\tUseInstanceMetadata: &trueConst,\n\t\t\t\t\t\tEnableRbac: nil,\n\t\t\t\t\t\tEnableSecureKubelet: nil,\n\t\t\t\t\t\tPrivateCluster: nil,\n\t\t\t\t\t\tGCHighThreshold: 0,\n\t\t\t\t\t\tGCLowThreshold: 0,\n\t\t\t\t\t\tEnableEncryptionWithExternalKms: nil,\n\t\t\t\t\t\tAddons: nil,\n\t\t\t\t\t\tContainerRuntimeConfig: map[string]string(nil),\n\t\t\t\t\t\tControllerManagerConfig: map[string]string(nil),\n\t\t\t\t\t\tSchedulerConfig: map[string]string(nil),\n\t\t\t\t\t\tCloudProviderBackoffMode: \"v2\",\n\t\t\t\t\t\tCloudProviderBackoff: &trueConst,\n\t\t\t\t\t\tCloudProviderBackoffRetries: 6,\n\t\t\t\t\t\tCloudProviderBackoffJitter: 0.0,\n\t\t\t\t\t\tCloudProviderBackoffDuration: 5,\n\t\t\t\t\t\tCloudProviderBackoffExponent: 0.0,\n\t\t\t\t\t\tCloudProviderRateLimit: &trueConst,\n\t\t\t\t\t\tCloudProviderRateLimitQPS: 10.0,\n\t\t\t\t\t\tCloudProviderRateLimitQPSWrite: 10.0,\n\t\t\t\t\t\tCloudProviderRateLimitBucket: 100,\n\t\t\t\t\t\tCloudProviderRateLimitBucketWrite: 100,\n\t\t\t\t\t\tCloudProviderDisableOutboundSNAT: &falseConst,\n\t\t\t\t\t\tNodeStatusUpdateFrequency: \"\",\n\t\t\t\t\t\tLoadBalancerSku: \"Standard\",\n\t\t\t\t\t\tExcludeMasterFromStandardLB: nil,\n\t\t\t\t\t\tAzureCNIURLLinux: \"https://acs-mirror.azureedge.net/azure-cni/v1.1.8/binaries/azure-vnet-cni-linux-amd64-v1.1.8.tgz\",\n\t\t\t\t\t\tAzureCNIURLARM64Linux: \"\",\n\t\t\t\t\t\tAzureCNIURLWindows: 
\"\",\n\t\t\t\t\t\tMaximumLoadBalancerRuleCount: 250,\n\t\t\t\t\t\tPrivateAzureRegistryServer: \"\",\n\t\t\t\t\t\tNetworkPluginMode: \"\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tAgentPoolProfiles: []*datamodel.AgentPoolProfile{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"nodepool2\",\n\t\t\t\t\t\tVMSize: \"Standard_DS1_v2\",\n\t\t\t\t\t\tKubeletDiskType: \"\",\n\t\t\t\t\t\tWorkloadRuntime: \"\",\n\t\t\t\t\t\tDNSPrefix: \"\",\n\t\t\t\t\t\tOSType: \"Linux\",\n\t\t\t\t\t\tPorts: nil,\n\t\t\t\t\t\tAvailabilityProfile: \"VirtualMachineScaleSets\",\n\t\t\t\t\t\tStorageProfile: \"ManagedDisks\",\n\t\t\t\t\t\tVnetSubnetID: \"\",\n\t\t\t\t\t\tDistro: \"aks-ubuntu-containerd-18.04-gen2\",\n\t\t\t\t\t\tCustomNodeLabels: map[string]string{\n\t\t\t\t\t\t\t\"kubernetes.azure.com/mode\": \"system\",\n\t\t\t\t\t\t\t\"kubernetes.azure.com/node-image-version\": \"AKSUbuntu-1804gen2containerd-2022.01.19\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tPreprovisionExtension: nil,\n\t\t\t\t\t\tKubernetesConfig: &datamodel.KubernetesConfig{\n\t\t\t\t\t\t\tKubernetesImageBase: \"\",\n\t\t\t\t\t\t\tMCRKubernetesImageBase: \"\",\n\t\t\t\t\t\t\tClusterSubnet: \"\",\n\t\t\t\t\t\t\tNetworkPolicy: \"\",\n\t\t\t\t\t\t\tNetworkPlugin: \"\",\n\t\t\t\t\t\t\tNetworkMode: \"\",\n\t\t\t\t\t\t\tContainerRuntime: \"containerd\",\n\t\t\t\t\t\t\tMaxPods: 0,\n\t\t\t\t\t\t\tDockerBridgeSubnet: \"\",\n\t\t\t\t\t\t\tDNSServiceIP: \"\",\n\t\t\t\t\t\t\tServiceCIDR: \"\",\n\t\t\t\t\t\t\tUseManagedIdentity: false,\n\t\t\t\t\t\t\tUserAssignedID: \"\",\n\t\t\t\t\t\t\tUserAssignedClientID: \"\",\n\t\t\t\t\t\t\tCustomHyperkubeImage: \"\",\n\t\t\t\t\t\t\tCustomKubeProxyImage: \"\",\n\t\t\t\t\t\t\tCustomKubeBinaryURL: \"\",\n\t\t\t\t\t\t\tMobyVersion: \"\",\n\t\t\t\t\t\t\tContainerdVersion: \"\",\n\t\t\t\t\t\t\tWindowsNodeBinariesURL: \"\",\n\t\t\t\t\t\t\tWindowsContainerdURL: \"\",\n\t\t\t\t\t\t\tWindowsSdnPluginURL: \"\",\n\t\t\t\t\t\t\tUseInstanceMetadata: nil,\n\t\t\t\t\t\t\tEnableRbac: nil,\n\t\t\t\t\t\t\tEnableSecureKubelet: nil,\n\t\t\t\t\t\t\tPrivateCluster: nil,\n\t\t\t\t\t\t\tGCHighThreshold: 0,\n\t\t\t\t\t\t\tGCLowThreshold: 0,\n\t\t\t\t\t\t\tEnableEncryptionWithExternalKms: nil,\n\t\t\t\t\t\t\tAddons: nil,\n\t\t\t\t\t\t\tContainerRuntimeConfig: map[string]string(nil),\n\t\t\t\t\t\t\tControllerManagerConfig: map[string]string(nil),\n\t\t\t\t\t\t\tSchedulerConfig: map[string]string(nil),\n\t\t\t\t\t\t\tCloudProviderBackoffMode: \"\",\n\t\t\t\t\t\t\tCloudProviderBackoff: nil,\n\t\t\t\t\t\t\tCloudProviderBackoffRetries: 0,\n\t\t\t\t\t\t\tCloudProviderBackoffJitter: 0.0,\n\t\t\t\t\t\t\tCloudProviderBackoffDuration: 0,\n\t\t\t\t\t\t\tCloudProviderBackoffExponent: 0.0,\n\t\t\t\t\t\t\tCloudProviderRateLimit: nil,\n\t\t\t\t\t\t\tCloudProviderRateLimitQPS: 0.0,\n\t\t\t\t\t\t\tCloudProviderRateLimitQPSWrite: 0.0,\n\t\t\t\t\t\t\tCloudProviderRateLimitBucket: 0,\n\t\t\t\t\t\t\tCloudProviderRateLimitBucketWrite: 0,\n\t\t\t\t\t\t\tCloudProviderDisableOutboundSNAT: nil,\n\t\t\t\t\t\t\tNodeStatusUpdateFrequency: \"\",\n\t\t\t\t\t\t\tLoadBalancerSku: \"\",\n\t\t\t\t\t\t\tExcludeMasterFromStandardLB: nil,\n\t\t\t\t\t\t\tAzureCNIURLLinux: \"\",\n\t\t\t\t\t\t\tAzureCNIURLARM64Linux: \"\",\n\t\t\t\t\t\t\tAzureCNIURLWindows: \"\",\n\t\t\t\t\t\t\tMaximumLoadBalancerRuleCount: 0,\n\t\t\t\t\t\t\tPrivateAzureRegistryServer: \"\",\n\t\t\t\t\t\t\tNetworkPluginMode: \"\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tVnetCidrs: nil,\n\t\t\t\t\t\tWindowsNameVersion: \"\",\n\t\t\t\t\t\tCustomKubeletConfig: nil,\n\t\t\t\t\t\tCustomLinuxOSConfig: nil,\n\t\t\t\t\t\tMessageOfTheDay: 
\"\",\n\t\t\t\t\t\tNotRebootWindowsNode: nil,\n\t\t\t\t\t\tAgentPoolWindowsProfile: nil,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tLinuxProfile: &datamodel.LinuxProfile{\n\t\t\t\t\tAdminUsername: \"azureuser\",\n\t\t\t\t\tSSH: struct {\n\t\t\t\t\t\tPublicKeys []datamodel.PublicKey \"json:\\\"publicKeys\\\"\"\n\t\t\t\t\t}{\n\t\t\t\t\t\tPublicKeys: []datamodel.PublicKey{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKeyData: \"dummysshkey\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tSecrets: nil,\n\t\t\t\t\tDistro: \"\",\n\t\t\t\t\tCustomSearchDomain: nil,\n\t\t\t\t},\n\t\t\t\tWindowsProfile: nil,\n\t\t\t\tExtensionProfiles: nil,\n\t\t\t\tDiagnosticsProfile: nil,\n\t\t\t\tServicePrincipalProfile: &datamodel.ServicePrincipalProfile{\n\t\t\t\t\tClientID: \"msi\",\n\t\t\t\t\tSecret: \"msi\",\n\t\t\t\t\tObjectID: \"\",\n\t\t\t\t\tKeyvaultSecretRef: nil,\n\t\t\t\t},\n\t\t\t\tCertificateProfile: &datamodel.CertificateProfile{\n\t\t\t\t\tCaCertificate: \"\",\n\t\t\t\t\tAPIServerCertificate: \"\",\n\t\t\t\t\tClientCertificate: \"\",\n\t\t\t\t\tClientPrivateKey: \"\",\n\t\t\t\t\tKubeConfigCertificate: \"\",\n\t\t\t\t\tKubeConfigPrivateKey: \"\",\n\t\t\t\t},\n\t\t\t\tAADProfile: nil,\n\t\t\t\tCustomProfile: nil,\n\t\t\t\tHostedMasterProfile: &datamodel.HostedMasterProfile{\n\t\t\t\t\tFQDN: \"\",\n\t\t\t\t\tIPAddress: \"\",\n\t\t\t\t\tDNSPrefix: \"\",\n\t\t\t\t\tFQDNSubdomain: \"\",\n\t\t\t\t\tSubnet: \"\",\n\t\t\t\t\tAPIServerWhiteListRange: nil,\n\t\t\t\t\tIPMasqAgent: true,\n\t\t\t\t},\n\t\t\t\tAddonProfiles: map[string]datamodel.AddonProfile(nil),\n\t\t\t\tFeatureFlags: nil,\n\t\t\t\tCustomCloudEnv: nil,\n\t\t\t\tCustomConfiguration: nil,\n\t\t\t},\n\t\t},\n\t\tCloudSpecConfig: &datamodel.AzureEnvironmentSpecConfig{\n\t\t\tCloudName: \"AzurePublicCloud\",\n\t\t\tDockerSpecConfig: datamodel.DockerSpecConfig{\n\t\t\t\tDockerEngineRepo: \"https://aptdocker.azureedge.net/repo\",\n\t\t\t\tDockerComposeDownloadURL: \"https://github.com/docker/compose/releases/download\",\n\t\t\t},\n\t\t\tKubernetesSpecConfig: datamodel.KubernetesSpecConfig{\n\t\t\t\tAzureTelemetryPID: \"\",\n\t\t\t\tKubernetesImageBase: \"k8s.gcr.io/\",\n\t\t\t\tTillerImageBase: \"gcr.io/kubernetes-helm/\",\n\t\t\t\tACIConnectorImageBase: \"microsoft/\",\n\t\t\t\tMCRKubernetesImageBase: \"mcr.microsoft.com/\",\n\t\t\t\tNVIDIAImageBase: \"nvidia/\",\n\t\t\t\tAzureCNIImageBase: \"mcr.microsoft.com/containernetworking/\",\n\t\t\t\tCalicoImageBase: \"calico/\",\n\t\t\t\tEtcdDownloadURLBase: \"\",\n\t\t\t\tKubeBinariesSASURLBase: \"https://acs-mirror.azureedge.net/kubernetes/\",\n\t\t\t\tWindowsTelemetryGUID: \"fb801154-36b9-41bc-89c2-f4d4f05472b0\",\n\t\t\t\tCNIPluginsDownloadURL: \"https://acs-mirror.azureedge.net/cni/cni-plugins-amd64-v0.7.6.tgz\",\n\t\t\t\tVnetCNILinuxPluginsDownloadURL: \"https://acs-mirror.azureedge.net/azure-cni/v1.1.3/binaries/azure-vnet-cni-linux-amd64-v1.1.3.tgz\",\n\t\t\t\tVnetCNIWindowsPluginsDownloadURL: \"https://acs-mirror.azureedge.net/azure-cni/v1.1.3/binaries/azure-vnet-cni-singletenancy-windows-amd64-v1.1.3.zip\",\n\t\t\t\tContainerdDownloadURLBase: \"https://storage.googleapis.com/cri-containerd-release/\",\n\t\t\t\tCSIProxyDownloadURL: \"https://acs-mirror.azureedge.net/csi-proxy/v0.1.0/binaries/csi-proxy.tar.gz\",\n\t\t\t\tWindowsProvisioningScriptsPackageURL: \"https://acs-mirror.azureedge.net/aks-engine/windows/provisioning/signedscripts-v0.2.2.zip\",\n\t\t\t\tWindowsPauseImageURL: \"mcr.microsoft.com/oss/kubernetes/pause:1.4.0\",\n\t\t\t\tAlwaysPullWindowsPauseImage: 
false,\n\t\t\t\tCseScriptsPackageURL: \"https://acs-mirror.azureedge.net/aks/windows/cse/csescripts-v0.0.1.zip\",\n\t\t\t\tCNIARM64PluginsDownloadURL: \"https://acs-mirror.azureedge.net/cni-plugins/v0.8.7/binaries/cni-plugins-linux-arm64-v0.8.7.tgz\",\n\t\t\t\tVnetCNIARM64LinuxPluginsDownloadURL: \"https://acs-mirror.azureedge.net/azure-cni/v1.4.13/binaries/azure-vnet-cni-linux-arm64-v1.4.14.tgz\",\n\t\t\t},\n\t\t\tEndpointConfig: datamodel.AzureEndpointConfig{\n\t\t\t\tResourceManagerVMDNSSuffix: \"cloudapp.azure.com\",\n\t\t\t},\n\t\t\tOSImageConfig: map[datamodel.Distro]datamodel.AzureOSImageConfig(nil),\n\t\t},\n\t\tK8sComponents: &datamodel.K8sComponents{\n\t\t\tPodInfraContainerImageURL: \"mcr.microsoft.com/oss/kubernetes/pause:3.6\",\n\t\t\tHyperkubeImageURL: \"mcr.microsoft.com/oss/kubernetes/\",\n\t\t\tWindowsPackageURL: \"windowspackage\",\n\t\t},\n\t\tAgentPoolProfile: &datamodel.AgentPoolProfile{\n\t\t\tName: \"nodepool2\",\n\t\t\tVMSize: \"Standard_DS1_v2\",\n\t\t\tKubeletDiskType: \"\",\n\t\t\tWorkloadRuntime: \"\",\n\t\t\tDNSPrefix: \"\",\n\t\t\tOSType: \"Linux\",\n\t\t\tPorts: nil,\n\t\t\tAvailabilityProfile: \"VirtualMachineScaleSets\",\n\t\t\tStorageProfile: \"ManagedDisks\",\n\t\t\tVnetSubnetID: \"\",\n\t\t\tDistro: \"aks-ubuntu-containerd-18.04-gen2\",\n\t\t\tCustomNodeLabels: map[string]string{\n\t\t\t\t\"kubernetes.azure.com/mode\": \"system\",\n\t\t\t\t\"kubernetes.azure.com/node-image-version\": \"AKSUbuntu-1804gen2containerd-2022.01.19\",\n\t\t\t},\n\t\t\tPreprovisionExtension: nil,\n\t\t\tKubernetesConfig: &datamodel.KubernetesConfig{\n\t\t\t\tKubernetesImageBase: \"\",\n\t\t\t\tMCRKubernetesImageBase: \"\",\n\t\t\t\tClusterSubnet: \"\",\n\t\t\t\tNetworkPolicy: \"\",\n\t\t\t\tNetworkPlugin: \"\",\n\t\t\t\tNetworkMode: \"\",\n\t\t\t\tContainerRuntime: \"containerd\",\n\t\t\t\tMaxPods: 0,\n\t\t\t\tDockerBridgeSubnet: \"\",\n\t\t\t\tDNSServiceIP: \"\",\n\t\t\t\tServiceCIDR: \"\",\n\t\t\t\tUseManagedIdentity: false,\n\t\t\t\tUserAssignedID: \"\",\n\t\t\t\tUserAssignedClientID: \"\",\n\t\t\t\tCustomHyperkubeImage: \"\",\n\t\t\t\tCustomKubeProxyImage: \"\",\n\t\t\t\tCustomKubeBinaryURL: \"\",\n\t\t\t\tMobyVersion: \"\",\n\t\t\t\tContainerdVersion: \"\",\n\t\t\t\tWindowsNodeBinariesURL: \"\",\n\t\t\t\tWindowsContainerdURL: \"\",\n\t\t\t\tWindowsSdnPluginURL: \"\",\n\t\t\t\tUseInstanceMetadata: nil,\n\t\t\t\tEnableRbac: nil,\n\t\t\t\tEnableSecureKubelet: nil,\n\t\t\t\tPrivateCluster: nil,\n\t\t\t\tGCHighThreshold: 0,\n\t\t\t\tGCLowThreshold: 0,\n\t\t\t\tEnableEncryptionWithExternalKms: nil,\n\t\t\t\tAddons: nil,\n\t\t\t\tContainerRuntimeConfig: map[string]string(nil),\n\t\t\t\tControllerManagerConfig: map[string]string(nil),\n\t\t\t\tSchedulerConfig: map[string]string(nil),\n\t\t\t\tCloudProviderBackoffMode: \"\",\n\t\t\t\tCloudProviderBackoff: nil,\n\t\t\t\tCloudProviderBackoffRetries: 0,\n\t\t\t\tCloudProviderBackoffJitter: 0.0,\n\t\t\t\tCloudProviderBackoffDuration: 0,\n\t\t\t\tCloudProviderBackoffExponent: 0.0,\n\t\t\t\tCloudProviderRateLimit: nil,\n\t\t\t\tCloudProviderRateLimitQPS: 0.0,\n\t\t\t\tCloudProviderRateLimitQPSWrite: 0.0,\n\t\t\t\tCloudProviderRateLimitBucket: 0,\n\t\t\t\tCloudProviderRateLimitBucketWrite: 0,\n\t\t\t\tCloudProviderDisableOutboundSNAT: nil,\n\t\t\t\tNodeStatusUpdateFrequency: \"\",\n\t\t\t\tLoadBalancerSku: \"\",\n\t\t\t\tExcludeMasterFromStandardLB: nil,\n\t\t\t\tAzureCNIURLLinux: \"\",\n\t\t\t\tAzureCNIURLARM64Linux: \"\",\n\t\t\t\tAzureCNIURLWindows: \"\",\n\t\t\t\tMaximumLoadBalancerRuleCount: 0,\n\t\t\t\tPrivateAzureRegistryServer: 
\"\",\n\t\t\t\tNetworkPluginMode: \"\",\n\t\t\t},\n\t\t\tVnetCidrs: nil,\n\t\t\tWindowsNameVersion: \"\",\n\t\t\tCustomKubeletConfig: nil,\n\t\t\tCustomLinuxOSConfig: nil,\n\t\t\tMessageOfTheDay: \"\",\n\t\t\tNotRebootWindowsNode: nil,\n\t\t\tAgentPoolWindowsProfile: nil,\n\t\t},\n\t\tTenantID: \"\",\n\t\tSubscriptionID: \"\",\n\t\tResourceGroupName: \"\",\n\t\tUserAssignedIdentityClientID: \"\",\n\t\tOSSKU: \"\",\n\t\tConfigGPUDriverIfNeeded: true,\n\t\tDisable1804SystemdResolved: false,\n\t\tEnableGPUDevicePluginIfNeeded: false,\n\t\tEnableKubeletConfigFile: false,\n\t\tEnableNvidia: false,\n\t\tEnableACRTeleportPlugin: false,\n\t\tTeleportdPluginURL: \"\",\n\t\tContainerdVersion: \"\",\n\t\tRuncVersion: \"\",\n\t\tContainerdPackageURL: \"\",\n\t\tRuncPackageURL: \"\",\n\t\tKubeletClientTLSBootstrapToken: nil,\n\t\tFIPSEnabled: false,\n\t\tHTTPProxyConfig: &datamodel.HTTPProxyConfig{\n\t\t\tHTTPProxy: nil,\n\t\t\tHTTPSProxy: nil,\n\t\t\tNoProxy: &[]string{\n\t\t\t\t\"localhost\",\n\t\t\t\t\"127.0.0.1\",\n\t\t\t\t\"168.63.129.16\",\n\t\t\t\t\"169.254.169.254\",\n\t\t\t\t\"10.0.0.0/16\",\n\t\t\t\t\"agentbaker-agentbaker-e2e-t-8ecadf-c82d8251.hcp.eastus.azmk8s.io\",\n\t\t\t},\n\t\t\tTrustedCA: nil,\n\t\t},\n\t\tKubeletConfig: map[string]string{\n\t\t\t\"--address\": \"0.0.0.0\",\n\t\t\t\"--anonymous-auth\": \"false\",\n\t\t\t\"--authentication-token-webhook\": \"true\",\n\t\t\t\"--authorization-mode\": \"Webhook\",\n\t\t\t\"--azure-container-registry-config\": \"/etc/kubernetes/azure.json\",\n\t\t\t\"--cgroups-per-qos\": \"true\",\n\t\t\t\"--client-ca-file\": \"/etc/kubernetes/certs/ca.crt\",\n\t\t\t\"--cloud-config\": \"/etc/kubernetes/azure.json\",\n\t\t\t\"--cloud-provider\": \"azure\",\n\t\t\t\"--cluster-dns\": \"10.0.0.10\",\n\t\t\t\"--cluster-domain\": \"cluster.local\",\n\t\t\t\"--dynamic-config-dir\": \"/var/lib/kubelet\",\n\t\t\t\"--enforce-node-allocatable\": \"pods\",\n\t\t\t\"--event-qps\": \"0\",\n\t\t\t\"--eviction-hard\": \"memory.available<750Mi,nodefs.available<10%,nodefs.inodesFree<5%\",\n\t\t\t\"--feature-gates\": \"RotateKubeletServerCertificate=true\",\n\t\t\t\"--image-gc-high-threshold\": \"85\",\n\t\t\t\"--image-gc-low-threshold\": \"80\",\n\t\t\t\"--keep-terminated-pod-volumes\": \"false\",\n\t\t\t\"--kube-reserved\": \"cpu=100m,memory=1638Mi\",\n\t\t\t\"--kubeconfig\": \"/var/lib/kubelet/kubeconfig\",\n\t\t\t\"--max-pods\": \"110\",\n\t\t\t\"--network-plugin\": \"kubenet\",\n\t\t\t\"--node-status-update-frequency\": \"10s\",\n\t\t\t\"--pod-infra-container-image\": \"mcr.microsoft.com/oss/kubernetes/pause:3.6\",\n\t\t\t\"--pod-manifest-path\": \"/etc/kubernetes/manifests\",\n\t\t\t\"--pod-max-pids\": \"-1\",\n\t\t\t\"--protect-kernel-defaults\": \"true\",\n\t\t\t\"--read-only-port\": \"0\",\n\t\t\t\"--resolv-conf\": \"/run/systemd/resolve/resolv.conf\",\n\t\t\t\"--rotate-certificates\": \"false\",\n\t\t\t\"--streaming-connection-idle-timeout\": \"4h\",\n\t\t\t\"--tls-cert-file\": \"/etc/kubernetes/certs/kubeletserver.crt\",\n\t\t\t\"--tls-cipher-suites\": \"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\t\"--tls-private-key-file\": \"/etc/kubernetes/certs/kubeletserver.key\",\n\t\t},\n\t\tKubeproxyConfig: map[string]string(nil),\n\t\tEnableRuncShimV2: false,\n\t\tGPUInstanceProfile: \"\",\n\t\tPrimaryScaleSetName: 
\"\",\n\t\tSIGConfig: datamodel.SIGConfig{\n\t\t\tTenantID: \"tenantID\",\n\t\t\tSubscriptionID: \"subID\",\n\t\t\tGalleries: map[string]datamodel.SIGGalleryConfig{\n\t\t\t\t\"AKSUbuntu\": {\n\t\t\t\t\tGalleryName: \"aksubuntu\",\n\t\t\t\t\tResourceGroup: \"resourcegroup\",\n\t\t\t\t},\n\t\t\t\t\"AKSCBLMariner\": {\n\t\t\t\t\tGalleryName: \"akscblmariner\",\n\t\t\t\t\tResourceGroup: \"resourcegroup\",\n\t\t\t\t},\n\t\t\t\t\"AKSWindows\": {\n\t\t\t\t\tGalleryName: \"AKSWindows\",\n\t\t\t\t\tResourceGroup: \"AKS-Windows\",\n\t\t\t\t},\n\t\t\t\t\"AKSUbuntuEdgeZone\": {\n\t\t\t\t\tGalleryName: \"AKSUbuntuEdgeZone\",\n\t\t\t\t\tResourceGroup: \"AKS-Ubuntu-EdgeZone\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tIsARM64: false,\n\t\tCustomCATrustConfig: nil,\n\t\tDisableUnattendedUpgrades: true,\n\t\tSSHStatus: 0,\n\t\tDisableCustomData: false,\n\t}\n}", "func add(mgr manager.Manager, r reconcile.Reconciler) error {\n\t// Create a new controller\n\tc, err := controller.New(\"serviceinstance-controller\", mgr, controller.Options{Reconciler: r})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for changes to ServiceInstance\n\terr = c.Watch(&source.Kind{Type: &osbv1alpha1.SFServiceInstance{}}, &handler.EnqueueRequestForObject{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// TODO dynamically setup rbac rules and watches\n\tpostgres := &unstructured.Unstructured{}\n\tpostgres.SetKind(\"Postgres\")\n\tpostgres.SetAPIVersion(\"kubedb.com/v1alpha1\")\n\tpostgres2 := &unstructured.Unstructured{}\n\tpostgres2.SetKind(\"Postgresql\")\n\tpostgres2.SetAPIVersion(\"kubernetes.sapcloud.io/v1alpha1\")\n\tdirector := &unstructured.Unstructured{}\n\tdirector.SetKind(\"Director\")\n\tdirector.SetAPIVersion(\"deployment.servicefabrik.io/v1alpha1\")\n\tdocker := &unstructured.Unstructured{}\n\tdocker.SetKind(\"Docker\")\n\tdocker.SetAPIVersion(\"deployment.servicefabrik.io/v1alpha1\")\n\tpostgresqlmts := &unstructured.Unstructured{}\n\tpostgresqlmts.SetKind(\"PostgresqlMT\")\n\tpostgresqlmts.SetAPIVersion(\"deployment.servicefabrik.io/v1alpha1\")\n\tvhostmts := &unstructured.Unstructured{}\n\tvhostmts.SetKind(\"VirtualHost\")\n\tvhostmts.SetAPIVersion(\"deployment.servicefabrik.io/v1alpha1\")\n\tsubresources := []runtime.Object{\n\t\t&appsv1.Deployment{},\n\t\t&corev1.ConfigMap{},\n\t\tpostgres,\n\t\tpostgres2,\n\t\tdirector,\n\t\tdocker,\n\t\tpostgresqlmts,\n\t\tvhostmts,\n\t}\n\n\tfor _, subresource := range subresources {\n\t\terr = c.Watch(&source.Kind{Type: subresource}, &handler.EnqueueRequestForOwner{\n\t\t\tIsController: true,\n\t\t\tOwnerType: &osbv1alpha1.SFServiceInstance{},\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Printf(\"%v\", err)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c *ovnClient) CreateIngressAcl(pgName, asIngressName, asExceptName, protocol string, npp []netv1.NetworkPolicyPort) error {\n\tacls := make([]*ovnnb.ACL, 0)\n\n\t/* default drop acl */\n\tAllIpMatch := NewAndAclMatch(\n\t\tNewAclMatch(\"outport\", \"==\", \"@\"+pgName, \"\"),\n\t\tNewAclMatch(\"ip\", \"\", \"\", \"\"),\n\t)\n\toptions := func(acl *ovnnb.ACL) {\n\t\tacl.Name = &pgName\n\t\tacl.Log = true\n\t\tacl.Severity = &ovnnb.ACLSeverityWarning\n\t}\n\n\tdefaultDropAcl, err := c.newAcl(pgName, ovnnb.ACLDirectionToLport, util.IngressDefaultDrop, AllIpMatch.String(), ovnnb.ACLActionDrop, options)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"new default drop ingress acl for port group %s: %v\", pgName, err)\n\t}\n\n\tacls = append(acls, defaultDropAcl)\n\n\t/* allow acl */\n\tmatches := newNetworkPolicyAclMatch(pgName, asIngressName, 
asExceptName, protocol, ovnnb.ACLDirectionToLport, npp)\n\tfor _, m := range matches {\n\t\tallowAcl, err := c.newAcl(pgName, ovnnb.ACLDirectionToLport, util.IngressAllowPriority, m, ovnnb.ACLActionAllowRelated)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"new allow ingress acl for port group %s: %v\", pgName, err)\n\t\t}\n\n\t\tacls = append(acls, allowAcl)\n\t}\n\n\tif err := c.CreateAcls(pgName, portGroupKey, acls...); err != nil {\n\t\treturn fmt.Errorf(\"add ingress acls to port group %s: %v\", pgName, err)\n\t}\n\n\treturn nil\n}", "func ReconcileIngresses(ctx context.Context, namedGetters []NamedIngressCreatorGetter, namespace string, client ctrlruntimeclient.Client, objectModifiers ...ObjectModifier) error {\n\tfor _, get := range namedGetters {\n\t\tname, create := get()\n\t\tcreateObject := IngressObjectWrapper(create)\n\t\tcreateObject = createWithNamespace(createObject, namespace)\n\t\tcreateObject = createWithName(createObject, name)\n\n\t\tfor _, objectModifier := range objectModifiers {\n\t\t\tcreateObject = objectModifier(createObject)\n\t\t}\n\n\t\tif err := EnsureNamedObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, createObject, client, &extensionsv1beta1.Ingress{}, false); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to ensure Ingress %s/%s: %v\", namespace, name, err)\n\t\t}\n\t}\n\n\treturn nil\n}" ]
[ "0.6070712", "0.5991409", "0.5934488", "0.5794082", "0.5787708", "0.56982076", "0.5579018", "0.55455333", "0.54942566", "0.5434323", "0.53162134", "0.5253241", "0.5249246", "0.52103496", "0.5191674", "0.51860744", "0.5181246", "0.51493275", "0.5131349", "0.51312405", "0.5122762", "0.50992364", "0.5079273", "0.50743085", "0.5071563", "0.5067423", "0.50316006", "0.5024039", "0.5018327", "0.50117785", "0.5011328", "0.50110847", "0.49763986", "0.4974498", "0.49477315", "0.49314278", "0.4924187", "0.4899473", "0.4887587", "0.48767215", "0.4866112", "0.48395413", "0.48368615", "0.4833063", "0.48293757", "0.48203844", "0.48125303", "0.47913066", "0.47862142", "0.47748607", "0.47727078", "0.47684348", "0.47679302", "0.47644588", "0.4759409", "0.4759066", "0.47525713", "0.47461966", "0.47289696", "0.4727243", "0.4726903", "0.47238952", "0.47202855", "0.47120452", "0.47094736", "0.4705654", "0.47055295", "0.4704195", "0.47031066", "0.47014162", "0.4694582", "0.46844274", "0.46788257", "0.4676469", "0.4672302", "0.46695885", "0.46422878", "0.46422246", "0.46403039", "0.46178603", "0.46118113", "0.460774", "0.46071452", "0.46053824", "0.46046865", "0.45875505", "0.45776954", "0.45738035", "0.45705655", "0.45639053", "0.45578682", "0.45480478", "0.45424598", "0.45408934", "0.45374772", "0.45359355", "0.45138454", "0.4513683", "0.45119303", "0.4498934" ]
0.61125463
0
Get performs GET request for MobileAppContentFile
func (r *MobileAppContentFileRequest) Get(ctx context.Context) (resObj *MobileAppContentFile, err error) {
	var query string
	if r.query != nil {
		query = "?" + r.query.Encode()
	}
	err = r.JSONRequest(ctx, "GET", query, nil, &resObj)
	return
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *EzClient) FileGet(url, path string) (int, error) {\n\tresp, err := c.Client.Get(url)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn writeToPath(resp, path)\n}", "func (r *httpRetriever) get(uri *url.URL) (b []byte, err error) {\n\treturn r.getFile(uri)\n}", "func GetHandler(basePath string, w http.ResponseWriter, r *http.Request) {\n\tFilesLock.RLock()\n\tval, ok := Files.Get(fileKey(r.URL))\n\tFilesLock.RUnlock()\n\n\tif !ok {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\tf := val.(*File)\n\n\tlog.Println(\"GET Content-Type \" + f.ContentType)\n\n\tw.Header().Set(\"Content-Type\", f.ContentType)\n\tw.Header().Set(\"Transfer-Encoding\", \"chunked\")\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tw.WriteHeader(http.StatusOK)\n\tio.Copy(ChunkedResponseWriter{w}, f.NewReader(basePath, w))\n}", "func (client *Client) getContents(id string, mimeType string) ([]byte, error) {\n\tres, err := client.Service.Files.Export(id, mimeType).Download()\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\tdefer res.Body.Close()\n\treturn ioutil.ReadAll(res.Body)\n}", "func (m *Minio) GetContent(ctx context.Context, bucketName, fileName string) ([]byte, error) {\n\tobject, err := m.client.GetObject(ctx, bucketName, fileName, minio.GetObjectOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuf := new(bytes.Buffer)\n\tif _, err := buf.ReadFrom(object); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func get(resource string) ([]byte, error) {\n\thttpParams := &HTTPParams{\n\t\tResource: resource,\n\t\tVerb: \"GET\",\n\t}\n\treturn processRequest(httpParams)\n}", "func GetFile(w http.ResponseWriter, r *http.Request) {\n\tfmt.Print(r.Header.Get(\"Authorization\"))\n\trespondWithJSON(w, http.StatusOK, Response{true, \"File uploaded successfully\"})\n}", "func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {\n\tpath = path[1:]\n\tbaseUrl := qiniu.MakeBaseUrl(d.Config.Domain,path)\n\tfmt.Print(baseUrl)\n\tres, err := http.Get(baseUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcontent, err := ioutil.ReadAll(res.Body)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn content, nil\n}", "func GetContent(host, path string, requiredCode int) ([]byte, error) {\n\tresp, err := GetRequest(host, path)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\tdata, err := out(resp, requiredCode)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\treturn data, nil\n}", "func getFileContentsFromId(fileId string) []byte {\r\n // returns *FilesGetCall from file id\r\n getter := fs.Get(fileId)\r\n // download the file\r\n random := strconv.Itoa(rand.Int())\r\n \r\n // this is terrible but works. 
If the server gives an error, just try again\r\n var resp *http.Response = nil\r\n var err error = errors.New(\"empty error\")\r\n for err != nil {\r\n resp, err = getter.Download(googleapi.QuotaUser(random))\r\n }\r\n \r\n //handleError(err)\r\n // print the contents to the screen\r\n robots, err := ioutil.ReadAll(resp.Body)\r\n resp.Body.Close()\r\n if err != nil {\r\n log.Fatal(err)\r\n }\r\n return robots\r\n}", "func (c *client) GetFile(org, repo, filepath, commit string) ([]byte, error) {\n\tdurationLogger := c.log(\"GetFile\", org, repo, filepath, commit)\n\tdefer durationLogger()\n\n\tpath := fmt.Sprintf(\"/repos/%s/%s/contents/%s\", org, repo, filepath)\n\tif commit != \"\" {\n\t\tpath = fmt.Sprintf(\"%s?ref=%s\", path, url.QueryEscape(commit))\n\t}\n\n\tvar res Content\n\tcode, err := c.request(&request{\n\t\tmethod: http.MethodGet,\n\t\tpath: path,\n\t\torg: org,\n\t\texitCodes: []int{200, 404},\n\t}, &res)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif code == 404 {\n\t\treturn nil, &FileNotFound{\n\t\t\torg: org,\n\t\t\trepo: repo,\n\t\t\tpath: filepath,\n\t\t\tcommit: commit,\n\t\t}\n\t}\n\n\tdecoded, err := base64.StdEncoding.DecodeString(res.Content)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error decoding %s : %w\", res.Content, err)\n\t}\n\n\treturn decoded, nil\n}", "func GetContent(url string, data ...interface{}) string {\n\treturn RequestContent(\"GET\", url, data...)\n}", "func (client *Client) GetContent(path string) *VoidResponse {\n\tendpoint := client.baseURL + fmt.Sprintf(EndpointGetContent, client.accessToken, path)\n\trequest := gorequest.New().Get(endpoint).Set(UserAgentHeader, UserAgent+\"/\"+Version)\n\n\treturn &VoidResponse{\n\t\tClient: client,\n\t\tRequest: request,\n\t}\n}", "func getFileContent(srv *drive.Service, fileId string) (*goquery.Document, error) {\n\tvar doc goquery.Document\n\n\tresponse, err := srv.Files.Export(fileId, \"text/html\").Download()\n\n\tif err != nil {\n\t\treturn &doc, err\n\t}\n\n\tif response.StatusCode != 200 {\n\t\treturn &doc, fmt.Errorf(\"Status code: %d\", response.StatusCode)\n\t}\n\n\treturn goquery.NewDocumentFromResponse(response)\n}", "func (cs *CasServer) GetContent(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\n\tdata, err := ioutil.ReadFile(fmt.Sprintf(\"./%s/%s\", cs.casFolder, ps.ByName(cs.casParam)))\n\n\tif err == nil {\n\t\t_, err := w.Write(data)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t}\n\n\t} else {\n\t\tlog.Error(err)\n\t\tw.WriteHeader(http.StatusNotFound)\n\n\t\t_, err := w.Write([]byte(http.StatusText(http.StatusNotFound)))\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t}\n\t}\n}", "func (info *FileInfo) Get() {\n\tlog.Info().Msgf(\"Getting %s\", info.URL)\n\tresp, err := http.Post(info.URL, \"application/x-www-form-urlencoded; charset=ISO-8859-1\", bytes.NewReader([]byte(\"\")))\n\n\tif err != nil {\n\t\tlog.Error().Msgf(\"%v, failed to post\", err)\n\t}\n\n\tcontent, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Error().Msgf(\"%v, failed to load response body\", err)\n\t}\n\n\tdata := strings.Split(string(content), \"|\")\n\tinfo.Total = data\n\n\tif len(data) < 11 {\n\t\tlog.Warn().Msgf(\"%v\", data)\n\t\tlog.Error().Msgf(\"%v less than 11 elements\", data)\n\t}\n\n\tinfo.Collection = data[0]\n\tinfo.PatientID = data[1]\n\tinfo.StudyUID = data[2]\n\tinfo.SeriesUID = data[3]\n\n\tif size, err := strconv.ParseInt(data[6], 10, 64); err != nil {\n\t\tlog.Error().Msgf(\"%v, failed to convert size to int64\", err)\n\t} else {\n\t\tinfo.Size = 
int64(size)\n\t}\n\n\tinfo.Date = data[11]\n\n\tlog.Printf(\"%v\", info)\n}", "func FilesGet(ctx *gin.Context) {\n\tif _, exists := ctx.Get(\"User\"); !exists {\n\t\t// User has not been created, we only allow existing user to visit images stored\n\t\tmisc.ReturnStandardError(ctx, 403, \"you will have to be a registered user to do this\")\n\t\treturn\n\t}\n\tfileType := ctx.DefaultQuery(\"type\", DefaultType)\n\tif _, ok := FileTypes[fileType]; !ok {\n\t\tmisc.ReturnStandardError(ctx, http.StatusBadRequest, \"type '\"+fileType+\"' is not accepted\")\n\t\treturn\n\t}\n\tif ReadSASExpiresAt == nil || time.Now().After(*ReadSASExpiresAt) {\n\t\t// SAS token expires or have not been created at all, we need to generate a new one\n\t\tnewExpiresAt := time.Now().UTC().Add(SASValidTime)\n\t\tqp, err := getSASQueryParam(newExpiresAt, FileTypes[fileType], \"\",\n\t\t\tazblob.ContainerSASPermissions{Read: true}.String())\n\t\tif err != nil {\n\t\t\tmisc.ReturnStandardError(ctx, 500, err.Error())\n\t\t\treturn\n\t\t}\n\t\tReadSASQueryParam = qp\n\t\tnewExpiresAt = newExpiresAt.Add(-SASValidAllowance)\n\t\tReadSASExpiresAt = &newExpiresAt\n\t}\n\tdata := map[string]map[string]string{\n\t\t\"meta\": {\n\t\t\t\"qp\": ReadSASQueryParam,\n\t\t\t\"qp_expires_at\": ReadSASExpiresAt.Format(time.RFC3339),\n\t\t\t\"endpoint\": \"https://\" + viper.GetString(\"azure.accountName\") + \".blob.core.windows.net/\" + FileTypes[fileType] + \"/\",\n\t\t},\n\t}\n\tctx.JSON(http.StatusOK, data)\n}", "func (c *FilesGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) {\n\tgensupport.SetOptions(c.urlParams_, opts...)\n\tres, err := c.doRequest(\"media\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\tres.Body.Close()\n\t\treturn nil, err\n\t}\n\treturn res, nil\n}", "func (b *Binary) Get(timeout time.Duration) error {\n\tclient := &getter.HttpGetter{\n\t\tReadTimeout: timeout,\n\t}\n\n\tif err := client.GetFile(b.LocalPath(), b.URL()); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to http get file: %s\", b.URL())\n\t}\n\n\treturn nil\n}", "func Get(url string, filename string, headers map[string]string) []byte {\n\tcachedContent, err := ioutil.ReadFile(fmt.Sprintf(\"%v.json\", filename))\n\tif err != nil {\n\t\treturn fetchNewData(url, filename, headers)\n\t}\n\treturn cachedContent\n}", "func (d *Datanode) Get(req GetRequest, resp *GetResponse) error {\n\n\tfileName := req.Filename\n\n\tif !strings.Contains(fileName, \"cache\") {\n\t\tfileName = Config.EncodeFileName(req.Filename)\n\t}\n\n\tsdfsfilepath := Config.SdfsfileDir + \"/\" + fileName\n\n\t//Open file\n\tsdfsfile, err := os.Open(sdfsfilepath)\n\tif err != nil {\n\t\tlog.Printf(\"os.Open() can't open file %s\\n\", sdfsfilepath)\n\t\treturn err\n\t}\n\tdefer sdfsfile.Close()\n\n\t//Read file into resp\n\tbuf := make([]byte, req.Size)\n\n\tn, err := sdfsfile.ReadAt(buf, req.Offset)\n\tif err != nil {\n\t\tif err != io.EOF {\n\t\t\treturn err\n\t\t} else {\n\t\t\t// fmt.Printf(\"Read sdfsfile %s succeed!!\\n\", req.Filename)\n\t\t\tresp.Eof = true\n\t\t}\n\t}\n\n\tresp.Content = buf[:n]\n\n\treturn nil\n}", "func (c Client) get(path string) ([]byte, error) {\n\turl := c.Host + path\n\n\tresp, err := c.HTTPClient.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn body, nil\n}", "func (m *GetFile) Method() string {\n\treturn \"getFile\"\n}", "func (d 
*driver) GetContent(ctx context.Context, path string) ([]byte, error) {\n reader, err := d.Reader(ctx, path, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ioutil.ReadAll(reader)\n}", "func GetFileHandler(w http.ResponseWriter, r *http.Request) {\n\thttpSession, _ := session.HTTPSession.Get(r, session.CookieName)\n\tif httpSession.IsNew {\n\t\thttp.Error(w, \"Forbidden\", http.StatusForbidden)\n\n\t\treturn\n\t}\n\tuid := httpSession.Values[\"uid\"].(string)\n\n\tresult := gulu.Ret.NewResult()\n\tdefer gulu.Ret.RetResult(w, r, result)\n\n\tvar args map[string]interface{}\n\n\tif err := json.NewDecoder(r.Body).Decode(&args); err != nil {\n\t\tlogger.Error(err)\n\t\tresult.Code = -1\n\n\t\treturn\n\t}\n\n\tpath := args[\"path\"].(string)\n\n\tif !gulu.Go.IsAPI(path) && !session.CanAccess(uid, path) {\n\t\thttp.Error(w, \"Forbidden\", http.StatusForbidden)\n\n\t\treturn\n\t}\n\n\tsize := gulu.File.GetFileSize(path)\n\tif size > 5242880 { // 5M\n\t\tresult.Code = -1\n\t\tresult.Msg = \"This file is too large to open :(\"\n\n\t\treturn\n\t}\n\n\tdata := map[string]interface{}{}\n\tresult.Data = &data\n\n\tbuf, _ := ioutil.ReadFile(path)\n\n\textension := filepath.Ext(path)\n\n\tif gulu.File.IsImg(extension) {\n\t\t// image file will be open in a browser tab\n\n\t\tdata[\"mode\"] = \"img\"\n\n\t\tuserId := conf.GetOwner(path)\n\t\tif \"\" == userId {\n\t\t\tlogger.Warnf(\"The path [%s] has no owner\", path)\n\t\t\tdata[\"path\"] = \"\"\n\n\t\t\treturn\n\t\t}\n\n\t\tuser := conf.GetUser(uid)\n\n\t\tdata[\"path\"] = \"/workspace/\" + user.Name + \"/\" + strings.Replace(path, user.WorkspacePath(), \"\", 1)\n\n\t\treturn\n\t}\n\n\tcontent := string(buf)\n\n\tif gulu.File.IsBinary(content) {\n\t\tresult.Code = -1\n\t\tresult.Msg = \"Can't open a binary file :(\"\n\t} else {\n\t\tdata[\"content\"] = content\n\t\tdata[\"path\"] = path\n\t}\n}", "func Get(project string) http.FileSystem {\n\treturn data[project]\n}", "func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {\n\tdefer debugTime()()\n\treader, err := d.shell.Cat(d.fullPath(path))\n\tif err != nil {\n\t\tif strings.HasPrefix(err.Error(), \"no link named\") {\n\t\t\treturn nil, storagedriver.PathNotFoundError{Path: path}\n\t\t}\n\t\treturn nil, err\n\t}\n\n\tcontent, err := ioutil.ReadAll(reader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.Debugf(\"Got content %s: %s\", path, content)\n\n\treturn content, nil\n}", "func (c *Client) GetFile(w http.ResponseWriter, r *http.Request) {\n\n\ttarget := c.getLargestFile()\n\tentry, err := NewFileReader(target)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tlog.Printf(\"\\nGetFile %#v %v\\n\", target, entry)\n\tdefer func() {\n\t\tif err := entry.Close(); err != nil {\n\t\t\tlog.Printf(\"Error closing file reader: %s\\n\", err)\n\t\t}\n\t}()\n\n\tw.Header().Set(\"Content-Disposition\", \"attachment; filename=\\\"\"+c.Torrent.Info().Name+\"\\\"\")\n\thttp.ServeContent(w, r, target.DisplayPath(), time.Now(), entry)\n}", "func Get(p string) (string, error) {\n\tfile, err := os.Open(p)\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tbuff := make([]byte, 512)\n\n\t_, err = file.Read(buff)\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tfiletype := http.DetectContentType(buff)\n\n\text := imgext.Get()\n\n\tfor i := 0; i < len(ext); i++ {\n\t\tif strings.Contains(ext[i], filetype[6:len(filetype)]) {\n\t\t\treturn filetype, nil\n\t\t}\n\t}\n\n\treturn \"\", 
errors.New(\"Invalid image type\")\n\n}", "func Get(file string) []byte {\n\treturn blob.Get(file)\n}", "func (d *KrakenStorageDriver) GetContent(ctx context.Context, path string) ([]byte, error) {\n\tlog.Debugf(\"(*KrakenStorageDriver).GetContent %s\", path)\n\tpathType, pathSubType, err := ParsePath(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data []byte\n\tswitch pathType {\n\tcase _manifests:\n\t\tdata, err = d.manifests.getDigest(path, pathSubType)\n\tcase _uploads:\n\t\tdata, err = d.uploads.getContent(path, pathSubType)\n\tcase _layers:\n\t\tdata, err = d.blobs.getDigest(path)\n\tcase _blobs:\n\t\tdata, err = d.blobs.getContent(ctx, path)\n\tdefault:\n\t\treturn nil, InvalidRequestError{path}\n\t}\n\tif err != nil {\n\t\treturn nil, toDriverError(err, path)\n\t}\n\treturn data, nil\n}", "func (ds *DriverService) GetFile(ID string) (*http.Response, error) {\n\treturn ds.Files.Get(ID).Download()\n}", "func GetFiles(w http.ResponseWriter, r *http.Request) {\n\tvar ws datastructures.Workspace\n\n\tif body, err := ioutil.ReadAll(r.Body); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(500)\n\t\tw.Write([]byte(\"Could not read request body\"))\n\t\treturn\n\t} else if err = json.Unmarshal(body, &ws); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(400)\n\t\tw.Write([]byte(\"Bad request\"))\n\t\treturn\n\t}\n\n\tlog.Println(\"Get files request for workspace\", ws.ToString())\n\n\tvar files []datastructures.File\n\n\tfilesInCache, err := utils.IsWorkspaceInCache(ws)\n\n\tif filesInCache {\n\t\tfiles, err = utils.GetFilesFromCache(ws)\n\t} else {\n\t\tfiles, err = utils.GetFilesFromDisk(ws)\n\n\t\tcopy, ok := r.URL.Query()[\"copy\"]\n\n\t\tif ok && copy[0] == \"YES\" {\n\t\t\tgo func() {\n\t\t\t\tif err = utils.GetFilesToCache(ws, files); err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(500)\n\t\tw.Write([]byte(\"Files could not be read\"))\n\t\treturn\n\t}\n\n\tif b, err := json.Marshal(files); err != nil {\n\t\tlog.Println(err)\n\t} else {\n\t\tw.Write(b)\n\t}\n}", "func getFile(c context.Context, r *http.Request, id int64) (models.File, error) {\n\t// Get the File by id\n\tvar file models.File\n\tfileId := datastore.NewKey(c, \"File\", \"\", id, nil)\n\n\terr := nds.Get(c, fileId, &file)\n\tif err != nil {\n\t\tlog.Errorf(c, \"%v\", err)\n\t\treturn models.File{}, err\n\t}\n\n\tif !file.Created.IsZero() {\n\t\tfile.Format(fileId, \"files\")\n\n\t\tuser, err := controllers.GetCurrentUser(c, r)\n\t\tif err != nil {\n\t\t\tlog.Errorf(c, \"%v\", err)\n\t\t\treturn models.File{}, errors.New(\"Could not get user\")\n\t\t}\n\t\tif file.CreatedBy != user.Id && !user.IsAdmin {\n\t\t\treturn models.File{}, errors.New(\"Forbidden\")\n\t\t}\n\n\t\treturn file, nil\n\t}\n\treturn models.File{}, errors.New(\"No file by this id\")\n}", "func FileGetContent(file string) ([]byte, error) {\n\tif !IsFile(file) {\n\t\treturn nil, os.ErrNotExist\n\t}\n\tb, e := ioutil.ReadFile(file)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn b, nil\n}", "func (b *Bot) GetFile(fileId string) (result axon.O, err error) {\n\tvar response interface{}\n\tif response, err = b.doGet(\"getFile?file_id=\" + fileId); err == nil {\n\t\tresult = response.(map[string]interface{})\n\t}\n\treturn\n}", "func (its *Request) Get() ([]byte, error) {\n\trequest, err := http.NewRequest(\"GET\", its.IsHTTPS+\"://\"+its.Host+its.Path, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// add BearToken auth\n\tif 
its.BearToken != \"\" {\n\t\trequest.Header.Add(\"Authorization\", \"Bearer \"+its.BearToken)\n\t}\n\n\tclient := http.Client{}\n\t// add InsecureSkipVerify\n\tif its.IsHTTPS == \"https\" {\n\t\tclient.Transport = tr\n\t}\n\n\t// execute this request\n\tresp, err := client.Do(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// get,read and return\n\tdefer resp.Body.Close()\n\ttmp, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn tmp, nil\n}", "func Get(file string) ([]byte, error) {\n\tdata, ok := files[file]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"File data for %q not found\", file)\n\t}\n\n\treturn data, nil\n}", "func (ref *FileRef) Get(file *File, authOptions ...AuthOption) error {\n\terr := objectGet(ref, file, authOptions...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func FileGetContent(file string) (string, error) {\n\tif !IsFile(file) {\n\t\treturn \"\", os.ErrNotExist\n\t}\n\tb, e := ioutil.ReadFile(file)\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\treturn string(b), nil\n}", "func (conn Connection) GetWithContent(cmd string, content, result interface{}) (effect *SideEffect, resp *http.Response, err error) {\n\treturn conn.Send(http.MethodGet, cmd, content, result)\n}", "func (sdk *Sdk) GetMedia(isAll, isExcludeDisabled, MediaType string) (string, error) {\n\tsdkC := sdk.cms\n\tparams := map[string]string{\n\t\t\"is_all\": isAll,\n\t\t\"exclude_disabled\": isExcludeDisabled,\n\t\t\"type\": MediaType,\n\t}\n\n\treturn sdkC.rq.Get(\"/media\", params)\n}", "func (c *client) Get(_ context.Context, request *blobstore.GetRequest) (*blobstore.GetResponse, error) {\n\tdata, err := util.ReadFile(c.bodyPath(request.Key))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttagsData, err := util.ReadFile(c.tagsPath(request.Key))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttags := make(map[string]string)\n\tif err := json.Unmarshal(tagsData, &tags); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &blobstore.GetResponse{\n\t\tBlob: blobstore.Blob{\n\t\t\tBody: data,\n\t\t\tTags: tags,\n\t\t},\n\t}, nil\n}", "func (g *gcsclient) GetFileContents(ctx context.Context, path string) ([]byte, error) {\n\tresponse, err := g.client.Bucket(g.bucket).Object(path).NewReader(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer util.Close(response)\n\treturn ioutil.ReadAll(response)\n}", "func (h ManifestHandler) Get(w http.ResponseWriter, r *http.Request) {\n\tlog.WithFields(log.Fields{\"handler\": \"ManifestHandler\"}).Debug(\"Handler called\")\n\n\tcr, err := cabby.NewRange(r.Header.Get(\"Range\"))\n\tif err != nil {\n\t\trangeNotSatisfiable(w, err)\n\t\treturn\n\t}\n\n\tmanifest, err := h.ManifestService.Manifest(r.Context(), takeCollectionID(r), &cr, newFilter(r))\n\tif err != nil {\n\t\tinternalServerError(w, err)\n\t\treturn\n\t}\n\n\tif len(manifest.Objects) <= 0 {\n\t\tresourceNotFound(w, errors.New(\"No manifest available for this collection\"))\n\t\treturn\n\t}\n\n\tif cr.Valid() {\n\t\tw.Header().Set(\"Content-Range\", cr.String())\n\t\twritePartialContent(w, cabby.TaxiiContentType, resourceToJSON(manifest))\n\t} else {\n\t\twriteContent(w, cabby.TaxiiContentType, resourceToJSON(manifest))\n\t}\n}", "func (rs *HTTPResource) GetObject(w http.ResponseWriter, r *http.Request) {\n\tid := uuid.New().String()\n\n\tlog.Info().\n\t\tStr(\"id\", id).\n\t\tStr(\"method\", r.Method).\n\t\tStr(\"path\", r.URL.Path).\n\t\tMsg(\"object requested\")\n\n\tif r.Method != http.MethodGet {\n\t\tlog.Warn().\n\t\t\tStr(\"id\", 
id).\n\t\t\tStr(\"method\", r.Method).\n\t\t\tStr(\"path\", r.URL.Path).\n\t\t\tMsg(\"bad request method\")\n\n\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\tw.Write([]byte(\"method not allowed\"))\n\t\treturn\n\t}\n\n\tfileID := strings.TrimPrefix(r.URL.Path, \"/\")\n\n\t// fail if someone is trying to potentially access different paths\n\t// on the filesystem, or the image name is too large.\n\t// we should probably do more to sanitise the fileID here, but\n\t// it should be good for now\n\tif strings.ContainsRune(fileID, '/') || len(fileID) > 256 {\n\t\tlog.Warn().\n\t\t\tStr(\"id\", id).\n\t\t\tStr(\"method\", r.Method).\n\t\t\tStr(\"path\", r.URL.Path).\n\t\t\tMsg(\"bad fileID requested\")\n\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(\"bad request: image URL is invalid\"))\n\t\treturn\n\t}\n\n\t// set the content type, get the object and write it to the response\n\tw.Header().Set(\"Content-Type\", mime.TypeByExtension(filepath.Ext(fileID)))\n\n\terr := rs.storage.ReadObject(fileID, w)\n\tif err != nil {\n\t\tif errors.Is(err, storage.ErrFileDoesNotExist) {\n\t\t\tlog.Warn().\n\t\t\t\tStr(\"id\", id).\n\t\t\t\tStr(\"method\", r.Method).\n\t\t\t\tStr(\"path\", r.URL.Path).\n\t\t\t\tMsg(\"requested file not found\")\n\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\tw.Write([]byte(\"image not found\"))\n\t\t\treturn\n\t\t}\n\n\t\tlog.Warn().\n\t\t\tStr(\"id\", id).\n\t\t\tStr(\"method\", r.Method).\n\t\t\tStr(\"path\", r.URL.Path).\n\t\t\tStr(\"error\", err.Error()).\n\t\t\tMsg(\"could not serve file\")\n\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(\"internal server error\"))\n\t\treturn\n\t}\n\n}", "func get(w http.ResponseWriter, r *http.Request) {\r\n\tif r.Method != \"GET\" {\r\n\t\thttp.Error(w, \"404 not found.\", http.StatusNotFound)\r\n\t\treturn\r\n\t}\r\n\tid := r.URL.Query().Get(\"id\")\r\n\t//fmt.Println(id)\r\n\tquery := \"SELECT id,value,completed FROM public.item WHERE id = $1\"\r\n\trow := db.QueryRow(query, id)\r\n\tcontents := content{}\r\n\trow.Scan(&contents.ID, &contents.value, &contents.completed)\r\n\tjson.NewEncoder(w).Encode(contents)\r\n}", "func (c *Controller) GetFile(key string) (multipart.File, *multipart.FileHeader, error) {\n\treturn c.Ctx.Request.FormFile(key)\n}", "func (c *Client) Get(path string) (f interface{}, err error) {\n\treturn c.do(\"GET\", path, nil)\n}", "func (m *FileAssessmentRequest) GetContentData()(*string) {\n return m.contentData\n}", "func getFile(url string) ([]byte, string) {\n\treq, _ := http.NewRequest(\"GET\", url, nil)\n\n\treq.Header.Add(\"cache-control\", \"no-cache\")\n\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tlog.Println(\"getFile:\", err)\n\t}\n\n\tdefer res.Body.Close()\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\t//get the receipt number from the filename\n\t// Make a Regex to say we only want numbers\n\treg, err := regexp.Compile(`[^\\d ]+`)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfilename := res.Header[\"Content-Disposition\"]\n\t// BUG: this leaves a space in the number, the result is a file name with a\n\t// \t\tdouble space between ShortDescription and the number. 
When fixed\n\t// \t\tremove the hack in the write file for block in func main.\n\tnumber := reg.ReplaceAllString(strings.Join(filename, \" \"), \"\")\n\treturn body, number\n}", "func Get(client *cliHttp.SimpleClient, gameID int, size int64, checksum string) (*GetResult, error) {\n\tgetParams := url.Values(map[string][]string{\n\t\t\"game_id\": {strconv.Itoa(gameID)},\n\t\t\"size\": {strconv.FormatInt(size, 10)},\n\t\t\"checksum\": {checksum},\n\t})\n\n\t_, res, err := client.Get(\"files/add\", getParams)\n\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to fetch current state of file on the server: \" + err.Error())\n\t}\n\tdefer res.Body.Close()\n\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to fetch current state of file on the server: \" + err.Error())\n\t}\n\n\tresult := &GetResult{}\n\tif err = json.Unmarshal(body, result); err != nil {\n\t\treturn nil, errors.New(\"Failed to fetch current state of file on the server, the server returned a weird looking response: \" + string(body))\n\t}\n\n\tif result.Error != nil {\n\t\treturn nil, apiErrors.New(result.Error)\n\t}\n\treturn result, nil\n}", "func (fs *FileSystem) Get(p string) ([]byte, error) {\n\tp = path.Clean(p)\n\tlog.Tracef(\"Getting %v\", p)\n\tif fs.local != \"\" {\n\t\tb, err := ioutil.ReadFile(filepath.Join(fs.local, p))\n\t\tif err != nil {\n\t\t\tif !os.IsNotExist(err) {\n\t\t\t\tlog.Debugf(\"Error accessing resource %v on filesystem: %v\", p, err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tlog.Tracef(\"Resource %v does not exist on filesystem, using embedded resource instead\", p)\n\t\t} else {\n\t\t\tlog.Tracef(\"Using local resource %v\", p)\n\t\t\treturn b, nil\n\t\t}\n\t}\n\tb, found := fs.files[p]\n\tif !found {\n\t\terr := fmt.Errorf(\"%v not found\", p)\n\t\treturn nil, err\n\t}\n\tlog.Tracef(\"Using embedded resource %v\", p)\n\treturn b, nil\n}", "func (afs *assetFiles) GetContent(name string) []byte {\n\ts, err := afs.GetAssetFile(name)\n\tif err != nil {\n\t\treturn []byte(\"\")\n\t}\n\treturn s.Content()\n}", "func (m Mux) Get(ctx context.Context, url, etag string) (io.ReadCloser, reflow.File, error) {\n\tbucket, key, err := m.Bucket(ctx, url)\n\tif err != nil {\n\t\treturn nil, reflow.File{}, err\n\t}\n\treturn bucket.Get(ctx, key, etag)\n}", "func get(cacheDir, url string) ([]byte, error) {\n\tclient := grab.NewClient()\n\treq, err := grab.NewRequest(cacheDir, url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp := client.Do(req)\n\t<-resp.Done\n\treturn ioutil.ReadFile(resp.Filename)\n}", "func Get() (string, error) {\n\trw := CreateReadWrite()\n\tcontent, err := rw.Read(\"sample.txt\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn content, nil\n}", "func (r *regulator) GetContent(ctx context.Context, path string) ([]byte, error) {\n\tr.enter()\n\tdefer r.exit()\n\n\treturn r.StorageDriver.GetContent(ctx, path)\n}", "func (s *SwiftLocation) GetFile(path string, requestHeaders schwift.ObjectHeaders) (io.ReadCloser, FileState, error) {\n\tobject := s.ObjectAtPath(path)\n\n\tbody, err := object.Download(requestHeaders.ToOpts()).AsReadCloser()\n\tif schwift.Is(err, http.StatusNotModified) {\n\t\treturn nil, FileState{SkipTransfer: true}, nil\n\t}\n\tif err != nil {\n\t\treturn nil, FileState{}, err\n\t}\n\t//NOTE: Download() uses a GET request, so object metadata has already been\n\t//received and cached, so Headers() is cheap now and will never fail.\n\thdr, err := object.Headers()\n\tif err != nil {\n\t\tbody.Close()\n\t\treturn nil, 
FileState{}, err\n\t}\n\n\tvar expiryTime *time.Time\n\tif hdr.ExpiresAt().Exists() {\n\t\tt := hdr.ExpiresAt().Get()\n\t\texpiryTime = &t\n\t}\n\n\treturn body, FileState{\n\t\tEtag: hdr.Etag().Get(),\n\t\tLastModified: hdr.Get(\"Last-Modified\"),\n\t\tSizeBytes: int64(hdr.SizeBytes().Get()),\n\t\tExpiryTime: expiryTime,\n\t\tContentType: hdr.ContentType().Get(),\n\t}, nil\n}", "func (m *Mock) File(req *acomm.Request) (interface{}, *url.URL, error) {\n\tvar err error\n\tif !m.Data.File {\n\t\terr = errors.New(\"file does not exist\")\n\t}\n\treturn nil, nil, err\n}", "func (t *targetrunner) httpfilget(w http.ResponseWriter, r *http.Request) {\n\tapitems := t.restApiItems(r.URL.Path, 5)\n\tif apitems = t.checkRestAPI(w, r, apitems, 1, Rversion, Rfiles); apitems == nil {\n\t\treturn\n\t}\n\tbucket, objname := apitems[0], \"\"\n\tif len(apitems) > 1 {\n\t\tobjname = apitems[1]\n\t}\n\tt.statsif.add(\"numget\", 1)\n\t//\n\t// list the bucket and return\n\t//\n\tif len(objname) == 0 {\n\t\tgetcloudif().listbucket(w, bucket)\n\t\treturn\n\t}\n\t//\n\t// get from the bucket\n\t//\n\tfqn := t.fqn(bucket, objname)\n\tvar file *os.File\n\t_, err := os.Stat(fqn)\n\tif os.IsNotExist(err) {\n\t\tt.statsif.add(\"numcoldget\", 1)\n\t\tglog.Infof(\"Bucket %s key %s fqn %q is not cached\", bucket, objname, fqn)\n\t\t// TODO: do getcloudif().getobj() and write http response in parallel\n\t\tif file, err = getcloudif().getobj(w, fqn, bucket, objname); err != nil {\n\t\t\treturn\n\t\t}\n\t\tfile.Seek(0, 0) // NOTE: needed?\n\t} else {\n\t\tif file, err = os.Open(fqn); err != nil {\n\t\t\ts := fmt.Sprintf(\"Failed to open local file %q, err: %v\", fqn, err)\n\t\t\tt.statsif.add(\"numerr\", 1)\n\t\t\tinvalmsghdlr(w, r, s)\n\t\t\treturn\n\t\t}\n\t}\n\tdefer file.Close()\n\t// NOTE: the following copyBuffer() call is equivalent to:\n\t// \trt, _ := w.(io.ReaderFrom)\n\t// \twritten, err := rt.ReadFrom(file) ==> sendfile path\n\twritten, err := copyBuffer(w, file)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to copy %q to http, err: %v\", fqn, err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\tt.statsif.add(\"numerr\", 1)\n\t} else if glog.V(3) {\n\t\tglog.Infof(\"Copied %q to http(%.2f MB)\", fqn, float64(written)/1000/1000)\n\t}\n\tglog.Flush()\n}", "func (gw2 *GW2Api) Files() (res []string, err error) {\n\tver := \"v2\"\n\ttag := \"files\"\n\terr = gw2.fetchEndpoint(ver, tag, nil, &res)\n\treturn\n}", "func (c ProfilesController) GetMedia(profileName, subPath string) (JSONResponse, error) {\n\tprofile, err := c.profileProvider.GetProfile(profileName)\n\n\tif err != nil {\n\t\treturn JSONResponse{}, err\n\t}\n\n\tfiles, err := c.mediaProvider.GetLocalMedia(profile.MediaPath, subPath)\n\n\tif err != nil {\n\t\treturn JSONResponse{}, err\n\t}\n\n\treturn c.JSONResponse(200, files), nil\n}", "func (client *TcpBridgeClient) QueryFile(meta *QueryFileMeta) (*QueryFileResponseMeta, error) {\n    frame, e := client.sendReceive(FRAME_OPERATION_QUERY_FILE, STATE_VALIDATED, meta, 0, nil)\n    if e != nil {\n        return nil, e\n    }\n    var res = &QueryFileResponseMeta{}\n    e1 := json.Unmarshal(frame.FrameMeta, res)\n    if e1 != nil {\n        return nil, e1\n    }\n    return res, nil\n}", "func (b *Client) GetFileContent(pull models.PullRequest, fileName string) (bool, []byte, error) {\n\treturn false, []byte{}, fmt.Errorf(\"Not Implemented\")\n}", "func getContent(url string) ([]byte, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"GET error: %v\", err)\n\t}\n\tdefer 
resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"Status error: %v\", resp.StatusCode)\n\t}\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Read body: %v\", err)\n\t}\n\n\treturn data, nil\n}", "func Get(c http.Client, url string) Result {\n\tres, err := c.Get(url)\n\tif err != nil {\n\t\treturn Result{url, \"\", nil, &ErrResult{err}}\n\t}\n\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn Result{url, \"\", res, &ErrResult{err}}\n\t}\n\n\tif contentTypes, ok := res.Header[\"Content-Type\"]; ok {\n\t\treturn Result{url, Format(contentTypes[0], body), res, nil}\n\t} else {\n\t\treturn Result{url, Format(\"\", body), res, nil}\n\t}\n\n\tpanic(\"unreachable\")\n}", "func contentFromServer(url string) string {\n resp, err := http.Get(url)\n checkError(err)\n\n defer resp.Body.Close()\n bytes, err := ioutil.ReadAll(resp.Body)\n checkError(err)\n\n return string(bytes)\n}", "func FetchFile(url string) (bytes []byte, err error) {\n\tres, err := http.Get(url)\n\tdefer res.Body.Close()\n\tif res.StatusCode == 200 {\n\t\tbytes, err = ioutil.ReadAll(res.Body)\n\t} else {\n\t\terr = fmt.Errorf(\"Cannot find file %s : %d\", url, res.StatusCode)\n\t}\n\treturn bytes, err\n}", "func (h FileHandler) GetFile(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\thash := vars[\"filehash\"]\n\n\thttp.ServeFile(w, r, getHashedFilepath(h.staticDir, hash))\n}", "func getFile(fileName string) (bytes []byte, filetype string) {\n\tfile, _ := os.Open(fileName)\n\tdefer file.Close()\n\n\tfileInfo, _ := file.Stat()\n\tsize := fileInfo.Size()\n\tbytes = make([]byte, size)\n\n\tbuffer := bufio.NewReader(file)\n\tbuffer.Read(bytes)\n\n\tfiletype = http.DetectContentType(bytes)\n\treturn\n}", "func (client HttpSourceClient) getContent(endpoint string) (bytes []byte, err error) {\n\thttpClient := &http.Client{Timeout: client.timeoutPeriod}\n\tresp, err := httpClient.Get(apiBaseUrl + endpoint)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer func() {\n\t\tclosingErr := resp.Body.Close()\n\t\tif err == nil {\n\t\t\terr = closingErr\n\t\t}\n\t}()\n\treturn io.ReadAll(resp.Body)\n}", "func DoBcsStorageGetRequest(fullPath string, tokenBase64 string, contentType string) (response *http.Response,\n\terr error) {\n\tif fullPath == \"\" {\n\t\tklog.Errorf(\"Http path is nil, please check again.\\n\")\n\t\treturn nil, fmt.Errorf(\"Http path is nil, please check again.\\n\")\n\t}\n\n\tclient := &http.Client{}\n\trequest, err := http.NewRequest(\"GET\", fullPath, nil)\n\tif err != nil {\n\t\tklog.Errorf(\"Get func NewRequest failed, %s\\n\", err)\n\t\treturn nil, fmt.Errorf(\"Get func NewRequest failed, %s\\n\", err)\n\t}\n\n\tif tokenBase64 != \"\" {\n\t\ttoken, err := base64.StdEncoding.DecodeString(tokenBase64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvar bearer = \"Bearer \" + string(token)\n\t\trequest.Header.Add(\"Authorization\", bearer)\n\t}\n\n\trequest.Header.Set(\"Content-type\", contentType)\n\n\tresponse, err = client.Do(request)\n\tif err != nil {\n\t\tklog.Errorf(\"Get func client.Do failed, %s\\n\", err)\n\t\treturn nil, fmt.Errorf(\"Get func client.Do failed, %s\\n\", err)\n\t}\n\treturn response, err\n}", "func (m *Macross) File(path, file string) {\n\tm.Get(path, func(c *Context) error {\n\t\treturn c.ServeFile(file)\n\t})\n}", "func (base *Base) GetContent(ctx context.Context, path string) ([]byte, error) {\n\tctx, done := dcontext.WithTrace(ctx)\n\tdefer done(\"%s.GetContent(%q)\", 
base.Name(), path)\n\n\tif !storagedriver.PathRegexp.MatchString(path) {\n\t\treturn nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}\n\t}\n\n\tstart := time.Now()\n\tb, e := base.StorageDriver.GetContent(ctx, path)\n\tstorageAction.WithValues(base.Name(), \"GetContent\").UpdateSince(start)\n\treturn b, base.setDriverName(e)\n}", "func getContent(name string) ([]byte, error) {\n\treturn ioutil.ReadFile(\"content/\" + name)\n}", "func (_Bucket *BucketCaller) GetFile(opts *bind.CallOpts, _fileId *big.Int) (struct {\n\tStorageRef string\n\tName string\n\tFileSize *big.Int\n\tIsPublic bool\n\tIsDeleted bool\n\tFileOwner common.Address\n\tIsOwner bool\n\tLastModified *big.Int\n\tPermissionAddresses []common.Address\n\tWriteAccess bool\n}, error) {\n\tret := new(struct {\n\t\tStorageRef string\n\t\tName string\n\t\tFileSize *big.Int\n\t\tIsPublic bool\n\t\tIsDeleted bool\n\t\tFileOwner common.Address\n\t\tIsOwner bool\n\t\tLastModified *big.Int\n\t\tPermissionAddresses []common.Address\n\t\tWriteAccess bool\n\t})\n\tout := ret\n\terr := _Bucket.contract.Call(opts, out, \"getFile\", _fileId)\n\treturn *ret, err\n}", "func (s *GDrive) Get(ctx context.Context, token string, filename string, rng *Range) (reader io.ReadCloser, contentLength uint64, err error) {\n\tvar fileID string\n\tfileID, err = s.findID(filename, token)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar fi *drive.File\n\tfi, err = s.service.Files.Get(fileID).Fields(\"size\", \"md5Checksum\").Do()\n\tif err != nil {\n\t\treturn\n\t}\n\tif !s.hasChecksum(fi) {\n\t\terr = fmt.Errorf(\"cannot find file %s/%s\", token, filename)\n\t\treturn\n\t}\n\n\tcontentLength = uint64(fi.Size)\n\n\tfileGetCall := s.service.Files.Get(fileID)\n\tif rng != nil {\n\t\theader := fileGetCall.Header()\n\t\theader.Set(\"Range\", rng.Range())\n\t}\n\n\tvar res *http.Response\n\tres, err = fileGetCall.Context(ctx).Download()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif rng != nil {\n\t\treader = res.Body\n\t\trng.AcceptLength(contentLength)\n\t\treturn\n\t}\n\n\treader = res.Body\n\n\treturn\n}", "func get(url string) ([]byte, error) {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"User-Agent\", userAgent)\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"got status code: %s\", resp.Status)\n\t}\n\n\treturn ioutil.ReadAll(resp.Body)\n}", "func getPage(c *gin.Context) {\n\tfmt.Println(\"getPage\")\n\n\t// TODO: check mode\n\t// if local\n\tr := getFilestoreDoc(c.Param(\"id\"))\n\n\t// if firestore\n\t// TODO: add firestore\n\n\tc.JSON(http.StatusOK, r)\n}", "func Get(url string) ([]byte, error) {\n\trsp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\n\t}\n\tdefer rsp.Body.Close()\n\treturn ioutil.ReadAll(rsp.Body)\n}", "func (f *FileDir) GetFilesContents(file []byte, reply *FileDir) error {\n\n\t//line contains full path of the file\n\ttime.Sleep(5 * time.Second)\n\tfilePath := string(file) //taking the path of the file from the byte variable\n\n\tcontent, err := ioutil.ReadFile(filePath) //reading the contents of the file\n\tif err != nil {\n\t\tfmt.Println(\"File reading error\", err)\n\t\treturn nil\n\t}\n\n\tdata := string(content) //converting the contents of the file to string\n\t*reply = FileDir{data} //referencing the content to the sent to the client\n\treadBlocked = false\n\treturn nil\n}", "func (m 
*FileRequestBuilder) Get(ctx context.Context, requestConfiguration *FileRequestBuilderGetRequestConfiguration)(iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.AgreementFileable, error) {\n requestInfo, err := m.CreateGetRequestInformation(ctx, requestConfiguration);\n if err != nil {\n return nil, err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n }\n res, err := m.requestAdapter.SendAsync(ctx, requestInfo, iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.CreateAgreementFileFromDiscriminatorValue, errorMapping)\n if err != nil {\n return nil, err\n }\n if res == nil {\n return nil, nil\n }\n return res.(iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.AgreementFileable), nil\n}", "func Get(c context.Context, path string) ([]byte, error) {\n\treturn FromContext(c).Get(path)\n}", "func (c *Client) GetFile(owner, repo, filepath, commit string) ([]byte, error) {\n\tctx := context.Background()\n\tfullName := c.repositoryName(owner, repo)\n\tanswer, _, err := c.client.Contents.Find(ctx, fullName, filepath, commit)\n\tvar data []byte\n\tif answer != nil {\n\t\tdata = answer.Data\n\t}\n\treturn data, err\n}", "func (c *ReportsFilesGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) {\n\tgensupport.SetOptions(c.urlParams_, opts...)\n\tres, err := c.doRequest(\"media\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := googleapi.CheckResponse(res); err != nil {\n\t\tres.Body.Close()\n\t\treturn nil, err\n\t}\n\treturn res, nil\n}", "func get(url string, qparms rest.QParms) ([]byte, error) {\n\theaders := rest.Headers{\"Authorization\": \"Bearer \" + token}\n\tfor k, v := range defaultHeaders {\n\t\theaders[k] = v\n\t}\n\tclient := rest.NewClient(headers, qparms)\n\n\tbody, err := client.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn body, nil\n}", "func (s *StaticHandler) Get() {\n\tp := s.Vars[\"path\"]\n\tif p == \"\" {\n\t\tp = \"index.html\"\n\t\tif _, err := os.Stat(p); err != nil {\n\t\t\tp = \"index.htm\"\n\t\t}\n\t}\n\tcontent, err := ioutil.ReadFile(p)\n\text := filepath.Ext(p)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\ts.Status(404)\n\t} else {\n\t\tswitch ext {\n\t\tcase \".html\", \".htm\":\n\t\t\tcs := string(content)\n\t\t\tcs = strings.Replace(cs, \"</body>\", `<script src=\"/`+LIVERELOAD+`.js\"></script>`+\"\\n</body>\", 1)\n\t\t\tcontent = []byte(cs)\n\t\t}\n\n\t\ts.Response().Header().Add(\"Content-Type\", mime.TypeByExtension(ext))\n\n\t\ts.Write(content)\n\t}\n}", "func (c *Client) get(path string) (string, error) {\n\turl := c.endpoint + path\n\tresp, err := c.httpClient.Get(url)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\tbodyBytes, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tbody := string(bodyBytes)\n\tif resp.StatusCode < 200 || resp.StatusCode >= 300 {\n\t\treturn body, fmt.Errorf(\"response status was %d\", resp.StatusCode)\n\t}\n\treturn body, nil\n}", "func (api *api) get(w http.ResponseWriter, r *http.Request) {\n\tpath := getCleanPath(r)\n\tapi.Manager.RLock()\n\tdefer api.Manager.RUnlock()\n\tw.Header().Add(headerRedirPort, api.RedirPort)\n\tif path == \"\" {\n\t\t//Get all\n\t\tans := map[string]ajax{}\n\t\tfor k, v 
:= range api.Manager.pathes {\n\t\t\tans[k] = ajax{v.content, v.hits}\n\t\t}\n\n\t\tsendJSON(ans, w)\n\t} else {\n\t\t// One\n\t\tv, ok := api.Manager.pathes[path]\n\t\tif !ok {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\t\tsendJSON(v.content, w)\n\t}\n}", "func File(contentType, fp string) Response {\n\treturn fileResp{contentType, fp}\n}", "func (s *Drive) GetFile(sha256sum []byte) ([]byte, error) {\n\treturn nil, nil\n}", "func GetFileMeta(res http.ResponseWriter,req *http.Request){\n\treq.ParseForm()\n\tfilehash := req.Form[\"filehash\"][0]\n\tfMeta := meta.Get(filehash)\n\tdata,err := json.Marshal(fMeta)\n\tif err != nil{\n\t\tres.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tres.Write(data)\n}", "func (s *DFSServer) GetFile(req *transfer.GetFileReq, stream transfer.FileTransfer_GetFileServer) (err error) {\n\tserviceName := \"GetFile\"\n\tpeerAddr := getPeerAddressString(stream.Context())\n\tglog.V(3).Infof(\"%s start, client: %s, %v\", serviceName, peerAddr, req)\n\n\tif len(req.Id) == 0 || req.Domain <= 0 {\n\t\treturn fmt.Errorf(\"invalid request [%v]\", req)\n\t}\n\n\treturn streamFunc(s.getFileStream).withStreamDeadline(serviceName, req, stream, serviceName, peerAddr, s)\n}", "func getFile(w http.ResponseWriter, r *http.Request) *Upload {\n\tfiles, _ := writeUploadedFiles(r)\n\tif len(files) > 1 {\n\t\tnumErrors++\n\t\tw.WriteHeader(400)\n\t\tw.Write([]byte(\"Only a single file allowed.\"))\n\t\treturn nil\n\t} else if len(files) == 0 {\n\t\tnumErrors++\n\t\tw.WriteHeader(400)\n\t\tw.Write([]byte(\"No files uploaded.\"))\n\t\treturn nil\n\t}\n\n\tcore.VPrintf(\"Got upload %#v\\n\", files[0])\n\treturn &files[0]\n}", "func RetrieveFile(w http.ResponseWriter, r *http.Request) {\n\t//log\n\tnow, userIP := globalPkg.SetLogObj(r)\n\tlogobj := logpkg.LogStruct{\"_\", now, userIP, \"macAdress\", \"RetrieveFile\", \"file\", \"_\", \"_\", \"_\", 0}\n\n\tvar obj RetrieveBody\n\tdecoder := json.NewDecoder(r.Body)\n\tdecoder.DisallowUnknownFields()\n\terr := decoder.Decode(&obj)\n\tif err != nil {\n\t\tglobalPkg.SendError(w, \"please enter your correct request\")\n\t\tglobalPkg.WriteLog(logobj, \"please enter your correct request\", \"failed\")\n\t\treturn\n\t}\n\t// check for pk\n\tacc := account.GetAccountByAccountPubicKey(obj.Publickey)\n\tif acc.AccountPublicKey != obj.Publickey {\n\t\tglobalPkg.SendError(w, \"error in public key\")\n\t\tglobalPkg.WriteLog(logobj, \"error in public key\", \"failed\")\n\t\treturn\n\t}\n\t// check for pwd\n\tif acc.AccountPassword != obj.Password {\n\t\tglobalPkg.SendError(w, \"error in password\")\n\t\tglobalPkg.WriteLog(logobj, \"error in password\", \"failed\")\n\t\treturn\n\t}\n\t// TODO check time\n\t// Validate Signature\n\tvalidSig := false\n\tpk := account.FindpkByAddress(acc.AccountPublicKey).Publickey\n\tif pk != \"\" {\n\t\tpublickey := cryptogrpghy.ParsePEMtoRSApublicKey(pk)\n\t\t// signatureData := obj.FileID + obj.Publickey + obj.Password +\n\t\t// \tobj.Time\n\t\tsignatureData := obj.Publickey + obj.Password + obj.FileID\n\n\t\tvalidSig = cryptogrpghy.VerifyPKCS1v15(obj.Signture, signatureData, *publickey)\n\t\t// validSig = true\n\t} else {\n\t\tvalidSig = false\n\t}\n\tif !validSig {\n\t\tglobalPkg.SendError(w, \"you are not allowed to download\")\n\t\tglobalPkg.WriteLog(logobj, \"you are not allowed to download\", \"failed\")\n\t\treturn\n\t}\n\t// check whether the user owns this file\n\tfiles := acc.Filelist\n\tfound := false\n\tvar selectedFile accountdb.FileList\n\tfor _, file := range files {\n\t\tif 
file.Fileid == obj.FileID {\n\t\t\tfound = true\n\t\t\tselectedFile = file\n\t\t\tbreak\n\t\t}\n\t}\n\t// check if this file is shared with this account: anyone the file is shared with can download it\n\tsharefiles := filestorage.FindSharedfileByAccountIndex(acc.AccountIndex)\n\tif len(sharefiles.OwnerSharefile) != 0 {\n\t\tfor _, sharefileobj := range sharefiles.OwnerSharefile {\n\t\t\tif containsfileid(sharefileobj.Fileid, obj.FileID) {\n\t\t\t\tfound = true\n\t\t\t\taccuntObj := account.GetAccountByAccountPubicKey(sharefileobj.OwnerPublicKey)\n\t\t\t\tfor _, filelistObj := range accuntObj.Filelist {\n\t\t\t\t\tif filelistObj.Fileid == obj.FileID {\n\t\t\t\t\t\tselectedFile = filelistObj\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t// fmt.Println(\"selectedFile.FileName \", selectedFile.FileName)\n\tif !found {\n\t\tglobalPkg.SendError(w, \"You don't have this file or file shared to you\")\n\t\tglobalPkg.WriteLog(logobj, \"You don't have this file or file shared to you\", \"failed\")\n\t\treturn\n\t}\n\n\t// collect file and save it in a temp file\n\tdecryptIndexBlock1 := cryptogrpghy.KeyDecrypt(globalpkg.EncryptAccount, selectedFile.Blockindex)\n\tfmt.Println(\" *********** block index \", decryptIndexBlock1)\n\tblkObj := block.GetBlockInfoByID(decryptIndexBlock1)\n\tvar fStrct filestorage.FileStruct\n\tfor _, tx := range blkObj.BlockTransactions {\n\t\tfStrct = tx.Filestruct\n\t\tif fStrct.Fileid == selectedFile.Fileid {\n\t\t\tfStrct = tx.Filestruct\n\t\t\tbreak\n\t\t}\n\t}\n\t// check active validators\n\tvar actives []validator.ValidatorStruct\n\tvalidatorLst := validator.GetAllValidators()\n\tfor _, valdtr := range validatorLst {\n\t\tif valdtr.ValidatorActive {\n\t\t\tactives = append(actives, valdtr)\n\t\t}\n\t}\n\tvar chnkObj filestorage.Chunkdb\n\tnewPath := filepath.Join(uploadPath, fStrct.Fileid+fStrct.FileType)\n\tfile, er := os.OpenFile(newPath, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0777)\n\tif er != nil {\n\t\tfmt.Println(\"error in open file \", er)\n\t\tglobalPkg.SendError(w, \"server is down !\")\n\t\tglobalPkg.WriteLog(logobj, \"cannot open file\", \"failed\")\n\t\treturn\n\t}\n\tdefer file.Close()\n\tnotvalidchnkdata := false\n\tcountnotvalidchnkdata := 0\n\tvar res broadcastTcp.FileBroadcastResponse\n\tvar chunkcount int = 0\n\tfor key, value := range fStrct.Mapping {\n\t\tfor _, chunkDta := range value {\n\t\t\t// time.Sleep(time.Millisecond * 10)\n\t\t\tindofvalidator := contains(actives, chunkDta.ValidatorIP)\n\t\t\tif indofvalidator != -1 {\n\t\t\t\tvalidatorObj2 := actives[indofvalidator]\n\t\t\t\tchnkObj.Chunkid = key\n\t\t\t\tchnkObj.Chunkhash = chunkDta.Chunkhash\n\t\t\t\t// _, _, res := broadcastTcp.SendObject(chnkObj, actives[i].ValidatorPublicKey, \"getchunkdata\", \"file\", actives[i].ValidatorSoketIP)\n\t\t\t\tif validatorObj2.ValidatorPublicKey == validator.CurrentValidator.ValidatorPublicKey {\n\t\t\t\t\t_, _, res = broadcastTcp.SendObject(chnkObj, validator.CurrentValidator.ValidatorPublicKey, \"getchunkdata\", \"file\", validator.CurrentValidator.ValidatorSoketIP)\n\n\t\t\t\t} else {\n\t\t\t\t\t_, _, res = broadcastTcp.SendObject(chnkObj, validatorObj2.ValidatorPublicKey, \"getchunkdata\", \"file\", validatorObj2.ValidatorSoketIP)\n\t\t\t\t}\n\t\t\t\tif !res.Valid {\n\t\t\t\t\tfmt.Println(\"server is down\")\n\t\t\t\t\tnotvalidchnkdata = true\n\t\t\t\t\tcontinue\n\t\t\t\t} else {\n\t\t\t\t\treshashchunk := globalPkg.GetHash(res.ChunkData)\n\t\t\t\t\tif reshashchunk != chnkObj.Chunkhash {\n\t\t\t\t\t\tfmt.Println(\"chunk data is lost
.\")\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnotvalidchnkdata = false\n\n\t\t\t\t\t\t_, err := file.Write(res.ChunkData)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tfmt.Println(\"error in write chunk to file : \", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tchunkcount++\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t} // end else\n\t\t\t\tif notvalidchnkdata { // corrupted\n\t\t\t\t\tcountnotvalidchnkdata++\n\t\t\t\t\tfmt.Println(\"Count of not valid chunk data : \", countnotvalidchnkdata)\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\t}\n\tfmt.Println(\"written chunk \", chunkcount)\n\tfile0, er2 := ioutil.ReadFile(newPath)\n\tif er2 != nil {\n\t\tfmt.Println(\"error in reading file !!!\")\n\t}\n\tcollectedhashfile := globalPkg.GetHash(file0)\n\tfmt.Println(\"Collected File Hash \", collectedhashfile)\n\tfmt.Println(\"Original File Hash \", fStrct.FileHash2)\n\n\t// if collectedhashfile != fStrct.FileHash {\n\t// \tif countnotvalidchnkdata > 0 {\n\t// \t\tfmt.Println(\"error in getting chunk data !!!\")\n\t// \t}\n\t// \tglobalPkg.SendError(w, \"server is down !\")\n\t// \tglobalPkg.WriteLog(logobj, \"collected file hash not equal\", \"failed\")\n\t// \treturn\n\t// }\n\n\t// read file as bytes\n\tfile2, er2 := os.Open(newPath)\n\tif er2 != nil {\n\t\tfmt.Println(\"error in reading file !!!\")\n\t}\n\n\tfileinfoCollected, _ := file2.Stat()\n\tfmt.Println(\"File Size \", fStrct.FileSize)\n\tfmt.Println(\"Collected File Size \", fileinfoCollected.Size())\n\t// if fStrct.FileSize != fileinfoCollected.Size() {\n\t// \tglobalPkg.SendError(w, \"file is corrupted\")\n\t// \tglobalPkg.WriteLog(logobj, \"file is corrupted size file is different\", \"failed\")\n\t// \treturn\n\t// }\n\t// ip := strings.Split(validator.CurrentValidator.ValidatorIP, \":\")\n\t// fmt.Println(\"length of string : \", len(ip))\n\t// strip := ip[0] + \"s\"\n\t// httpsip := strip + \":\" + ip[1] + \":\" + ip[2]\n\t// // u, err := url.Parse(validator.CurrentValidator.ValidatorIP)\n\t// u, err := url.Parse(httpsip)\n\t// fmt.Println(\"=================== link \", u, \"========path ==== \", validator.CurrentValidator.ValidatorIP)\n\t// if err != nil {\n\t// \tfmt.Println(err)\n\t// }\n\t// u, err := url.Parse(\"https://us-demoinochain.inovatian.com\")\n\tu, err := url.Parse(globalPkg.GlobalObj.Downloadfileip)\n\n\tu.Path = path.Join(u.Path, \"files\", fStrct.Fileid+fStrct.FileType)\n\tlink := u.String()\n\tglobalPkg.SendResponseMessage(w, link)\n\tglobalPkg.WriteLog(logobj, \"File downloaded successfully\", \"success\")\n\n}", "func (s *Service) GetContent(c context.Context, likeSubType int, likes map[int64]*model.Like, ids []int64, wids []int64, mids []int64) (err error) {\n\tswitch likeSubType {\n\tcase model.PICTURE, model.PICTURELIKE, model.DRAWYOO, model.DRAWYOOLIKE, model.TEXT, model.TEXTLIKE, model.QUESTION:\n\t\terr = s.accountAndContent(c, ids, mids, likes)\n\tcase model.VIDEO, model.VIDEOLIKE, model.ONLINEVOTE, model.VIDEO2, model.PHONEVIDEO, model.SMALLVIDEO:\n\t\terr = s.archiveWithTag(c, wids, likes)\n\tcase model.ARTICLE:\n\t\terr = s.articles(c, wids, likes)\n\tcase model.MUSIC:\n\t\terr = s.musicsAndAct(c, wids, mids, likes)\n\tdefault:\n\t\terr = ecode.RequestErr\n\t}\n\treturn\n}", "func (h *httpCloud) get(path string, resp interface{}) error {\n\trequestType := \"GET\"\n\tbody, err := h.sendHTTPRequest(requestType, path, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"HTTP request to cloudprovider failed: %v\", err)\n\t}\n\tif body != nil {\n\t\tif err := json.Unmarshal(body, resp); err != nil {\n\t\t\treturn 
fmt.Errorf(\"GET response Unmarshal for %s failed with error: %v\\n\", path, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (a *Client) GetMediaFiles(params *GetMediaFilesParams) (*GetMediaFilesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMediaFilesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"get_media_files\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/rest/v1/media-files\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &GetMediaFilesReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*GetMediaFilesOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for get_media_files: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func _getReal(url string) (content []byte, err error) {\n\tvar req *http.Request\n\treq, err = http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tApplyHeaders(req, global.Conf.TranslatorAPIHeaders)\n\n\tvar resp *http.Response\n\tresp, err = http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\treturn ioutil.ReadAll(resp.Body)\n}" ]
[ "0.6392516", "0.6372234", "0.6199706", "0.61942226", "0.6124794", "0.6098814", "0.6066002", "0.601279", "0.5994589", "0.5979411", "0.5961219", "0.5959829", "0.5917716", "0.59151256", "0.59097975", "0.5899526", "0.58678234", "0.5862983", "0.58176565", "0.58107644", "0.5794979", "0.5792535", "0.5787578", "0.5747875", "0.57183236", "0.57106274", "0.57079965", "0.5698996", "0.56928164", "0.56910866", "0.5687499", "0.56834793", "0.567277", "0.5662783", "0.56523156", "0.56517553", "0.56348044", "0.5611568", "0.5611358", "0.5586871", "0.55708843", "0.556612", "0.5534729", "0.5525926", "0.55247325", "0.550107", "0.5489714", "0.54813254", "0.5479582", "0.5476954", "0.5476213", "0.54756826", "0.5474973", "0.5474191", "0.5472856", "0.546856", "0.5461048", "0.5451467", "0.5447451", "0.5440608", "0.5436131", "0.54352516", "0.5422652", "0.54207516", "0.54182875", "0.54097486", "0.5407703", "0.5406932", "0.54047734", "0.54002106", "0.5397693", "0.53918874", "0.5384056", "0.538268", "0.53745794", "0.53677154", "0.5363585", "0.5357529", "0.53571725", "0.53534377", "0.5350535", "0.53441036", "0.5333675", "0.53304315", "0.5329142", "0.53134704", "0.5311861", "0.5311817", "0.53112614", "0.5310007", "0.53099763", "0.5308181", "0.5307138", "0.5301457", "0.52901286", "0.52891773", "0.5288775", "0.5284297", "0.5278682", "0.52750903" ]
0.8425734
0
Update performs PATCH request for MobileAppContentFile
func (r *MobileAppContentFileRequest) Update(ctx context.Context, reqObj *MobileAppContentFile) error { return r.JSONRequest(ctx, "PATCH", "", reqObj, nil) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func PATCH(c *httputil.Client, data DataMultipartWriter, v interface{}, url string) error {\n\treturn Do(c, \"PATCH\", data, v, url)\n}", "func (u *App) Update(c echo.Context, r *Update) (result *model.File, err error) {\n\tfile, err := u.udb.View(u.db, r.ID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := u.rbac.EnforceUser(c, file.UserID); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif file.Type == model.ResourceApplication {\n\t\tif err = u.rbac.EnforceRole(c, model.OperatorRole); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\tfile.Comment = r.Comment\n\tfile.Status = r.Status\n\t// file.Public = r.Public\n\n\tif err = u.udb.Update(u.db, file); err != nil {\n\t\treturn\n\t}\n\treturn u.udb.View(u.db, r.ID)\n}", "func (r *ExtensionRequest) Update(ctx context.Context, reqObj *Extension) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func (r *resourceFrameworkShare) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {\n}", "func (o *Object) updateSimple(ctx context.Context, body io.Reader, getBody func() (io.ReadCloser, error), filePath string, size int64, contentType string, extraHeaders map[string]string, rootURL string, options ...fs.OpenOption) (err error) {\n\tvar resp *http.Response\n\n\tif extraHeaders == nil {\n\t\textraHeaders = map[string]string{}\n\t}\n\n\topts := rest.Opts{\n\t\tMethod: \"PUT\",\n\t\tPath: filePath,\n\t\tGetBody: getBody,\n\t\tBody: body,\n\t\tNoResponse: true,\n\t\tContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365\n\t\tContentType: contentType,\n\t\tOptions: options,\n\t\tExtraHeaders: extraHeaders,\n\t\tRootURL: rootURL,\n\t}\n\terr = o.fs.pacer.CallNoRetry(func() (bool, error) {\n\t\tresp, err = o.fs.srv.Call(ctx, &opts)\n\t\treturn o.fs.shouldRetry(ctx, resp, err)\n\t})\n\tif err != nil {\n\t\t// Give the WebDAV server a chance to get its internal state in order after the\n\t\t// error. The error may have been local in which case we closed the connection.\n\t\t// The server may still be dealing with it for a moment. 
A sleep isn't ideal but I\n\t\t// haven't been able to think of a better method to find out if the server has\n\t\t// finished - ncw\n\t\ttime.Sleep(1 * time.Second)\n\t\t// Remove failed upload\n\t\t_ = o.Remove(ctx)\n\t\treturn err\n\t}\n\treturn nil\n\n}", "func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {\n\tsize := src.Size()\n\t// modTime := src.ModTime(ctx)\n\tremote := o.Remote()\n\n\t// Create the directory for the object if it doesn't exist\n\tleaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// if file doesn't exist, create it\n\tif o.id == \"\" {\n\t\to.id, err = o.fs.createFile(ctx, directoryID, leaf, fs.MimeType(ctx, src))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to create file: %w\", err)\n\t\t}\n\t\tif o.id == \"\" {\n\t\t\treturn errors.New(\"failed to create file: no ID\")\n\t\t}\n\t\t// if created the file and returning an error then delete the file\n\t\tdefer func() {\n\t\t\tif err != nil {\n\t\t\t\tdelErr := o.fs.delete(ctx, true, o.id, remote, o.fs.opt.HardDelete)\n\t\t\t\tif delErr != nil {\n\t\t\t\t\tfs.Errorf(o, \"failed to remove failed upload: %v\", delErr)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\tvar resp *http.Response\n\topts := rest.Opts{\n\t\tMethod: \"PUT\",\n\t\tRootURL: o.id,\n\t\tPath: \"/data\",\n\t\tNoResponse: true,\n\t\tOptions: options,\n\t\tBody: in,\n\t}\n\tif size >= 0 {\n\t\topts.ContentLength = &size\n\t}\n\terr = o.fs.pacer.CallNoRetry(func() (bool, error) {\n\t\tresp, err = o.fs.srv.Call(ctx, &opts)\n\t\treturn shouldRetry(ctx, resp, err)\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to upload file: %w\", err)\n\t}\n\n\to.hasMetaData = false\n\treturn o.readMetaData(ctx)\n}", "func (r *ExternalRequest) Update(ctx context.Context, reqObj *External) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func (s *server) File(_ context.Context, request *pb.FileRequest) (*pb.FileResponse, error) {\n\tfmt.Printf(\"Patching file %s\\n\", path.Join(s.localReplicaPath, request.FullPath))\n\terr := ioutil.WriteFile(path.Join(s.localReplicaPath, request.FullPath), []byte(request.FullContents), defaultRights)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to file %s: %w\", request.FullPath, err)\n\t}\n\treturn &pb.FileResponse{}, nil\n}", "func (m *FileRequestBuilder) Patch(ctx context.Context, body iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.AgreementFileable, requestConfiguration *FileRequestBuilderPatchRequestConfiguration)(iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.AgreementFileable, error) {\n requestInfo, err := m.CreatePatchRequestInformation(ctx, body, requestConfiguration);\n if err != nil {\n return nil, err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n }\n res, err := m.requestAdapter.SendAsync(ctx, requestInfo, iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.CreateAgreementFileFromDiscriminatorValue, errorMapping)\n if err != nil {\n return nil, err\n }\n if res == nil {\n return nil, nil\n }\n return res.(iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.AgreementFileable), nil\n}", "func (api 
*MediaApi) update(c *routing.Context) error {\n\tid := c.Param(\"id\")\n\n\tmodel, fetchErr := api.dao.GetByID(id)\n\tif fetchErr != nil {\n\t\treturn utils.NewNotFoundError(fmt.Sprintf(\"Media item with id \\\"%v\\\" doesn't exist!\", id))\n\t}\n\n\tform := &models.MediaUpdateForm{}\n\tif readErr := c.Read(form); readErr != nil {\n\t\treturn utils.NewBadRequestError(\"Oops, an error occurred while updating media item.\", readErr)\n\t}\n\n\tform.Model = model\n\n\tupdatedModel, updateErr := api.dao.Update(form)\n\n\tif updateErr != nil {\n\t\treturn utils.NewBadRequestError(\"Oops, an error occurred while updating media item.\", updateErr)\n\t}\n\n\tupdatedModel = daos.ToAbsMediaPath(updatedModel)\n\n\treturn c.Write(updatedModel)\n}", "func PatchContent(url string, data ...interface{}) string {\n\treturn RequestContent(\"PATCH\", url, data...)\n}", "func (st *fakeConn) Update(ctx context.Context, filePath string, contents []byte, version Version) (ver Version, err error) {\n\tif st.readOnly {\n\t\treturn nil, vterrors.Errorf(vtrpc.Code_READ_ONLY, \"topo server connection is read-only\")\n\t}\n\tif filePath == \"error\" {\n\t\treturn ver, fmt.Errorf(\"Dummy error\")\n\n\t}\n\treturn ver, err\n}", "func (a *App) Patch(w http.ResponseWriter, r *http.Request) {\n\tw.Write([]byte(\"patching in testing mode. Get ready to send multipart-form data\"))\n}", "func (r Requester) Update(path string, payload interface{}) Requester {\n\tb, err := json.Marshal(payload)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tbody := bytes.NewReader(b)\n\tr.httpRequest, err = http.NewRequest(http.MethodPut, r.url, body)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn r\n}", "func (r *app) Update(appGUID string, appPayload AppRequest, opts ...bool) (*AppFields, error) {\n\tasync := true\n\tif len(opts) > 0 {\n\t\tasync = opts[0]\n\t}\n\trawURL := fmt.Sprintf(\"/v2/apps/%s?async=%t\", appGUID, async)\n\tappFields := AppFields{}\n\t_, err := r.client.Put(rawURL, appPayload, &appFields)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &appFields, nil\n}", "func (fr *FileResource) Update() error {\n\t// Purge extra files\n\tif fr.Purge {\n\t\tfor name := range fr.extra {\n\t\t\tdstFile := utils.NewFileUtil(name)\n\t\t\tfr.Printf(\"purging %s\\n\", name)\n\t\t\tif err := dstFile.Remove(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// Fix outdated files\n\tfor _, item := range fr.outdated {\n\t\tdstFile := utils.NewFileUtil(item.dst)\n\n\t\t// Update file content if needed\n\t\tif item.flags&flagOutdatedContent != 0 {\n\t\t\t// Create parent directory for file if missing\n\t\t\tdstDir := filepath.Dir(item.dst)\n\t\t\t_, err := os.Stat(dstDir)\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\tif err := os.MkdirAll(dstDir, 0755); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tsrcFile := utils.NewFileUtil(item.src)\n\t\t\tsrcMd5, err := srcFile.Md5()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfr.Printf(\"setting content of %s to md5:%s\\n\", item.dst, srcMd5)\n\t\t\tif err := dstFile.CopyFrom(item.src, true); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// Update permissions if needed\n\t\tif item.flags&flagOutdatedPermissions != 0 {\n\t\t\tfr.Printf(\"setting permissions of %s to %#o\\n\", item.dst, fr.Mode)\n\t\t\tif err := dstFile.Chmod(os.FileMode(fr.Mode)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// Update ownership if needed\n\t\tif item.flags&flagOutdatedOwner != 0 {\n\t\t\tfr.Printf(\"setting owner of %s to %s:%s\\n\", item.dst, 
fr.Owner, fr.Group)\n\t\t\tif err := dstFile.SetOwner(fr.Owner, fr.Group); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (_obj *Apichannels) Channels_editPhoto(params *TLchannels_editPhoto, _opt ...map[string]string) (ret Updates, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = params.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\ttarsCtx := context.Background()\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"channels_editPhoto\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = ret.ReadBlock(_is, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func (r *Request) Patch(path, contentType string, data ...interface{}) {\n\tr.Send(\"PATCH\", path, contentType, data...)\n}", "func (r *MobileAppContentFileRequest) Get(ctx context.Context) (resObj *MobileAppContentFile, err error) {\n\tvar query string\n\tif r.query != nil {\n\t\tquery = \"?\" + r.query.Encode()\n\t}\n\terr = r.JSONRequest(ctx, \"GET\", query, nil, &resObj)\n\treturn\n}", "func (conn Connection) Patch(cmd string, content, result interface{}) (resp *http.Response, err error) {\n\treturn conn.Send(http.MethodPatch, cmd, content, result)\n}", "func (r *ApplicationTemplateRequest) Update(ctx context.Context, reqObj *ApplicationTemplate) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func TestApiUpdateOtherFileInDirectory(t *testing.T) {\n\tserver = createTestServerWithContext(false)\n\n\trepoPath := \"../tests/tmp/repositories/update_file\"\n\tlr, _ := setupSmallTestRepo(repoPath)\n\n\ttarget := fmt.Sprintf(\"%s/%s\", server.URL, \"api/directories/documents/documents/document_3/files/index.md\")\n\n\tncf := NewCommitFile{\n\t\tPath: \"documents\",\n\t\tDocument: \"document_2\",\n\t\tFilename: \"index.md\", // note, target contains document_2.md\n\t\tBody: \"# The quick brown fox\",\n\t\tFrontMatter: FrontMatter{\n\t\t\tTitle: \"Document Three\",\n\t\t\tAuthor: \"Timothy Lovejoy\",\n\t\t},\n\t}\n\n\tnc := &NewCommit{\n\t\tMessage: \"Forty whacks with a wet noodle\",\n\t\tFiles: []NewCommitFile{ncf},\n\t\tRepositoryInfo: RepositoryInfo{LatestRevision: lr.String()},\n\t}\n\n\tpayload, err := json.Marshal(nc)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tbuff := bytes.NewBuffer(payload)\n\n\tclient := &http.Client{}\n\n\treq, _ := http.NewRequest(\"PATCH\", target, buff)\n\n\tresp, err := client.Do(req)\n\n\tvar receiver FailureResponse\n\n\tjson.NewDecoder(resp.Body).Decode(&receiver)\n\n\tassert.Equal(t, \"No supplied file matches path\", receiver.Message)\n\tassert.Equal(t, http.StatusBadRequest, resp.StatusCode)\n\n}", "func (r *ApplicationRequest) 
Update(ctx context.Context, reqObj *Application) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func (f5 *f5LTM) patch(url string, payload interface{}, result interface{}) error {\n\treturn f5.restRequestPayload(\"PATCH\", url, payload, result)\n}", "func (r *DeviceAppManagementRequest) Update(ctx context.Context, reqObj *DeviceAppManagement) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {\n\tif o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {\n\t\treturn errNotSupportedInSharedMode\n\t}\n\tremote := o.remotePath()\n\tif ignoredFiles.MatchString(remote) {\n\t\treturn fserrors.NoRetryError(fmt.Errorf(\"file name %q is disallowed - not uploading\", path.Base(remote)))\n\t}\n\tcommitInfo := files.NewCommitInfo(o.fs.opt.Enc.FromStandardPath(o.remotePath()))\n\tcommitInfo.Mode.Tag = \"overwrite\"\n\t// The Dropbox API only accepts timestamps in UTC with second precision.\n\tclientModified := src.ModTime(ctx).UTC().Round(time.Second)\n\tcommitInfo.ClientModified = &clientModified\n\t// Don't attempt to create filenames that are too long\n\tif cErr := checkPathLength(commitInfo.Path); cErr != nil {\n\t\treturn cErr\n\t}\n\n\tsize := src.Size()\n\tvar err error\n\tvar entry *files.FileMetadata\n\tif size > int64(o.fs.opt.ChunkSize) || size < 0 || o.fs.batcher.Batching() {\n\t\tentry, err = o.uploadChunked(ctx, in, commitInfo, size)\n\t} else {\n\t\terr = o.fs.pacer.CallNoRetry(func() (bool, error) {\n\t\t\tentry, err = o.fs.srv.Upload(&files.UploadArg{CommitInfo: *commitInfo}, in)\n\t\t\treturn shouldRetry(ctx, err)\n\t\t})\n\t}\n\tif err != nil {\n\t\treturn fmt.Errorf(\"upload failed: %w\", err)\n\t}\n\t// If we haven't received data back from batch upload then fake it\n\t//\n\t// This will only happen if we are uploading async batches\n\tif entry == nil {\n\t\to.bytes = size\n\t\to.modTime = *commitInfo.ClientModified\n\t\to.hash = \"\" // we don't have this\n\t\treturn nil\n\t}\n\treturn o.setMetadataFromEntry(entry)\n}", "func Update(fileMeta FileMeta){\n\tfileMetas[fileMeta.FileSha1] = fileMeta\n}", "func (r *ExtensionPropertyRequest) Update(ctx context.Context, reqObj *ExtensionProperty) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func (r *DeviceManagementExportJobRequest) Update(ctx context.Context, reqObj *DeviceManagementExportJob) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func (fc *FileInCore) updateFileInCore(volID uint64, vf *proto.File, volLoc *DataReplica, volLocIndex int) {\n\tif vf.Modified > fc.LastModify {\n\t\tfc.LastModify = vf.Modified\n\t}\n\n\tisFind := false\n\tfor i := 0; i < len(fc.MetadataArray); i++ {\n\t\tif fc.MetadataArray[i].getLocationAddr() == volLoc.Addr {\n\t\t\tfc.MetadataArray[i].Crc = vf.Crc\n\t\t\tfc.MetadataArray[i].Size = vf.Size\n\t\t\tisFind = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif isFind == false {\n\t\tfm := newFileMetadata(vf.Crc, volLoc.Addr, volLocIndex, vf.Size)\n\t\tfc.MetadataArray = append(fc.MetadataArray, fm)\n\t}\n\n}", "func Update(c messagebird.Client, id string, request *Request) error {\n\tif err := validateUpdate(request); err != nil {\n\t\treturn err\n\t}\n\n\treturn c.Request(nil, http.MethodPatch, path+\"/\"+id, request)\n}", "func (c *Client) EditMedia(request *EditMediaRequest) (response *EditMediaResponse, err error) {\n if request == nil {\n request = NewEditMediaRequest()\n }\n response = 
NewEditMediaResponse()\n err = c.Send(request, response)\n return\n}", "func PatchBytes(url string, data ...interface{}) []byte {\n\treturn RequestBytes(\"PATCH\", url, data...)\n}", "func (r *ExternalItemRequest) Update(ctx context.Context, reqObj *ExternalItem) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func UpdateHandler(w http.ResponseWriter, r *http.Request) {\n\tr.ParseForm()\n\n\topType := r.Form.Get(\"op\")\n\tfileSha1 := r.Form.Get(\"filehash\")\n\tnewFileName := r.Form.Get(\"filename\")\n\n\tif opType != \"0\" {\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\treturn\n\t}\n\n\tif r.Method != \"POST\" {\n\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tfileMeta := meta.GetFileMeta(fileSha1)\n\tfileMeta.FileName = newFileName\n\tmeta.UpdateFileMeta(fileMeta)\n\n\tdata, err := json.Marshal(fileMeta)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(data)\n}", "func (r *DeviceManagementScriptRequest) Update(ctx context.Context, reqObj *DeviceManagementScript) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func (F *Frisby) Patch(url string) *Frisby {\n\tF.Method = \"PATCH\"\n\tF.Url = url\n\treturn F\n}", "func Update(file, content string, args ...interface{}) error {\n\tpermission := uint32(0644)\n\n\tif len(args) > 0 {\n\t\tpermission = args[0].(uint32)\n\t}\n\n\tf, err := os.OpenFile(file, os.O_APPEND|os.O_WRONLY, os.FileMode(permission))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := f.WriteString(content); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (c *Client) Patch(url string, headers map[string][]string, body io.Reader) (client.Status, map[string][]string, io.ReadCloser, error) {\n\treturn c.Do(\"PATCH\", url, headers, body)\n}", "func patchResource(mapping *meta.RESTMapping, config *rest.Config, group string,\n\tversion string, namespace string, data []byte) error {\n\trestClient, err := getRESTClient(config, group, version)\n\tif err != nil {\n\t\treturn &kfapis.KfError{\n\t\t\tCode: int(kfapis.INVALID_ARGUMENT),\n\t\t\tMessage: fmt.Sprintf(\"patchResource error: %v\", err),\n\t\t}\n\t}\n\n\tif _, err = restClient.\n\t\tPatch(k8stypes.JSONPatchType).\n\t\tResource(mapping.Resource.Resource).\n\t\tNamespaceIfScoped(namespace, mapping.Scope.Name() == \"namespace\").\n\t\tBody(data).\n\t\tDo().\n\t\tGet(); err == nil {\n\t\treturn nil\n\t} else {\n\t\treturn &kfapis.KfError{\n\t\t\tCode: int(kfapis.INVALID_ARGUMENT),\n\t\t\tMessage: fmt.Sprintf(\"patchResource error: %v\", err),\n\t\t}\n\t}\n}", "func TestFileShareUpdate(t *testing.T) {\n\tvar result FileShare\n\terr := json.NewDecoder(strings.NewReader(fileShareBody)).Decode(&result)\n\n\tif err != nil {\n\t\tt.Errorf(\"Error decoding JSON: %s\", err)\n\t}\n\n\ttestClient := &common.TestClient{}\n\tresult.SetClient(testClient)\n\n\tresult.CASupported = false\n\tresult.FileShareQuotaType = SoftQuotaType\n\tresult.FileShareTotalQuotaBytes = 1024\n\terr = result.Update()\n\n\tif err != nil {\n\t\tt.Errorf(\"Error making Update call: %s\", err)\n\t}\n\n\tcalls := testClient.CapturedCalls()\n\n\tif !strings.Contains(calls[0].Payload, \"CASupported:false\") {\n\t\tt.Errorf(\"Unexpected CASupported update payload: %s\", calls[0].Payload)\n\t}\n\n\tif !strings.Contains(calls[0].Payload, \"FileShareQuotaType:Soft\") {\n\t\tt.Errorf(\"Unexpected FileShareQuotaType update payload: %s\", calls[0].Payload)\n\t}\n\n\tif !strings.Contains(calls[0].Payload, 
\"FileShareTotalQuotaBytes:1024\") {\n\t\tt.Errorf(\"Unexpected FileShareTotalQuotaBytes update payload: %s\", calls[0].Payload)\n\t}\n}", "func (c *Controller) updateInAPI(uri string, data io.Reader) error {\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(http.MethodPatch, uri, data)\n\treq.Header.Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (c *Client) Patch(url string, reqBody, resType interface{}) error {\n\treturn c.CallAPI(\"PATCH\", url, reqBody, resType, true)\n}", "func (r *DeviceAppManagementTaskRequest) Update(ctx context.Context, reqObj *DeviceAppManagementTask) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func update(w http.ResponseWriter, r *http.Request) {\r\n\tif r.Method != \"PUT\" {\r\n\t\thttp.Error(w, \"404 not found.\", http.StatusNotFound)\r\n\t\treturn\r\n\t}\r\n\tid, _ := strconv.ParseInt(r.URL.Query().Get(\"id\"), 10, 64)\r\n\tcontents := &content{\r\n\t\tID: id,\r\n\t\tvalue: r.PostFormValue(\"value\"),\r\n\t\tcompleted: r.PostFormValue(\"completed\"),\r\n\t}\r\n\t//fmt.Println(user)\r\n\tquery := \"UPDATE public.item SET completed=$3,value=$2 WHERE id = $1;\"\r\n\t_, err = db.Exec(query, contents.ID, contents.value,contents.completed)\r\n\tif err != nil {\r\n\t\tpanic(err)\r\n\t}\r\n\tjson.NewEncoder(w).Encode(contents)\r\n}", "func (c *Memcache) Update(data interface{}) Response {\n\tc.Lock()\n\tdefer c.Unlock()\n\tc.contents = data\n\tc.created = time.Now()\n\treturn Response{\n\t\tCached: false,\n\t\tData: data,\n\t}\n}", "func (c *Client) Patch(url string, headers, queryParams map[string][]string, data interface{}) (response *http.Response, err error) {\n\treturn c.makeRequest(url, http.MethodPatch, headers, queryParams, data)\n}", "func (me *CHARGES_IMPL) UpdateChargeMetadata (\r\n chargeId string,\r\n body *models_pkg.ChargesMetadataRequest,\r\n idempotencyKey *string) (*models_pkg.ChargesMetadataResponse, error) {\r\n //the endpoint path uri\r\n _pathUrl := \"/Charges/{charge_id}/metadata\"\r\n\r\n //variable to hold errors\r\n var err error = nil\r\n //process optional template parameters\r\n _pathUrl, err = apihelper_pkg.AppendUrlWithTemplateParameters(_pathUrl, map[string]interface{} {\r\n \"charge_id\" : chargeId,\r\n })\r\n if err != nil {\r\n //error in template param handling\r\n return nil, err\r\n }\r\n\r\n //the base uri for api requests\r\n _queryBuilder := configuration_pkg.BASEURI;\r\n\r\n //prepare query string for API call\r\n _queryBuilder = _queryBuilder + _pathUrl\r\n\r\n //validate and preprocess url\r\n _queryBuilder, err = apihelper_pkg.CleanUrl(_queryBuilder)\r\n if err != nil {\r\n //error in url validation or cleaning\r\n return nil, err\r\n }\r\n //prepare headers for the outgoing request\r\n headers := map[string]interface{} {\r\n \"user-agent\" : \"MundiSDK - Go 2.4.5\",\r\n \"accept\" : \"application/json\",\r\n \"content-type\" : \"application/json; charset=utf-8\",\r\n \"Content-Type\" : \"application/json\",\r\n \"idempotency-key\" : apihelper_pkg.ToString(idempotencyKey, \"\"),\r\n }\r\n\r\n //prepare API request\r\n _request := unirest.PatchWithAuth(_queryBuilder, headers, body, me.config.BasicAuthUserName(), me.config.BasicAuthPassword())\r\n //and invoke the API call request to fetch the response\r\n _response, err := unirest.AsString(_request,false);\r\n if err != nil {\r\n //error in API invocation\r\n return nil, err\r\n }\r\n\r\n 
//error handling using HTTP status codes\r\n if (_response.Code == 400) {\r\n err = apihelper_pkg.NewAPIError(\"Invalid request\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 401) {\r\n err = apihelper_pkg.NewAPIError(\"Invalid API key\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 404) {\r\n err = apihelper_pkg.NewAPIError(\"An informed resource was not found\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 412) {\r\n err = apihelper_pkg.NewAPIError(\"Business validation error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 422) {\r\n err = apihelper_pkg.NewAPIError(\"Contract validation error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 500) {\r\n err = apihelper_pkg.NewAPIError(\"Internal server error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code < 200) || (_response.Code > 206) { //[200,206] = HTTP OK\r\n err = apihelper_pkg.NewAPIError(\"HTTP Response Not OK\", _response.Code, _response.RawBody)\r\n }\r\n if(err != nil) {\r\n //error detected in status code validation\r\n return nil, err\r\n }\r\n\r\n //returning the response\r\n var retVal *models_pkg.ChargesMetadataResponse = &models_pkg.ChargesMetadataResponse{}\r\n err = json.Unmarshal(_response.RawBody, &retVal)\r\n\r\n if err != nil {\r\n //error in parsing\r\n return nil, err\r\n }\r\n return retVal, nil\r\n\r\n}", "func (rb *RequestBuilder) Patch(url string, body interface{}) *Response {\n\treturn rb.DoRequest(http.MethodPatch, url, body)\n}", "func (a *CampaignManagementApiService) UpdateCampaignUsingPATCHExecute(r ApiUpdateCampaignUsingPATCHRequest) (Campaign, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPatch\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue Campaign\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"CampaignManagementApiService.UpdateCampaignUsingPATCH\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/api/v0/deviceMgt/campaigns/{campaignId}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"campaignId\"+\"}\", _neturl.PathEscape(parameterToString(r.campaignId, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\tif r.xAPIKEY == nil {\n\t\treturn localVarReturnValue, nil, reportError(\"xAPIKEY is required and must be specified\")\n\t}\n\tif r.campaign == nil {\n\t\treturn localVarReturnValue, nil, reportError(\"campaign is required and must be specified\")\n\t}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tlocalVarHeaderParams[\"X-API-KEY\"] = parameterToString(*r.xAPIKEY, \"\")\n\t// body params\n\tlocalVarPostBody = 
r.campaign\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v ErrorResponseWeb\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v ErrorResponseWeb\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 409 {\n\t\t\tvar v ErrorResponseWeb\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (yad *yandexDisk) UpdateResource(path string, fields []string, body *ResourcePatch) (r *Resource, e error) {\n\tvalues := url.Values{}\n\tvalues.Add(\"path\", path)\n\tvalues.Add(\"fields\", strings.Join(fields, \",\"))\n\tbodyJSON, e := json.Marshal(body)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treq, e := yad.client.request(http.MethodPatch, \"/disk/resources?\"+values.Encode(), bytes.NewReader(bodyJSON))\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\tr = new(Resource)\n\t_, e = yad.client.getResponse(req, &r)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn\n}", "func (c *Client) Patch(ctx context.Context, url string, data ...interface{}) (*Response, error) {\n\treturn c.DoRequest(ctx, http.MethodPatch, url, data...)\n}", "func (a MediaApi) ReplaceAccountMediaFiles(accountId int32, mediaId int32, jsonParam string, file *os.File) (*MediaFull, *APIResponse, error) {\n\n\tvar localVarHttpMethod = strings.ToUpper(\"Put\")\n\t// create path and map variables\n\tlocalVarPath := a.Configuration.BasePath + \"/accounts/{account_id}/media/files/{media_id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"account_id\"+\"}\", 
fmt.Sprintf(\"%v\", accountId), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"media_id\"+\"}\", fmt.Sprintf(\"%v\", mediaId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := make(map[string]string)\n\tvar localVarPostBody interface{}\n\tvar localVarFileName string\n\tvar localVarFileBytes []byte\n\t// authentication '(apiKey)' required\n\t// set key with prefix in header\n\tlocalVarHeaderParams[\"Authorization\"] = a.Configuration.GetAPIKeyWithPrefix(\"Authorization\")\n\t// add default headers if any\n\tfor key := range a.Configuration.DefaultHeader {\n\t\tlocalVarHeaderParams[key] = a.Configuration.DefaultHeader[key]\n\t}\n\n\tclearEmptyParams(localVarQueryParams)\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"multipart/form-data\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tlocalVarFormParams[\"json\"] = a.Configuration.APIClient.ParameterToString(jsonParam, \"\")\n\tfbs, _ := ioutil.ReadAll(file)\n\tlocalVarFileBytes = fbs\n\tlocalVarFileName = file.Name()\n\tvar successPayload = new(MediaFull)\n\tlocalVarHttpResponse, err := a.Configuration.APIClient.CallAPI(localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\n\tvar localVarURL, _ = url.Parse(localVarPath)\n\tlocalVarURL.RawQuery = localVarQueryParams.Encode()\n\tvar localVarAPIResponse = &APIResponse{Operation: \"ReplaceAccountMediaFiles\", Method: localVarHttpMethod, RequestURL: localVarURL.String()}\n\tif localVarHttpResponse != nil {\n\t\tlocalVarAPIResponse.Response = localVarHttpResponse.RawResponse\n\t\tlocalVarAPIResponse.Payload = localVarHttpResponse.Body()\n\t}\n\n\tif err != nil {\n\t\treturn successPayload, localVarAPIResponse, err\n\t}\n\terr = json.Unmarshal(localVarHttpResponse.Body(), &successPayload)\n\treturn successPayload, localVarAPIResponse, err\n}", "func TestApiUpdateNoFilesInDirectory(t *testing.T) {\n\tserver = createTestServerWithContext(false)\n\n\trepoPath := \"../tests/tmp/repositories/update_file\"\n\tlr, _ := setupSmallTestRepo(repoPath)\n\n\ttarget := fmt.Sprintf(\"%s/%s\", server.URL, \"api/directories/documents/documents/document_3/files/index.md\")\n\n\tnc := &NewCommit{\n\t\tMessage: \"Forty whacks with a wet noodle\",\n\t\tFiles: []NewCommitFile{},\n\t\tRepositoryInfo: RepositoryInfo{LatestRevision: lr.String()},\n\t}\n\n\tpayload, err := json.Marshal(nc)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tbuff := bytes.NewBuffer(payload)\n\n\tclient := &http.Client{}\n\n\treq, _ := http.NewRequest(\"PATCH\", target, buff)\n\n\tresp, err := client.Do(req)\n\n\tvar receiver FailureResponse\n\n\tjson.NewDecoder(resp.Body).Decode(&receiver)\n\n\tassert.Equal(t, \"No files specified for update\", receiver.Message)\n\tassert.Equal(t, http.StatusBadRequest, resp.StatusCode)\n\n}", "func updateFile(client *github.Client, owner, 
name, path, sha string, data []byte) (string, error) {\n\tres, _, err := client.Repositories.UpdateFile(context.Background(),\n\t\towner,\n\t\tname,\n\t\tpath,\n\t\t&github.RepositoryContentFileOptions{\n\t\t\tContent: data,\n\t\t\tMessage: github.String(\"Updated MAINTAINERS file [CI SKIP]\"),\n\t\t\tSHA: github.String(sha),\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Error updating MAINTAINERS file at %s. %s\", sha, err)\n\t}\n\treturn *res.SHA, nil\n}", "func (a *ContentApiService) UpdateContentMetadata(ctx _context.Context, contentMetadataId int64, body ContentMeta) (ContentMeta, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPatch\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue ContentMeta\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/content_metadata/{content_metadata_id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"content_metadata_id\"+\"}\", _neturl.QueryEscape(parameterToString(contentMetadataId, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &body\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v ContentMeta\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, 
localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 422 {\n\t\t\tvar v ValidationError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func Update(\n\tctx context.Context,\n\tcurrent runtime.Object,\n\tmodified client.Object,\n\tc client.Client,\n\tlogger logr.Logger,\n) error {\n\tprepareResourceForPatch(current, modified)\n\topts := []patch.CalculateOption{\n\t\tpatch.IgnoreStatusFields(),\n\t\tpatch.IgnoreVolumeClaimTemplateTypeMetaAndStatus(),\n\t}\n\tpatchResult, err := patch.DefaultPatchMaker.Calculate(current, modified, opts...)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !patchResult.IsEmpty() {\n\t\t// need to set current version first otherwise the request would get rejected\n\t\tlogger.Info(fmt.Sprintf(\"Resource %s (%s) changed, updating. 
Diff: %v\",\n\t\t\tmodified.GetName(), modified.GetObjectKind().GroupVersionKind().Kind, string(patchResult.Patch)))\n\t\tif err := patch.DefaultAnnotator.SetLastAppliedAnnotation(modified); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tmetaAccessor := meta.NewAccessor()\n\t\tcurrentVersion, err := metaAccessor.ResourceVersion(current)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = metaAccessor.SetResourceVersion(modified, currentVersion)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tprepareResourceForUpdate(current, modified)\n\t\tif err := c.Update(ctx, modified); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to update resource: %w\", err)\n\t\t}\n\t}\n\treturn nil\n}", "func updateFromFile(cLink, fileName, user, passw string, params url.Values, headers map[string]string, client *http.Client) (solrResp *glsolr.Response, err error) {\r\n\tfile, err := os.Open(fileName)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\tdefer file.Close()\r\n\r\n\tsolrResp, err = glsolr.Update(cLink, user, passw, file, params, headers, client)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\treturn solrResp, nil\r\n}", "func (f FileRepo) Update(context context.Context, id string, file model.FileDTO) (string, error) {\n\tobjID, err := primitive.ObjectIDFromHex(id)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfileEntity, err := file.Entity()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tquery := bson.M{\n\t\t\"_id\": objID,\n\t}\n\tupdate := bson.M{\n\t\t\"$set\": fileEntity,\n\t}\n\tvar updateFile model.File\n\terr = f.collection.FindOneAndUpdate(context, query, update).Decode(&updateFile)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn updateFile.ID.Hex(), nil\n}", "func (r *DeviceManagementCachedReportConfigurationRequest) Update(ctx context.Context, reqObj *DeviceManagementCachedReportConfiguration) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func (c *Client) patch(rawURL string, authenticate bool, expectedStatus int, in interface{}, out interface{}) error {\n\terr := c.do(rawURL, \"PATCH\", authenticate, expectedStatus, in, out)\n\treturn errio.Error(err)\n}", "func (c *Client) Patch(rawurl string, in, out interface{}) error {\n\treturn c.Do(rawurl, \"PATCH\", in, out)\n}", "func (conn Connection) Patch(cmd string, content, result interface{}) (effect *SideEffect, resp *http.Response, err error) {\n\treturn conn.Send(http.MethodPatch, cmd, content, result)\n}", "func Patch(url string, data ...interface{}) (*ClientResponse, error) {\n\treturn DoRequest(\"PATCH\", url, data...)\n}", "func (r *Request) Patch(url string) (*Response, error) {\n\treturn r.Execute(MethodPatch, url)\n}", "func (APIResourceBase) Patch(session *Session, url string, queries url.Values, body io.Reader) (APIStatus, interface{}) {\n\treturn FailSimple(http.StatusMethodNotAllowed), nil\n}", "func (r *ExternalConnectionRequest) Update(ctx context.Context, reqObj *ExternalConnection) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func (a *App) Update(w http.ResponseWriter, r *http.Request) {\n\tv := mux.Vars(r)\n\tidentifier := v[\"identifier\"]\n\n\tchange, err := parsebody(r)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif err = change.InitChangeRequest(); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// Generate the configuration.\n\tcfg, err := a.GenerateConfig(change)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 
http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tjsonpath := path.Join(a.ConfigDir, fmt.Sprintf(\"%s.json\", identifier))\n\tconfigpath := path.Join(a.ConfigDir, fmt.Sprintf(\"%s.conf\", identifier))\n\tif _, err = os.Stat(jsonpath); os.IsNotExist(err) {\n\t\terr = errors.Wrapf(err, \"path does not exist: %s\", jsonpath)\n\t\thttp.Error(w, err.Error(), http.StatusNotFound)\n\t\treturn\n\t}\n\n\tif _, err = os.Stat(configpath); os.IsNotExist(err) {\n\t\terr = errors.Wrapf(err, \"path does not exist: %s\", configpath)\n\t\thttp.Error(w, err.Error(), http.StatusNotFound)\n\t\treturn\n\t}\n\n\t// Serialize the JSON file to the filesystem.\n\tjson, err := json.Marshal(change)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif err = ioutil.WriteFile(jsonpath, json, 0644); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// Serialize the configuration file to the filesystem.\n\tif err = ioutil.WriteFile(configpath, cfg, 0644); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif err = a.SignalContainers(); err != nil {\n\t\terr = errors.Wrap(err, \"error HUPing container(s)\")\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func Patch(ctx context.Context, url string, body Body, options ...RequestOption) (*Response, error) {\n\tr, err := newRequest(ctx, http.MethodPatch, url, body, options...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr.Header.Set(\"Content-Type\", body.ContentType())\n\treturn doRequest(http.DefaultClient, r)\n}", "func (e *Engine) PATCH(path string, fn Controller) {\n\te.gin.PATCH(path, adapt(fn))\n}", "func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {\n\terr = o.fs.mkParentDir(ctx, o.filePath())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Update mkParentDir failed: %w\", err)\n\t}\n\n\tif o.shouldUseChunkedUpload(src) {\n\t\tfs.Debugf(src, \"Update will use the chunked upload strategy\")\n\t\terr = o.updateChunked(ctx, in, src, options...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tfs.Debugf(src, \"Update will use the normal upload strategy (no chunks)\")\n\t\tcontentType := fs.MimeType(ctx, src)\n\t\tfilePath := o.filePath()\n\t\textraHeaders := o.extraHeaders(ctx, src)\n\t\t// TODO: define getBody() to enable low-level HTTP/2 retries\n\t\terr = o.updateSimple(ctx, in, nil, filePath, src.Size(), contentType, extraHeaders, o.fs.endpointURL, options...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// read metadata from remote\n\to.hasMetaData = false\n\treturn o.readMetaData(ctx)\n}", "func (r *PresenceRequest) Update(ctx context.Context, reqObj *Presence) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func (me *INVOICES_IMPL) UpdateInvoiceMetadata (\r\n invoiceId string,\r\n body *models_pkg.InvoicesMetadataRequest,\r\n idempotencyKey *string) (*models_pkg.InvoicesMetadataResponse, error) {\r\n //the endpoint path uri\r\n _pathUrl := \"/invoices/{invoice_id}/metadata\"\r\n\r\n //variable to hold errors\r\n var err error = nil\r\n //process optional template parameters\r\n _pathUrl, err = apihelper_pkg.AppendUrlWithTemplateParameters(_pathUrl, map[string]interface{} {\r\n \"invoice_id\" : invoiceId,\r\n })\r\n if err != nil {\r\n //error in template param handling\r\n return nil, err\r\n }\r\n\r\n //the base uri for api requests\r\n _queryBuilder := 
configuration_pkg.BASEURI;\r\n\r\n //prepare query string for API call\r\n _queryBuilder = _queryBuilder + _pathUrl\r\n\r\n //validate and preprocess url\r\n _queryBuilder, err = apihelper_pkg.CleanUrl(_queryBuilder)\r\n if err != nil {\r\n //error in url validation or cleaning\r\n return nil, err\r\n }\r\n //prepare headers for the outgoing request\r\n headers := map[string]interface{} {\r\n \"user-agent\" : \"MundiSDK - Go 2.4.5\",\r\n \"accept\" : \"application/json\",\r\n \"content-type\" : \"application/json; charset=utf-8\",\r\n \"Content-Type\" : \"application/json\",\r\n \"idempotency-key\" : apihelper_pkg.ToString(idempotencyKey, \"\"),\r\n }\r\n\r\n //prepare API request\r\n _request := unirest.PatchWithAuth(_queryBuilder, headers, body, me.config.BasicAuthUserName(), me.config.BasicAuthPassword())\r\n //and invoke the API call request to fetch the response\r\n _response, err := unirest.AsString(_request,false);\r\n if err != nil {\r\n //error in API invocation\r\n return nil, err\r\n }\r\n\r\n //error handling using HTTP status codes\r\n if (_response.Code == 400) {\r\n err = apihelper_pkg.NewAPIError(\"Invalid request\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 401) {\r\n err = apihelper_pkg.NewAPIError(\"Invalid API key\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 404) {\r\n err = apihelper_pkg.NewAPIError(\"An informed resource was not found\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 412) {\r\n err = apihelper_pkg.NewAPIError(\"Business validation error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 422) {\r\n err = apihelper_pkg.NewAPIError(\"Contract validation error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 500) {\r\n err = apihelper_pkg.NewAPIError(\"Internal server error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code < 200) || (_response.Code > 206) { //[200,206] = HTTP OK\r\n err = apihelper_pkg.NewAPIError(\"HTTP Response Not OK\", _response.Code, _response.RawBody)\r\n }\r\n if(err != nil) {\r\n //error detected in status code validation\r\n return nil, err\r\n }\r\n\r\n //returning the response\r\n var retVal *models_pkg.InvoicesMetadataResponse = &models_pkg.InvoicesMetadataResponse{}\r\n err = json.Unmarshal(_response.RawBody, &retVal)\r\n\r\n if err != nil {\r\n //error in parsing\r\n return nil, err\r\n }\r\n return retVal, nil\r\n\r\n}", "func (o *DesktopApp) Update() (*restapi.GenericMapResponse, error) {\n\tif o.ID == \"\" {\n\t\treturn nil, errors.New(\"error: ID is empty\")\n\t}\n\tvar queryArg = make(map[string]interface{})\n\tqueryArg, err := generateRequestMap(o)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tqueryArg[\"_RowKey\"] = o.ID\n\n\tLogD.Printf(\"Generated Map for Update(): %+v\", queryArg)\n\n\tresp, err := o.client.CallGenericMapAPI(o.apiUpdate, queryArg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !resp.Success {\n\t\treturn nil, errors.New(resp.Message)\n\t}\n\n\treturn resp, nil\n}", "func (g *GistFile) Update(interface{}) (*http.Response, error) {\n\tpanic(\"implement me\")\n}", "func (c Client) Update(u UpdateOperation) (success bool, err error) {\n\tu.SetToken(c.token)\n\tvalues := url.Values{}\n\tvalues.Set(\"data\", base64.StdEncoding.EncodeToString([]byte(u.JSON())))\n\treq := url.URL{\n\t\tScheme: Protocol,\n\t\tHost: Host,\n\t\tPath: UpdatePath,\n\t\tRawQuery: values.Encode(),\n\t}\n\tresp, err := http.Get(req.String())\n\tif success = err == nil; !success 
{\n\t\treturn\n\t}\n\tdefer func() {\n\t\terr = resp.Body.Close()\n\t}()\n\trespBody, err := ioutil.ReadAll(resp.Body)\n\tif success = err == nil; !success {\n\t\treturn\n\t}\n\tsuccess = string(respBody) == \"1\"\n\treturn\n}", "func (crowdin *Crowdin) Update(CrowdinFileName string, LocalFileName string, updateOption string) (fileId int, revId int, err error) {\r\n\r\n\tcrowdin.log(fmt.Sprintf(\"Update()\\n\"))\r\n\r\n\t// Lookup fileId in Crowdin\r\n\tfileId, crowdinFilename, err := crowdin.LookupFileId(CrowdinFileName)\r\n\tif err != nil {\r\n\t\tcrowdin.log(fmt.Sprintf(\" err=%s\\n\", err))\r\n\t\treturn 0, 0, err\r\n\t}\r\n\r\n\tcrowdin.log(fmt.Sprintf(\"Update() fileId=%d fileName=%s\\n\", fileId, crowdinFilename))\r\n\r\n\t// Send local file to storageId\r\n\taddStor, err := crowdin.AddStorage(&AddStorageOptions{FileName: LocalFileName})\r\n\tif err != nil {\r\n\t\treturn 0, 0, errors.New(\"Update() - Error adding file to storage.\")\r\n\t}\r\n\tstorageId := addStor.Data.Id\r\n\r\n\t// fmt.Printf(\"Directory Id = %d, filename= %s, fileId %d storageId= %d\\n\", dirId, crowdinFilename, fileId, storageId)\r\n\r\n\t// Update file\r\n\tupdres, err := crowdin.UpdateFile(fileId, &UpdateFileOptions{StorageId: storageId, UpdateOption: updateOption})\r\n\r\n\t// Delete storage\r\n\terr1 := crowdin.DeleteStorage(&DeleteStorageOptions{StorageId: storageId})\r\n\r\n\tif err != nil {\r\n\t\tcrowdin.log(fmt.Sprintf(\"Update() - error updating file %v\", updres))\r\n\t\treturn 0, 0, errors.New(\"Update() - Error updating file.\") //\r\n\t}\r\n\r\n\tif err1 != nil {\r\n\t\tcrowdin.log(fmt.Sprintf(\"Update() - error deleting storage %v\", err1))\r\n\t}\r\n\r\n\trevId = updres.Data.RevisionId\r\n\r\n\tcrowdin.log(fmt.Sprintf(\"Update() - result %v\", updres))\r\n\r\n\treturn fileId, revId, nil\r\n}", "func (e *Expect) PATCH(path string, pathargs ...interface{}) *Request {\n\treturn e.Request(http.MethodPatch, path, pathargs...)\n}", "func (p *Peer) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {\n\treturn content.Info{}, errors.Wrapf(errdefs.ErrFailedPrecondition, \"update not supported on immutable content store\")\n}", "func (s *sshClient) UpdateFile(log zerolog.Logger, filePath string, content []byte, perm os.FileMode) error {\n\tif err := s.EnsureDirectoryOf(log, filePath, perm); err != nil {\n\t\treturn maskAny(err)\n\t}\n\tif _, err := s.Run(log, fmt.Sprintf(\"sudo tee %s\", filePath), string(content), true); err != nil {\n\t\treturn maskAny(err)\n\t}\n\tif _, err := s.Run(log, fmt.Sprintf(\"sudo chmod 0%o %s\", perm, filePath), \"\", true); err != nil {\n\t\treturn maskAny(err)\n\t}\n\treturn nil\n}", "func (e *Entity) Patch(uri string, payload interface{}) error {\n\theader := make(map[string]string)\n\tif e.etag != \"\" {\n\t\theader[\"If-Match\"] = e.etag\n\t}\n\n\tresp, err := e.client.PatchWithHeaders(uri, payload, header)\n\tif err == nil {\n\t\treturn resp.Body.Close()\n\t}\n\treturn err\n}", "func (avisess *AviSession) Patch(uri string, payload interface{}, patchOp string, response interface{}, options ...ApiOptionsParams) error {\n\tvar patchPayload = make(map[string]interface{})\n\tpatchPayload[patchOp] = payload\n\tglog.Infof(\" PATCH OP %v data %v\", patchOp, payload)\n\treturn avisess.restRequestInterfaceResponse(\"PATCH\", uri, patchPayload, response, options...)\n}", "func (r *DeviceRequest) Update(ctx context.Context, reqObj *Device) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func (s *Service) 
calDiff(file *model.ResourceFile) (err error) {\n\tvar (\n\t\tnewPath string\n\t\toldPath string\n\t\tpatchPath string\n\t\tpatchURL string\n\t\tpatchFInfo *model.FileInfo\n\t)\n\t// update the status of the file, to avoid being picked by another\n\tif err = s.chgFStatus(file.ID, _calculationInProgress); err != nil {\n\t\tlog.Error(errFormat, \"calDiff\", \"chgFStatus\", err)\n\t\treturn\n\t}\n\t// save the new file\n\tif newPath, err = s.catchFile(file, 0); err != nil {\n\t\tlog.Error(errFormat, \"calDiff\", \"catchFile\", err)\n\t\treturn\n\t}\n\t// save the old file\n\tif oldPath, err = s.catchFile(file, int(file.FromVer)); err != nil {\n\t\tlog.Error(errFormat, \"calDiff\", \"catchFile\", err)\n\t\treturn\n\t}\n\t// exec bsdiff to get the patch file and upload it\n\tfile.Name = strings.Replace(file.Name, \"/\", _tmpSlash, 1)\n\tpatchPath = s.c.Cfg.Diff.Folder + \"/\" + file.Name\n\tif patchFInfo, patchURL, err = s.diffCmd(file.Name, patchPath, newPath, oldPath); err != nil {\n\t\tlog.Error(errFormat, \"calDiff\", \"diffCmd\", err)\n\t\ts.chgFStatus(file.ID, _diffPackge)\n\t\treturn\n\t}\n\tlog.Info(\"Upload Patch File From [%s] to [%s]\", patchPath, patchURL)\n\t// save the url to the file\n\tif err = s.dao.SaveFile(ctx, file.ID, &model.FileInfo{\n\t\tName: patchFInfo.Name,\n\t\tSize: patchFInfo.Size,\n\t\tMd5: patchFInfo.Md5,\n\t\tURL: patchURL}); err != nil {\n\t\tlog.Error(errFormat, \"calDiff\", \"updateURL\", err)\n\t\treturn\n\t}\n\t// delete all the packages used\n\tif err = delPkgs(newPath, oldPath, patchPath); err != nil {\n\t\tlog.Error(errFormat, \"calDiff\", \"delPkgs\", err)\n\t}\n\treturn\n}", "func (tr *Transport) PATCH(\n\turl string,\n\tfn Handler,\n\toptions ...HandlerOption,\n) {\n\ttr.mux.Handler(\n\t\tnet_http.MethodPatch,\n\t\turl,\n\t\tnewHandler(fn, append(tr.options, options...)...),\n\t)\n}", "func (r *Router) PATCH(url string, viewFn View) *Path {\n\treturn r.Path(fasthttp.MethodPatch, url, viewFn)\n}", "func (w *Worker) Patch(c *http.Client, url string, data interface{}, bind interface{}) (int, error) {\n\tbs, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treq, err := http.NewRequest(\"PATCH\", url, bytes.NewReader(bs))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tres, err := c.Do(req)\n\tif err != nil {\n\t\tif res != nil {\n\t\t\tioutil.ReadAll(res.Body)\n\t\t\tres.Body.Close()\n\t\t}\n\t\treturn 0, err\n\t}\n\tdefer res.Body.Close()\n\terr = json.NewDecoder(res.Body).Decode(bind)\n\tif res.StatusCode == http.StatusNoContent || bind == nil {\n\t\treturn res.StatusCode, nil\n\t}\n\treturn res.StatusCode, err\n}", "func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {\n\treturn errorReadOnly\n}", "func (leaf *Node) updateContent() (err error) {\n\tif leaf.Size > MaxFileSize {\n\t\treturn nil\n\t}\n\n\tleaf.V, err = ioutil.ReadFile(leaf.SysPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (r *DeviceManagementExchangeConnectorRequest) Update(ctx context.Context, reqObj *DeviceManagementExchangeConnector) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func marshalUpdateInterconnectPatchRequest(c *Client, m map[string]interface{}) ([]byte, error) {\n\n\treturn json.Marshal(m)\n}", "func (r *DeviceManagementRequest) Update(ctx context.Context, reqObj *DeviceManagement) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func (handler 
*ObjectWebHandler) Update(w http.ResponseWriter, r *http.Request) {\n\trespondWithError(w, http.StatusNotImplemented, \"Not implemented\", nil)\n}", "func (client MSIXPackagesClient) UpdateResponder(resp *http.Response) (result MSIXPackage, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func UpdatePatchTrafficInfluenceSubscription(w http.ResponseWriter,\n\tr *http.Request) {\n\n\tnefCtx := r.Context().Value(nefCtxKey(\"nefCtx\")).(*nefContext)\n\tnef := &nefCtx.nef\n\n\tvars := mux.Vars(r)\n\tlog.Infof(\" AFID : %s\", vars[\"afId\"])\n\tlog.Infof(\" SUBSCRIPTION ID : %s\", vars[\"subscriptionId\"])\n\n\taf, ok := nef.nefGetAf(vars[\"afId\"])\n\tif ok == nil {\n\n\t\tb, err := ioutil.ReadAll(r.Body)\n\n\t\tdefer closeReqBody(r)\n\n\t\tif err != nil {\n\t\t\tlog.Err(err)\n\t\t\tsendCustomeErrorRspToAF(w, 400, \"Failed to read HTTP PATCH Body\")\n\t\t\treturn\n\t\t}\n\n\t\t//Traffic Influence Sub Patch data\n\t\tTrInSPBody := TrafficInfluSubPatch{}\n\n\t\t//Convert the json Traffic Influence data into struct\n\t\terr1 := json.Unmarshal(b, &TrInSPBody)\n\n\t\tif err1 != nil {\n\t\t\tlog.Err(err1)\n\t\t\tsendCustomeErrorRspToAF(w, 400, \"Failed UnMarshal PATCH data\")\n\t\t\treturn\n\t\t}\n\n\t\trsp, ti, err := af.afPartialUpdateSubscription(nefCtx,\n\t\t\tvars[\"subscriptionId\"], TrInSPBody)\n\n\t\tif err != nil {\n\t\t\tsendErrorResponseToAF(w, rsp)\n\t\t\treturn\n\t\t}\n\n\t\tmdata, err2 := json.Marshal(ti)\n\n\t\tif err2 != nil {\n\t\t\tlog.Err(err2)\n\t\t\tsendCustomeErrorRspToAF(w, 400,\n\t\t\t\t\"Failed to Marshal PATCH response data\")\n\t\t\treturn\n\n\t\t}\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\n\t\t_, err = w.Write(mdata)\n\t\tif err != nil {\n\t\t\tlog.Errf(\"Write Failed: %v\", err)\n\t\t}\n\t\treturn\n\t}\n\n\tlog.Infoln(ok)\n\tsendCustomeErrorRspToAF(w, 404, \"Failed to find AF records\")\n}", "func (c *SubresourceClient) Patch(namespace string, name string, data []byte) (e error) {\n\tif c.Error != \"\" {\n\t\te = fmt.Errorf(c.Error)\n\t}\n\treturn\n}", "func (res *Resource) Patch(storage store.Update) {\n\tres.HandleFuncC(\n\t\tpat.Patch(patID),\n\t\tfunc(ctx context.Context, w http.ResponseWriter, r *http.Request) {\n\t\t\tres.patchHandler(ctx, w, r, storage)\n\t\t},\n\t)\n\n\tres.addRoute(patch, patID)\n}", "func (r *run) updatePhoto(ctx context.Context, parent *importer.Object, ph photo) (ret error) {\n\tif ph.ID == \"\" {\n\t\treturn errors.New(\"photo has no ID\")\n\t}\n\n\t// fileRefStr, in addition to being used as the camliConent value, is used\n\t// as a sentinel: if it is still blank after the call to\n\t// ChildPathObjectOrFunc, it means that a permanode for the photo object\n\t// already exists.\n\tvar fileRefStr string\n\t// picasAttrs holds the attributes of the picasa node for the photo, if any is found.\n\tvar picasAttrs url.Values\n\n\tfilename := ph.filename()\n\n\tphotoNode, err := parent.ChildPathObjectOrFunc(ph.ID, func() (*importer.Object, error) {\n\t\th := blob.NewHash()\n\t\trc, err := r.downloader.openPhoto(ctx, ph)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfileRef, err := schema.WriteFileFromReader(r.Host.Target(), filename, io.TeeReader(rc, h))\n\t\trc.Close()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfileRefStr = fileRef.String()\n\t\twholeRef := 
blob.RefFromHash(h)\n\tpn, attrs, err := findExistingPermanode(r.Host.Searcher(), wholeRef)\n\tif err != nil {\n\t\tif err != os.ErrNotExist {\n\t\t\treturn nil, fmt.Errorf(\"could not look for permanode with %v as camliContent : %v\", fileRefStr, err)\n\t\t}\n\t\treturn r.Host.NewObject()\n\t}\n\tif attrs != nil {\n\t\tpicasAttrs = attrs\n\t}\n\treturn r.Host.ObjectFromRef(pn)\n\t})\n\tif err != nil {\n\t\tif fileRefStr != \"\" {\n\t\t\treturn fmt.Errorf(\"error getting permanode for photo %q, with content %v: %v\", ph.ID, fileRefStr, err)\n\t\t}\n\t\treturn fmt.Errorf(\"error getting permanode for photo %q: %v\", ph.ID, err)\n\t}\n\n\tif fileRefStr == \"\" {\n\t\t// photoNode was created in a previous run, but it is not\n\t\t// guaranteed its attributes were set. e.g. the importer might have\n\t\t// been interrupted. So we check for an existing camliContent.\n\t\tif camliContent := photoNode.Attr(nodeattr.CamliContent); camliContent == \"\" {\n\t\t\t// looks like an incomplete node, so we need to re-download.\n\t\t\trc, err := r.downloader.openPhoto(ctx, ph)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfileRef, err := schema.WriteFileFromReader(r.Host.Target(), filename, rc)\n\t\t\trc.Close()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfileRefStr = fileRef.String()\n\t\t}\n\t} else {\n\t\tif picasAttrs.Get(nodeattr.CamliContent) != \"\" {\n\t\t\t// We've just created a new file schema, but we're also recycling a\n\t\t\t// picasa node, and we prefer keeping the existing file schema from the\n\t\t\t// picasa node, because the file from Drive never gets updates\n\t\t\t// (https://productforums.google.com/forum/#!msg/drive/HbNOd1o40CQ/VfIJCncyAAAJ).\n\t\t\t// Thanks to blob deduplication, these two file schemas are most likely\n\t\t\t// the same anyway. If not, the newly created one will/should get GCed\n\t\t\t// eventually.\n\t\t\tfileRefStr = picasAttrs.Get(nodeattr.CamliContent)\n\t\t}\n\t}\n\n\tattrs := []string{\n\t\tattrDriveId, ph.ID,\n\t\tnodeattr.Version, strconv.FormatInt(ph.Version, 10),\n\t\tnodeattr.Title, ph.title(picasAttrs.Get(nodeattr.Title)),\n\t\tnodeattr.Description, orAltAttr(ph.Description, picasAttrs.Get(nodeattr.Description)),\n\t\tnodeattr.DateCreated, schema.RFC3339FromTime(ph.CreatedTime),\n\t\tnodeattr.DateModified, orAltAttr(schema.RFC3339FromTime(ph.ModifiedTime), picasAttrs.Get(nodeattr.DateModified)),\n\t\t// Even if the node already had some nodeattr.URL picasa attribute, it's\n\t\t// ok to overwrite it, because from what I've tested it's useless nowadays\n\t\t// (gives a 404 in a browser). 
Plus, we don't overwrite the actually useful\n\t\t// \"picasaMediaURL\" attribute.\n\t\tnodeattr.URL, ph.WebContentLink,\n\t}\n\n\tif ph.Location != nil {\n\t\tif ph.Location.Altitude != 0 {\n\t\t\tattrs = append(attrs, nodeattr.Altitude, floatToString(ph.Location.Altitude))\n\t\t}\n\t\tif ph.Location.Latitude != 0 || ph.Location.Longitude != 0 {\n\t\t\tattrs = append(attrs,\n\t\t\t\tnodeattr.Latitude, floatToString(ph.Location.Latitude),\n\t\t\t\tnodeattr.Longitude, floatToString(ph.Location.Longitude),\n\t\t\t)\n\t\t}\n\t}\n\tif err := photoNode.SetAttrs(attrs...); err != nil {\n\t\treturn err\n\t}\n\n\tif fileRefStr != \"\" {\n\t\t// camliContent is set last, as its presence defines whether we consider a\n\t\t// photo successfully updated.\n\t\tif err := photoNode.SetAttr(nodeattr.CamliContent, fileRefStr); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (r *DeviceComplianceActionItemRequest) Update(ctx context.Context, reqObj *DeviceComplianceActionItem) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}", "func (r *DeviceManagementTemplateRequest) Update(ctx context.Context, reqObj *DeviceManagementTemplate) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}" ]
[ "0.6554185", "0.6069797", "0.6045703", "0.6006631", "0.5952792", "0.59269416", "0.59181356", "0.5916743", "0.5810968", "0.5754984", "0.57314044", "0.5704721", "0.5700456", "0.56991684", "0.5699168", "0.5695067", "0.56792235", "0.5673", "0.56656206", "0.5664543", "0.5654653", "0.5652608", "0.5648224", "0.5647633", "0.5640716", "0.5637625", "0.5622347", "0.5606691", "0.5553883", "0.5527345", "0.5526686", "0.55251527", "0.552449", "0.55162424", "0.550456", "0.550071", "0.54985946", "0.54966015", "0.5483391", "0.5478806", "0.5476352", "0.547001", "0.546968", "0.5462303", "0.5461642", "0.5460578", "0.54574895", "0.54543775", "0.5453632", "0.54422754", "0.54382485", "0.54280317", "0.5426419", "0.54179794", "0.5410016", "0.5399143", "0.5398582", "0.5375771", "0.537515", "0.5367799", "0.53641295", "0.5364124", "0.5344573", "0.5342502", "0.5338231", "0.5335825", "0.5331259", "0.53309035", "0.53267735", "0.53224355", "0.53073233", "0.53041655", "0.5294919", "0.52946377", "0.52914", "0.52896005", "0.5285921", "0.5283237", "0.5281345", "0.52796614", "0.52775836", "0.5260774", "0.5259094", "0.52500856", "0.52500045", "0.52478904", "0.52251464", "0.5216403", "0.5216107", "0.5215786", "0.52007306", "0.51997304", "0.5193268", "0.51914257", "0.5190335", "0.518683", "0.5185294", "0.5178719", "0.51780945", "0.51774883" ]
0.830765
0
Delete performs DELETE request for MobileAppContentFile
func (r *MobileAppContentFileRequest) Delete(ctx context.Context) error { return r.JSONRequest(ctx, "DELETE", "", nil, nil) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func DeleteFile(w http.ResponseWriter, r *http.Request) {\n\tvar body datastructures.DeleteBody\n\n\tif reqBody, err := ioutil.ReadAll(r.Body); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(500)\n\t\tw.Write([]byte(\"Could not read request body\"))\n\t\treturn\n\t} else if err = json.Unmarshal(reqBody, &body); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(400)\n\t\tw.Write([]byte(\"Bad request\"))\n\t\treturn\n\t}\n\n\tlog.Println(\n\t\t\"Delete file request for\",\n\t\t\"workspace\", body.Workspace.ToString(),\n\t\t\"path\", filepath.Join(body.Path...),\n\t)\n\n\tif err := utils.DeleteFile(body.Workspace, body.Path); err != nil {\n\t\tlog.Println(err)\n\t}\n}", "func (userdata *User) DeleteFile(eKey []byte, uuid userlib.UUID) (err error) {\n\t//get the file struct\n\t//get the file struct from the datastore\n\tencryptedHeader, _ := userlib.DatastoreGet(uuid)\n\tvar store Stored\n\tjson.Unmarshal(encryptedHeader, &store)\n\n\tfile_header := userlib.SymDec(eKey, store.EncryptedM)\n\tfile_header = dePad(file_header)\n\n\tvar header File\n\t//unmarshal the data structure\n\tjson.Unmarshal(file_header, &header)\n\n\t//loop through the File struct list and get all the blocks of the file\n\tfor _, fileUUID := range header.Files {\n\t\t//CHECK HMAC STUFF\n\t\tuserlib.DatastoreDelete(fileUUID)\n\t}\n\treturn nil\n}", "func (m *FileRequestBuilder) Delete(ctx context.Context, requestConfiguration *FileRequestBuilderDeleteRequestConfiguration)(error) {\n requestInfo, err := m.CreateDeleteRequestInformation(ctx, requestConfiguration);\n if err != nil {\n return err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n }\n err = m.requestAdapter.SendNoContentAsync(ctx, requestInfo, errorMapping)\n if err != nil {\n return err\n }\n return nil\n}", "func (storage *B2Storage) DeleteFile(threadIndex int, filePath string) (err error) {\n\n if strings.HasSuffix(filePath, \".fsl\") {\n filePath = filePath[:len(filePath) - len(\".fsl\")]\n entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, true)\n if err != nil {\n return err\n }\n\n toBeDeleted := false\n\n for _, entry := range entries {\n if entry.FileName != filePath || (!toBeDeleted && entry.Action != \"hide\" ) {\n continue\n }\n\n toBeDeleted = true\n\n err = storage.clients[threadIndex].DeleteFile(filePath, entry.FileID)\n if err != nil {\n return err\n }\n }\n\n return nil\n\n } else {\n entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, false)\n if err != nil {\n return err\n }\n\n if len(entries) == 0 {\n return nil\n }\n return storage.clients[threadIndex].DeleteFile(filePath, entries[0].FileID)\n }\n}", "func (s *PublicStorageServer) Delete(ctx context.Context, url *pbs.FileURL) (*emptypb.Empty, error) {\n\tvar obj file.MinioObj\n\tif err := obj.FromURL(url.Url); err != nil {\n\t\treturn nil, err\n\t}\n\n\terr := services.MinioClient.RemoveObject(context.Background(), \"public\", obj.ObjectName, minio.RemoveObjectOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Printf(\"Delete from url: %s\", obj.URL)\n\n\treturn &emptypb.Empty{}, nil\n}", "func FileDeleteHandler(w http.ResponseWriter, r *http.Request) {\n\tr.ParseForm()\n\n\tfileSha1 := r.Form.Get(\"filehash\")\n\tfileMeta := 
meta.GetFileMeta(fileSha1)\n\n\t//delete file from disk\n\terr := os.Remove(fileMeta.FileAddr)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// delete file index\n\tmeta.RemoveFileMeta(fileSha1)\n\n\tw.WriteHeader(http.StatusOK)\n}", "func (c *client) Delete(_ context.Context, request *blobstore.DeleteRequest) (*blobstore.DeleteResponse, error) {\n\tif err := os.Remove(c.bodyPath(request.Key)); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := os.Remove(c.tagsPath(request.Key)); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &blobstore.DeleteResponse{}, nil\n}", "func (c *UploadController) Delete() {\n\timg := struct {\n\t\tFileName string `json:\"fileName\"`\n\t}{}\n\terr := json.Unmarshal(c.Ctx.Input.RequestBody, &img)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\t// remove thumbnail\n\terr = os.Remove(thumbnailsFolder + img.FileName)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\t// remove main image\n\terr = os.Remove(imagesFolder + img.FileName)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tc.Data[\"json\"] = img\n\tc.ServeJSON()\n}", "func Delete(w http.ResponseWriter, r *http.Request) {\n\t// get user info\n\tusername := r.Context().Value(\"username\")\n\tif username == nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// retrieve photo id from api call\n\tvar requestedPhoto Photo\n\terr := json.NewDecoder(r.Body).Decode(&requestedPhoto)\n\tif err != nil {\n\t\tw.Write([]byte(\"Missing PhotoID or IsPublic attribute\"))\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif requestedPhoto.ID == \"\" {\n\t\tw.Write([]byte(\"PhotoID not provided in request body\"))\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// make sure photo exists\n\tvar photos []Photo\n\tDB.Where(&Photo{ID: requestedPhoto.ID}).Find(&photos)\n\n\tif len(photos) > 1 {\n\t\tw.Write([]byte(\"Multiple photos returned\"))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\n\t}\n\n\tif len(photos) == 0 {\n\t\tw.Write([]byte(\"No photos returned\"))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tphoto := photos[0]\n\n\t// Make sure photo belongs to user\n\tuserID, err := GetUserGUID(username.(string))\n\tif photo.UserID != *userID {\n\t\tw.Write([]byte(\"photo does not belong to user\"))\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// delete photo from photos table\n\tDB.Delete(&photo)\n\n\t// delete file from bucket\n\timageFile := Client.Bucket(getBucketForPhoto(photo)).Object(photo.ID)\n\tif err = imageFile.Delete(r.Context()); err != nil {\n\t\terr = fmt.Errorf(\"Object(%q).Delete: %v\", photo.ID, err)\n\t\tw.Write([]byte(err.Error()))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Write([]byte(\"photo deleted\"))\n\tw.WriteHeader(http.StatusOK)\n}", "func (f FileURL) Delete(ctx context.Context) (*FileDeleteResponse, error) {\n\treturn f.fileClient.Delete(ctx, nil)\n}", "func (file *File) Delete() (err error) {\n\treturn file.upload.client.removeFile(file.upload.getParams(), file.getParams())\n}", "func deleteHandler(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"Received a content request. 
Preparing response\")\n\tresp := response{\n\t\tSuccessful: false,\n\t\tErrMsg: errors.New(\"Unknown failure\"),\n\t}\n\twriter := json.NewEncoder(w)\n\tdefer writer.Encode(resp)\n\n\t// Parse the request.\n\tfmt.Println(\"Parsing request\")\n\tdata := form{}\n\tresp.ErrMsg = json.NewDecoder(r.Body).Decode(&data)\n\n\tfmt.Println(\"Obtained following data: \")\n\tfmt.Printf(\"%+v\\n\", data)\n\n\t// Validate requestor token\n\tvalid := true\n\tvalid, resp.ErrMsg = session.Validate(data.User, data.Token)\n\n\tfilepath = path + data.ID + data.ContentType\n\n\tmodErr = update.ModifyContentFilePath(\"\")\n\tdeleteErr = delete(filepath)\n\n\tif modErr != nil {\n\t\tresp.ErrMsg = modErr\n\t} else {\n\t\tresp.ErrMsg = deleteErr\n\t}\n\n}", "func _file_delete(call otto.FunctionCall) otto.Value {\n\tpath, _ := call.Argument(0).ToString()\n\n\terr := os.RemoveAll(path)\n\tif err != nil {\n\t\tjsThrow(call, err)\n\t}\n\treturn otto.Value{}\n}", "func (r *Request) Delete(path, contentType string, data ...interface{}) {\n\tr.Send(\"DELETE\", path, contentType, data...)\n}", "func DeleteContent(url string, data ...interface{}) string {\n\treturn RequestContent(\"DELETE\", url, data...)\n}", "func TestDeleteFile(t *testing.T) {\n\tfmt.Println(\"Get File info test\")\n\tcc := getClientConnection()\n\n\t// when done the connection will be close.\n\tdefer cc.Close()\n\n\t// Create a new client service...\n\tc := filepb.NewFileServiceClient(cc)\n\n\trqst := &filepb.DeleteFileRequest{\n\t\tPath: \"C:\\\\Temp\\\\toto.bmp\",\n\t}\n\n\trsp, err := c.DeleteFile(context.Background(), rqst)\n\tif err != nil {\n\t\tlog.Fatalf(\"error while testing get file info: %v\", err)\n\t}\n\n\tlog.Println(\"Delete file succeed:\", rsp.Result)\n}", "func DeleteFile(res http.ResponseWriter, req *http.Request, params httprouter.Params) {\n\tphotoID := params.ByName(\"id\")\n\n\tphoto := Photo{}\n\tdb.Get().First(&photo, \"id = ?\", photoID)\n\n\terr := os.Remove(photo.Path)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdb.Get().Delete(&photo)\n\n\thttp.Redirect(res, req, \"/home\", http.StatusSeeOther)\n}", "func delete(resource string, id string) ([]byte, error) {\n\thttpParams := &HTTPParams{\n\t\tResource: resource + \"/\" + id,\n\t\tVerb: \"DELETE\",\n\t}\n\treturn processRequest(httpParams)\n}", "func (api *MediaApi) delete(c *routing.Context) error {\n\tid := c.Param(\"id\")\n\n\tmodel, fetchErr := api.dao.GetByID(id)\n\tif fetchErr != nil {\n\t\treturn utils.NewNotFoundError(fmt.Sprintf(\"Media item with id \\\"%v\\\" doesn't exist!\", id))\n\t}\n\n\tdeleteErr := api.dao.Delete(model)\n\tif deleteErr != nil {\n\t\treturn utils.NewBadRequestError(\"Oops, an error occurred while deleting Language item.\", deleteErr)\n\t}\n\n\tc.Response.WriteHeader(http.StatusNoContent)\n\n\treturn nil\n}", "func (u *App) Delete(c echo.Context, id string) error {\n\tfile, err := u.udb.View(u.db, id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := u.rbac.EnforceUser(c, file.UserID); err != nil {\n\t\treturn err\n\t}\n\n\tif file.Type == model.ResourceApplication {\n\t\tif err = u.rbac.EnforceRole(c, model.OperatorRole); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tgo model.DeleteFiles(&[]model.File{*file})\n\treturn u.udb.Delete(u.db, id)\n}", "func (r *VCMPResource) Delete(id string) error {\n\tif err := r.c.ModQuery(\"DELETE\", BasePath+VCMPEndpoint+\"/\"+id, nil); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (conn Connection) Delete(cmd string, content, result interface{}) (resp *http.Response, err error) {\n\treturn 
conn.Send(http.MethodDelete, cmd, content, result)\n}", "func DeleteHandler(basePath string, w http.ResponseWriter, r *http.Request) {\n\tFilesLock.RLock()\n\tval, ok := Files.Get(fileKey(r.URL))\n\tFilesLock.RUnlock()\n\n\tif !ok {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\tf := val.(*File)\n\n\tFilesLock.Lock()\n\tFiles.Remove(fileKey(r.URL))\n\tFilesLock.Unlock()\n\n\tf.RemoveFromDisk(basePath)\n\tw.WriteHeader(http.StatusNoContent)\n}", "func (m *ItemPhotoRequestBuilder) Delete(ctx context.Context, requestConfiguration *ItemPhotoRequestBuilderDeleteRequestConfiguration)(error) {\n requestInfo, err := m.ToDeleteRequestInformation(ctx, requestConfiguration);\n if err != nil {\n return err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n }\n err = m.BaseRequestBuilder.RequestAdapter.SendNoContent(ctx, requestInfo, errorMapping)\n if err != nil {\n return err\n }\n return nil\n}", "func (c *Client) Delete(path string) error {\n\t_, _, err := c.sendRequest(\"DELETE\", fmt.Sprintf(\"files/%s/%s\", c.Username, path), nil, nil, nil)\n\treturn err\n}", "func (httpfs *FS) Delete(ctx context.Context, path string) (err error) {\n\treturn qfs.ErrReadOnly\n}", "func deleteFile(ctx context.Context, baseDir, hash, extension string) error {\n\t// Verify object exists.\n\tfilepath := getPathByHash(ctx, baseDir, hash, extension)\n\n\treturn os.Remove(filepath)\n}", "func (f *UploadedFiles) Del(fileId string) {\n\thave := false\n\tindex := 0\n\tfor i := 0; i < len(f.Items); i++ {\n\t\t_fileId,_ := url.QueryUnescape(f.Items[i].FileId) // 兼容性删除\n\t\tif f.Items[i].FileId == fileId || _fileId == fileId {\n\t\t\thave = true\n\t\t\tindex = i\n\t\t\tbreak\n\t\t}\n\t}\n\tif have {\n\t\tf.mu.Lock()\n\t f.Items=append(f.Items[:index],f.Items[index+1:]...)\n\t f.mu.Unlock()\n\t log.Println(\"after delete\")\n\t}\n}", "func (c *Content) Delete() error {\n\treturn os.Remove(c.Path)\n}", "func (c *Client) Delete(url string, headers, queryParams map[string][]string, data interface{}) (response *http.Response, err error) {\n\treturn c.makeRequest(url, http.MethodDelete, headers, queryParams, data)\n}", "func (c *ServerConn) Delete(path string) error {\n\t_, _, err := c.cmd(StatusRequestedFileActionOK, \"DELE %s\", path)\n\treturn err\n}", "func (r *ExternalRequest) Delete(ctx context.Context) error {\n\treturn r.JSONRequest(ctx, \"DELETE\", \"\", nil, nil)\n}", "func deleteDoc(c *gin.Context) {\n\tkey := c.Params.ByName(\"id\")\n\terr := os.Remove(dataDir + \"/\" + key)\n\tif err != nil {\n\t\tlog.Printf(\"Error removing document: %s\", err.Error())\n\t\tc.JSON(statusErr, newErrorResp(key, \"error removing document\", err))\n\t} else {\n\t\terr = deleteMetadata(key)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error removing metadata: %s\", err.Error())\n\t\t\tc.JSON(statusErr, newErrorResp(key, \"error removing metadata\", err))\n\t\t} else {\n\t\t\tc.JSON(statusOk, newSuccessResp(key, \"removed document\"))\n\t\t}\n\t}\n}", "func (f5 *f5LTM) delete(url string, result interface{}) error {\n\treturn f5.restRequest(\"DELETE\", url, nil, result)\n}", "func (c *Client) Delete(d core.Digest) error {\n\t_, err := httputil.Delete(fmt.Sprintf(\"http://%s/blobs/%s\", c.addr, d))\n\treturn err\n}", "func DeleteFile(w 
http.ResponseWriter, r *http.Request) {\n\t//log\n\tnow, userIP := globalPkg.SetLogObj(r)\n\tlogobj := logpkg.LogStruct{\"_\", now, userIP, \"macAdress\", \"DeleteFile\", \"file\", \"_\", \"_\", \"_\", 0}\n\tvar obj RetrieveBody\n\tdecoder := json.NewDecoder(r.Body)\n\tdecoder.DisallowUnknownFields()\n\terr := decoder.Decode(&obj)\n\tif err != nil {\n\t\tglobalPkg.SendError(w, \"please enter your correct request\")\n\t\tglobalPkg.WriteLog(logobj, \"please enter your correct request\", \"failed\")\n\t\treturn\n\t}\n\ttime.Sleep(time.Millisecond * 10)\n\tacc := account.GetAccountByAccountPubicKey(obj.Publickey)\n\tif acc.AccountPublicKey != obj.Publickey {\n\t\tglobalPkg.SendError(w, \"error in public key\")\n\t\tglobalPkg.WriteLog(logobj, \"error in public key\", \"failed\")\n\t\treturn\n\t}\n\tif acc.AccountPassword != obj.Password {\n\t\tglobalPkg.SendError(w, \"error in password\")\n\t\tglobalPkg.WriteLog(logobj, \"error in password\", \"failed\")\n\t\treturn\n\t}\n\t// \ttnow := globalPkg.UTCtime()\n\t// \tt, _ := time.Parse(\"2006-01-02T15:04:05Z07:00\", obj.Time)\n\t// \ttfile := globalPkg.UTCtimefield(t)\n\t// \ttimeDifference := tnow.Sub(tfile).Seconds()\n\t// \tif timeDifference > float64(globalPkg.GlobalObj.TxValidationTimeInSeconds) {\n\t// \t\tglobalPkg.SendError(w, \"please check your time\")\n\t// \t\tglobalPkg.WriteLog(logobj, \"please check your time\", \"failed\")\n\t// \t\treturn\n\t// \t}\n\n\t// Signture string\n\tvalidSig := false\n\tpk := account.FindpkByAddress(acc.AccountPublicKey).Publickey\n\tif pk != \"\" {\n\t\tpublickey := cryptogrpghy.ParsePEMtoRSApublicKey(pk)\n\t\t// signatureData := obj.FileID + obj.Publickey + obj.Password + obj.Time\n\t\tsignatureData := obj.Publickey + obj.Password + obj.FileID\n\t\tvalidSig = cryptogrpghy.VerifyPKCS1v15(obj.Signture, signatureData, *publickey)\n\t\tvalidSig = true\n\t} else {\n\t\tvalidSig = false\n\t}\n\tif !validSig {\n\t\tglobalPkg.SendError(w, \"you are not allowed to delete\")\n\t\tglobalPkg.WriteLog(logobj, \"you are not allowed to delete\", \"failed\")\n\t\treturn\n\t}\n\t// check user own this file id\n\tfiles := acc.Filelist\n\tfound := false\n\tfoundtx := false\n\tvar selectedFile accountdb.FileList\n\tfor _, file := range files {\n\t\tif file.Fileid == obj.FileID {\n\t\t\tfound = true\n\t\t\tselectedFile = file\n\t\t\tbreak\n\t\t}\n\t}\n\tif !found {\n\t\tglobalPkg.SendError(w, \"You don't have this file\")\n\t\tglobalPkg.WriteLog(logobj, \"You don't have this file\", \"failed\")\n\t\treturn\n\t}\n\n\t//check files in transaction pool with status deleted\n\ttxs := transaction.Pending_transaction\n\tfor _, tx := range txs {\n\t\t// owned files => owner delete files and get explore files\n\t\tif tx.SenderPK == acc.AccountPublicKey {\n\t\t\tfileObj := tx.Transaction.Filestruct\n\t\t\tif fileObj.FileSize != 0 && fileObj.Deleted == true && fileObj.Fileid == obj.FileID {\n\t\t\t\tfoundtx = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif foundtx {\n\t\tglobalPkg.SendError(w, \"You don't have this file!\")\n\t\tglobalPkg.WriteLog(logobj, \"You don't have this file\", \"failed\")\n\t\treturn\n\t}\n\n\tdecryptIndexBlock1 := cryptogrpghy.KeyDecrypt(\"123456789\", selectedFile.Blockindex)\n\tfmt.Println(\"Block Index to be deletd data \", decryptIndexBlock1)\n\tblkObj := block.GetBlockInfoByID(decryptIndexBlock1)\n\tvar fStrct filestorage.FileStruct\n\tfor _, tx := range blkObj.BlockTransactions {\n\t\tfStrct = tx.Filestruct\n\t\tif fStrct.Fileid == selectedFile.Fileid {\n\t\t\tfStrct = 
tx.Filestruct\n\t\t\tbreak\n\t\t}\n\t}\n\tfStrct.Deleted = true\n\tfStrct.Transactionid = globalPkg.CreateHash(fStrct.Timefile, fmt.Sprintf(\"%s\", fStrct), 3)\n\t// add on trsansaction pool\n\tbroadcastTcp.BoardcastingTCP(fStrct, \"deletefile\", \"file\")\n\t// delete file if share from share file table\n\n\tglobalPkg.SendResponseMessage(w, \"File Deleted Successfully\")\n\tglobalPkg.WriteLog(logobj, \"File Deleted Successfully\", \"success\")\n}", "func DeleteFile(w http.ResponseWriter, r *http.Request) {\n\t//log\n\tnow, userIP := globalPkg.SetLogObj(r)\n\tlogobj := logpkg.LogStruct{\"_\", now, userIP, \"macAdress\", \"DeleteFile\", \"file\", \"_\", \"_\", \"_\", 0}\n\tvar obj RetrieveBody\n\tdecoder := json.NewDecoder(r.Body)\n\tdecoder.DisallowUnknownFields()\n\terr := decoder.Decode(&obj)\n\tif err != nil {\n\t\tglobalPkg.SendError(w, \"please enter your correct request\")\n\t\tglobalPkg.WriteLog(logobj, \"please enter your correct request\", \"failed\")\n\t\treturn\n\t}\n\ttime.Sleep(time.Millisecond * 10)\n\tacc := account.GetAccountByAccountPubicKey(obj.Publickey)\n\tif acc.AccountPublicKey != obj.Publickey {\n\t\tglobalPkg.SendError(w, \"error in public key\")\n\t\tglobalPkg.WriteLog(logobj, \"error in public key\", \"failed\")\n\t\treturn\n\t}\n\tif acc.AccountPassword != obj.Password {\n\t\tglobalPkg.SendError(w, \"error in password\")\n\t\tglobalPkg.WriteLog(logobj, \"error in password\", \"failed\")\n\t\treturn\n\t}\n\t// \ttnow := globalPkg.UTCtime()\n\t// \tt, _ := time.Parse(\"2006-01-02T15:04:05Z07:00\", obj.Time)\n\t// \ttfile := globalPkg.UTCtimefield(t)\n\t// \ttimeDifference := tnow.Sub(tfile).Seconds()\n\t// \tif timeDifference > float64(globalPkg.GlobalObj.TxValidationTimeInSeconds) {\n\t// \t\tglobalPkg.SendError(w, \"please check your time\")\n\t// \t\tglobalPkg.WriteLog(logobj, \"please check your time\", \"failed\")\n\t// \t\treturn\n\t// \t}\n\n\t// Signture string\n\tvalidSig := false\n\tpk := account.FindpkByAddress(acc.AccountPublicKey).Publickey\n\tif pk != \"\" {\n\t\tpublickey := cryptogrpghy.ParsePEMtoRSApublicKey(pk)\n\t\t// signatureData := obj.FileID + obj.Publickey + obj.Password + obj.Time\n\t\tsignatureData := obj.Publickey + obj.Password + obj.FileID\n\t\tvalidSig = cryptogrpghy.VerifyPKCS1v15(obj.Signture, signatureData, *publickey)\n\t\t// validSig = true\n\t} else {\n\t\tvalidSig = false\n\t}\n\tif !validSig {\n\t\tglobalPkg.SendError(w, \"you are not allowed to delete\")\n\t\tglobalPkg.WriteLog(logobj, \"you are not allowed to delete\", \"failed\")\n\t\treturn\n\t}\n\t// check user own this file id\n\tfiles := acc.Filelist\n\tfound := false\n\tfoundtx := false\n\tvar selectedFile accountdb.FileList\n\tfor _, file := range files {\n\t\tif file.Fileid == obj.FileID {\n\t\t\tfound = true\n\t\t\tselectedFile = file\n\t\t\tbreak\n\t\t}\n\t}\n\tif !found {\n\t\tglobalPkg.SendError(w, \"You don't have this file\")\n\t\tglobalPkg.WriteLog(logobj, \"You don't have this file\", \"failed\")\n\t\treturn\n\t}\n\n\t//check files in transaction pool with status deleted\n\ttxs := transaction.Pending_transaction\n\tfor _, tx := range txs {\n\t\t// owned files => owner delete files and get explore files\n\t\tif tx.SenderPK == acc.AccountPublicKey {\n\t\t\tfileObj := tx.Transaction.Filestruct\n\t\t\tif fileObj.FileSize != 0 && fileObj.Deleted == true && fileObj.Fileid == obj.FileID {\n\t\t\t\tfoundtx = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif foundtx {\n\t\tglobalPkg.SendError(w, \"You don't have this file!\")\n\t\tglobalPkg.WriteLog(logobj, \"You don't 
have this file\", \"failed\")\n\t\treturn\n\t}\n\tdecryptIndexBlock1 := cryptogrpghy.KeyDecrypt(globalpkg.EncryptAccount, selectedFile.Blockindex)\n\tfmt.Println(\"Block Index to be deletd data \", decryptIndexBlock1)\n\tblkObj := block.GetBlockInfoByID(decryptIndexBlock1)\n\tvar fStrct filestorage.FileStruct\n\tfor _, tx := range blkObj.BlockTransactions {\n\t\tfStrct = tx.Filestruct\n\t\tif fStrct.Fileid == selectedFile.Fileid {\n\t\t\tfStrct = tx.Filestruct\n\t\t\tbreak\n\t\t}\n\t}\n\tfStrct.Deleted = true\n\tfStrct.Transactionid = globalPkg.CreateHash(fStrct.Timefile, fmt.Sprintf(\"%s\", fStrct), 3)\n\t// add on trsansaction pool\n\tbroadcastTcp.BoardcastingTCP(fStrct, \"deletefile\", \"file\")\n\t// delete file if share from share file table\n\n\tglobalPkg.SendResponseMessage(w, \"File Deleted Successfully\")\n\tglobalPkg.WriteLog(logobj, \"File Deleted Successfully\", \"success\")\n}", "func (a MediaApi) DeleteAccountMedia(accountId int32, mediaId int32) (*DeleteEntry, *APIResponse, error) {\n\n\tvar localVarHttpMethod = strings.ToUpper(\"Delete\")\n\t// create path and map variables\n\tlocalVarPath := a.Configuration.BasePath + \"/accounts/{account_id}/media/{media_id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"account_id\"+\"}\", fmt.Sprintf(\"%v\", accountId), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"media_id\"+\"}\", fmt.Sprintf(\"%v\", mediaId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := make(map[string]string)\n\tvar localVarPostBody interface{}\n\tvar localVarFileName string\n\tvar localVarFileBytes []byte\n\t// authentication '(apiKey)' required\n\t// set key with prefix in header\n\tlocalVarHeaderParams[\"Authorization\"] = a.Configuration.GetAPIKeyWithPrefix(\"Authorization\")\n\t// add default headers if any\n\tfor key := range a.Configuration.DefaultHeader {\n\t\tlocalVarHeaderParams[key] = a.Configuration.DefaultHeader[key]\n\t}\n\n\tclearEmptyParams(localVarQueryParams)\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tvar successPayload = new(DeleteEntry)\n\tlocalVarHttpResponse, err := a.Configuration.APIClient.CallAPI(localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\n\tvar localVarURL, _ = url.Parse(localVarPath)\n\tlocalVarURL.RawQuery = localVarQueryParams.Encode()\n\tvar localVarAPIResponse = &APIResponse{Operation: \"DeleteAccountMedia\", Method: localVarHttpMethod, RequestURL: localVarURL.String()}\n\tif localVarHttpResponse != nil {\n\t\tlocalVarAPIResponse.Response = localVarHttpResponse.RawResponse\n\t\tlocalVarAPIResponse.Payload = localVarHttpResponse.Body()\n\t}\n\n\tif err != nil {\n\t\treturn successPayload, localVarAPIResponse, err\n\t}\n\terr = 
json.Unmarshal(localVarHttpResponse.Body(), &successPayload)\n\treturn successPayload, localVarAPIResponse, err\n}", "func (self *File_Client) DeleteFile(path string) error {\n\n\trqst := &filepb.DeleteFileRequest{\n\t\tPath: Utility.ToString(path),\n\t}\n\n\t_, err := self.c.DeleteFile(context.Background(), rqst)\n\tif err != nil {\n\t\tlog.Fatalf(\"error while testing get file info: %v\", err)\n\t}\n\n\treturn err\n}", "func (client StorageGatewayClient) deleteFileSystem(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodDelete, \"/storageGateways/{storageGatewayId}/fileSystems/{fileSystemName}\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response DeleteFileSystemResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (a *DefaultClient) Delete(file vfs.File) error {\n\tURL, err := url.Parse(file.Location().(*Location).ContainerURL())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcontainerURL := azblob.NewContainerURL(*URL, a.pipeline)\n\tblobURL := containerURL.NewBlockBlobURL(utils.RemoveLeadingSlash(file.Path()))\n\t_, err = blobURL.Delete(context.Background(), azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})\n\treturn err\n}", "func (conn Connection) Delete(cmd string, content, result interface{}) (effect *SideEffect, resp *http.Response, err error) {\n\treturn conn.Send(http.MethodDelete, cmd, content, result)\n}", "func (c *Client) Delete(ctx context.Context, link string) error {\n\n\tauthKey, err := GetAccessTokenFromContext(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Missing token in context\")\n\t}\n\n\treq, err := http.NewRequest(\"DELETE\", link, nil)\n\n\tif err != nil {\n\t\tlog.Error(\"Cannot create request:\", err)\n\t\treturn err\n\t}\n\n\treq.Header.Add(\"X-Requested-With\", \"XMLHttpRequest\")\n\treq.Header.Add(\"authorization\", authKey)\n\treq.Header.Add(\"Accept\", \"application/json\")\n\tresp, err := c.httpClient.Do(req)\n\n\tif err != nil {\n\t\tlog.Error(\"POST request error:\", err)\n\t\treturn err\n\t}\n\t// this is required to properly empty the buffer for the next call\n\tdefer func() {\n\t\tio.Copy(ioutil.Discard, resp.Body)\n\t}()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tlog.Error(err, \": \", string(body))\n\t}\n\n\treturn err\n}", "func (r *ExtensionRequest) Delete(ctx context.Context) error {\n\treturn r.JSONRequest(ctx, \"DELETE\", \"\", nil, nil)\n}", "func (client *RestClient) Delete(path string) error {\n\tres, err := client.R().Delete(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif res.StatusCode() >= 400 {\n\t\treturn UnmarshalError(res)\n\t}\n\n\treturn nil\n}", "func Delete(c messagebird.Client, id string) error {\n\treturn c.Request(nil, http.MethodDelete, path+\"/\"+id, nil)\n}", "func DeleteAsset(settings *playfab.Settings, postData *DeleteAssetRequestModel, entityToken string) (*EmptyResponseModel, error) {\n if entityToken == \"\" {\n return nil, playfab.NewCustomError(\"entityToken should not be an empty string\", playfab.ErrorGeneric)\n }\n b, errMarshal := json.Marshal(postData)\n if errMarshal != nil {\n return nil, playfab.NewCustomError(errMarshal.Error(), playfab.ErrorMarshal)\n }\n\n sourceMap, err 
:= playfab.Request(settings, b, \"/MultiplayerServer/DeleteAsset\", \"X-EntityToken\", entityToken)\n    if err != nil {\n        return nil, err\n    }\n    \n    result := &EmptyResponseModel{}\n\n    config := mapstructure.DecoderConfig{\n        DecodeHook: playfab.StringToDateTimeHook,\n        Result: result,\n    }\n    \n    decoder, errDecoding := mapstructure.NewDecoder(&config)\n    if errDecoding != nil {\n        return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n    }\n    \n    errDecoding = decoder.Decode(sourceMap)\n    if errDecoding != nil {\n        return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n    }\n\n    return result, nil\n}", "func delete(cl *lib.Client, list *[]extAQLFileInfo) error {\n\n\t// range over list and make delete calls to Artifactory\n\tfor _, d := range *list {\n\n\t\t//construct request\n\t\tdl := []string{\n\t\t\td.Repo,\n\t\t\td.Path,\n\t\t\td.Name,\n\t\t}\n\t\tdlj := strings.Join(dl, \"/\")\n\n\t\tvar request lib.Request\n\t\trequest.Verb = \"DELETE\"\n\t\trequest.Path = \"/\" + (dlj)\n\n\t\t// make request\n\t\t_, err := cl.HTTPRequest(request)\n\t\tif err != nil {\n\t\t\texitErrorf(\"could not delete %q: %v\", dlj, err)\n\t\t}\n\t\tfmt.Println(\"deleted: \", dlj)\n\t}\n\treturn nil\n}", "func (a *ContentHandler) Delete(c echo.Context) error {\n\tidP, err := strconv.Atoi(c.Param(\"id\"))\n\tif err != nil {\n\t\treturn c.JSON(http.StatusNotFound, domain.ErrNotFound.Error())\n\t}\n\n\tid := int64(idP)\n\tctx := c.Request().Context()\n\n\terr = a.AUsecase.Delete(ctx, id)\n\tif err != nil {\n\t\treturn c.JSON(getStatusCode(err), ResponseError{Message: err.Error()})\n\t}\n\n\treturn c.NoContent(http.StatusNoContent)\n}", "func (em *entityManager) Delete(ctx context.Context, entityPath string, mw ...MiddlewareFunc) (*http.Response, error) {\n\tctx, span := em.startSpanFromContext(ctx, \"sb.EntityManger.Delete\")\n\tdefer span.End()\n\n\treturn em.Execute(ctx, http.MethodDelete, entityPath, http.NoBody, mw...)\n}", "func Delete(url string, token string, client http.Client) (err error) {\n\treq, err := http.NewRequest(\"DELETE\", url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq.Header.Set(\"X-Auth-Token\", token)\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Expecting a successful delete\n\tif !(resp.StatusCode == 200 || resp.StatusCode == 202 || resp.StatusCode == 204) {\n\t\terr = fmt.Errorf(\"Unexpected server response status code on Delete '%s'\", resp.StatusCode)\n\t\treturn\n\t}\n\n\treturn nil\n}", "func (c *fileStorageClient) Delete(key string) error {\n\treturn c.Batch(DeleteOperation(key))\n}", "func (obj *MessengerFileCipher) delete() {\n\tC.vssq_messenger_file_cipher_delete(obj.cCtx)\n}", "func (o *FilesStorage) Delete(exec boil.Executor) (int64, error) {\n\tif o == nil {\n\t\treturn 0, errors.New(\"models: no FilesStorage provided for delete\")\n\t}\n\n\tif err := o.doBeforeDeleteHooks(exec); err != nil {\n\t\treturn 0, err\n\t}\n\n\targs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), filesStoragePrimaryKeyMapping)\n\tsql := \"DELETE FROM `files_storages` WHERE `id`=?\"\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, sql)\n\t\tfmt.Fprintln(boil.DebugWriter, args...)\n\t}\n\n\tresult, err := exec.Exec(sql, args...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to delete from files_storages\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to get rows affected by delete for 
files_storages\")\n\t}\n\n\tif err := o.doAfterDeleteHooks(exec); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn rowsAff, nil\n}", "func (f FileRepo) Delete(context context.Context, id string) (string, error) {\n\topts := options.FindOneAndDelete().SetProjection(bson.D{{\"_id\", 1}})\n\tobjID, err := primitive.ObjectIDFromHex(id)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tquery := bson.M{\n\t\t\"_id\": objID,\n\t}\n\tvar delFile model.File\n\terr = f.collection.FindOneAndDelete(context, query, opts).Decode(&delFile)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn delFile.ID.Hex(), nil\n}", "func DeleteFile(docId string) error {\n\t// delete file from drive\n\terr := driveSrv.Files.Delete(docId).Do()\n\t// return err\n\treturn err\n}", "func (self *Client) Delete(dst interface{}, path string, data url.Values) error {\n\tvar addr *url.URL\n\tvar err error\n\tvar body *strings.Reader\n\n\tif addr, err = url.Parse(self.Prefix + strings.TrimLeft(path, \"/\")); err != nil {\n\t\treturn err\n\t}\n\n\tif data != nil {\n\t\tbody = strings.NewReader(data.Encode())\n\t}\n\n\treturn self.newRequest(dst, \"DELETE\", addr, body)\n}", "func (s BmUnitResource) Delete(id string, r api2go.Request) (api2go.Responder, error) {\n\terr := s.BmUnitStorage.Delete(id)\n\treturn &Response{Code: http.StatusNoContent}, err\n}", "func adminImagesDelete(w http.ResponseWriter, r *http.Request) {\n\ttype response struct {\n\t\tOk bool\n\t}\n\tid := r.URL.Path\n\tresp := response{}\n\n\t_, err := db.Exec(`DELETE FROM images WHERE appointment_id = $1`, id)\n\tif err != nil {\n\t\tjson.NewEncoder(w).Encode(resp)\n\t\treturn\n\t}\n\n\t// set ok and send\n\tresp.Ok = true\n\terr = json.NewEncoder(w).Encode(resp)\n\tif err != nil {\n\t\tlog.Println(\"appointment img delete:\", err)\n\t}\n}", "func (a *API) Delete(w http.ResponseWriter, r *http.Request) {\n\t// Get Request Vars\n\tvars := mux.Vars(r)\n\trealm, ok := vars[\"realm\"]\n\tif !ok {\n\t\tRaiseError(w, \"Realm is missing\", http.StatusBadRequest, ErrorCodeRealmMissing)\n\t\treturn\n\t}\n\n\tkey, ok := vars[\"key\"]\n\tif !ok {\n\t\tRaiseError(w, \"Key is missing\", http.StatusBadRequest, ErrorCodeKeyMissing)\n\t\treturn\n\t}\n\n\tok = a.Storage.Delete(realm, key)\n\tif !ok {\n\t\tRaiseError(w, fmt.Sprintf(\"No value found for key %v/%v\", realm, key), http.StatusNotFound, ErrorCodeEntityNotFound)\n\t\treturn\n\t}\n\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusNoContent)\n}", "func (r *ECMResource) Delete(id string) error {\n\tif err := r.c.ModQuery(\"DELETE\", BasePath+ECMEndpoint+\"/\"+id, nil); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (o *SwiftObject) Delete(metadata map[string]string) error {\n\tif _, err := o.newFile(\"ts\", 0); err != nil {\n\t\treturn err\n\t} else {\n\t\tdefer o.Close()\n\t\treturn o.Commit(metadata)\n\t}\n}", "func DiskDelete(w rest.ResponseWriter, r *rest.Request) {\n\treq := DiskDeleteRequest{}\n\terr := r.DecodeJsonPayload(&req)\n\tif err != nil {\n\t\tlogit.Error.Println(err.Error())\n\t\trest.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tlogit.Info.Println(\"DiskDelete called \" + req.Path)\n\n\tvar cmd *exec.Cmd\n\tcmd = exec.Command(\"deletevolume\", req.Path)\n\tvar out bytes.Buffer\n\tvar stderr bytes.Buffer\n\tcmd.Stdout = &out\n\tcmd.Stderr = &stderr\n\terr = cmd.Run()\n\tif err != nil {\n\t\tlogit.Error.Println(err.Error())\n\t\trest.Error(w, err.Error(), 400)\n\t\treturn\n\t}\n\n\tvar response 
DiskDeleteResponse\n\tresponse.Output = out.String()\n\tresponse.Status = \"OK\"\n\tw.WriteJson(&response)\n}", "func (s *Service) Delete(file *basefs.File) error {\n\treturn s.megaCli.Delete(file.Data.(*MegaPath).Node, false)\n}", "func (r Requester) Delete(path string) Requester {\n\treq, err := http.NewRequest(http.MethodDelete, r.url, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tr.httpRequest = req\n\treturn r\n}", "func (s *GDrive) Delete(ctx context.Context, token string, filename string) (err error) {\n\tmetadata, _ := s.findID(fmt.Sprintf(\"%s.metadata\", filename), token)\n\t_ = s.service.Files.Delete(metadata).Do()\n\n\tvar fileID string\n\tfileID, err = s.findID(filename, token)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = s.service.Files.Delete(fileID).Context(ctx).Do()\n\treturn\n}", "func Delete(ctx context.Context, url string, body Body, options ...RequestOption) (*Response, error) {\n\tr, err := newRequest(ctx, http.MethodDelete, url, body, options...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr.Header.Set(\"Content-Type\", body.ContentType())\n\treturn doRequest(http.DefaultClient, r)\n}", "func Delete(url string, data ...interface{}) (*ClientResponse, error) {\n\treturn DoRequest(\"DELETE\", url, data...)\n}", "func (c *Client) Delete(ctx context.Context, url string, data ...interface{}) (*Response, error) {\n\treturn c.DoRequest(ctx, http.MethodDelete, url, data...)\n}", "func (f *Fs) delete(ctx context.Context, isFile bool, id string, remote string, hardDelete bool) (err error) {\n\tif hardDelete {\n\t\topts := rest.Opts{\n\t\t\tMethod: \"DELETE\",\n\t\t\tRootURL: id,\n\t\t\tNoResponse: true,\n\t\t}\n\t\treturn f.pacer.Call(func() (bool, error) {\n\t\t\tresp, err := f.srv.Call(ctx, &opts)\n\t\t\treturn shouldRetry(ctx, resp, err)\n\t\t})\n\t}\n\t// Move file/dir to deleted files if not hard delete\n\tleaf := path.Base(remote)\n\tif isFile {\n\t\t_, err = f.moveFile(ctx, id, leaf, f.opt.DeletedID)\n\t} else {\n\t\terr = f.moveDir(ctx, id, leaf, f.opt.DeletedID)\n\t}\n\treturn err\n}", "func DeleteFile(src string) {\n\tdatabase, err := GetDBIns()\n\tif err != nil {\n\t\tprintln(\"error in DeleteFile:\", err.Error())\n\t}\n\n\tif deleteFileFromChaStmt == nil {\n\t\tdeleteFileFromChaStmt, err = database.Prepare(deleteFileFromCha)\n\t\tif err != nil {\n\t\t\tprintln(\"error in deleteFile init cha:\", err.Error())\n\t\t}\n\t}\n\n\tif deleteFileFromMapStmt == nil {\n\t\tdeleteFileFromMapStmt, err = database.Prepare(deleteFileFromMap)\n\t\tif err != nil {\n\t\t\tprintln(\"error in deleteFile init map:\", err.Error())\n\t\t}\n\t}\n\n\tdeleteFileFromChaStmt.Exec(src)\n\tdeleteFileFromMapStmt.Exec()\n}", "func (c *MediaClient) Delete() *MediaDelete {\n\tmutation := newMediaMutation(c.config, OpDelete)\n\treturn &MediaDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}\n}", "func (c *Client) Delete(url string, headers map[string][]string) (client.Status, map[string][]string, io.ReadCloser, error) {\n\treturn c.Do(\"DELETE\", url, headers, nil)\n}", "func delete(w http.ResponseWriter, r *http.Request) {\r\n\tid, _ := strconv.ParseInt(r.URL.Query().Get(\"id\"), 10, 64)\r\n\tquery := \"DELETE from public.item where id=$1\"\r\n\t_, err = db.Exec(query, id)\r\n\tif err != nil {\r\n\t\tpanic(err)\r\n\t}\r\n\t//json.NewEncoder(w).Encode()\t\r\n}", "func DeleteBytes(url string, data ...interface{}) []byte {\n\treturn RequestBytes(\"DELETE\", url, data...)\n}", "func deleteFile(file string) {\n\tcmd := exec.Command(\n\t\t\"rm\",\n\t\tgetPacoPath() + \"/core/\" + 
file,\n\t)\n\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Fatalf(\"rm failed with %s\\n\", err)\n\t}\n}", "func (s *Storage) DeleteRequest(id string) error {\n\t// TODO: Unimplemented\n\treturn nil\n}", "func (a *ContentApiService) DeleteContentMetadataAccess(ctx _context.Context, contentMetadataAccessId int64) (string, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodDelete\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue string\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/content_metadata_access/{content_metadata_access_id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"content_metadata_access_id\"+\"}\", _neturl.QueryEscape(parameterToString(contentMetadataAccessId, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 204 {\n\t\t\tvar v string\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, 
newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (r *ExternalItemRequest) Delete(ctx context.Context) error {\n\treturn r.JSONRequest(ctx, \"DELETE\", \"\", nil, nil)\n}", "func Delete(uri string) error {\n\treq, err := http.NewRequest(\"DELETE\", Host+uri, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tres, err := Client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != 204 {\n\t\treturn fmt.Errorf(\"got %d\", res.StatusCode)\n\t}\n\n\treturn nil\n}", "func (r *DeviceManagementScriptRequest) Delete(ctx context.Context) error {\n\treturn r.JSONRequest(ctx, \"DELETE\", \"\", nil, nil)\n}", "func (f *File) Delete() error {\n\tf.writeBuffer = nil\n\tif err := f.Close(); err != nil {\n\t\treturn err\n\t}\n\thandle, err := f.getObjectHandle()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn handle.Delete(f.fileSystem.ctx)\n}", "func DeleteVideoFile(ctx context.Context, userid int64, projectid int64, database *sql.DB) error {\n\tprefix := fmt.Sprintf(\"%s%d%s%s%d%s%s\", userdir, userid, \"/\", projectdir, projectid, \"/\", videodir)\n\tvar delimeter = \"/\"\n\n\tbucket, err := GetBucket(ctx)\n\tif err != nil {\n\t\tlog.Println(\"[ERR] failed to get bucket err : \", err)\n\t\treturn err\n\t}\n\n\tit := bucket.Objects(ctx, &storage.Query{Prefix: prefix, Delimiter: delimeter})\n\tvideolink, err := ReadVideoFileLink(projectid, database)\n\n\tfor {\n\t\tattrs, err := it.Next()\n\t\tif err == iterator.Done {\n\t\t\tlog.Println(\"[LOG] iterator done!\")\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlog.Println(\"[ERR] bucket objects err : \", err)\n\t\t\treturn err\n\t\t}\n\t\tif videolink != attrs.Name {\n\t\t\terr = bucket.Object(attrs.Name).Delete(ctx)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"[ERR] failed to delete object err : \", err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (r *repository) Delete(id uint) error {\n\tif err := r.db.Where(\"id = ?\", id).Delete(&models.Upload{}).Error; err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (c *Files) Delete(in *DeleteInput) (out *DeleteOutput, err error) {\n\tbody, err := c.call(\"/files/delete_v2\", in)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer body.Close()\n\n\terr = json.NewDecoder(body).Decode(&out)\n\treturn\n}", "func (rest *RESTService) ApiDELETE(w http.ResponseWriter, r *http.Request) {\n\n}", "func (r DeleteNotSupported) Delete(app *App, request *http.Request) (int, interface{}, error) {\n\treturn notSupported(DELETE)\n}", "func Delete(url string, authHeader string) (int, []byte) {\n\tcode, _, response := DeleteWithHeaderInResult(url, authHeader)\n\treturn code, response\n}", "func (c *Client) Delete(path gfs.Path) error {\n\tvar reply gfs.DeleteFileReply\n\terr := util.Call(c.master, \"Master.RPCDeleteFile\", gfs.DeleteFileArg{path}, &reply)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func deleteImageResource(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {\n\t// Warning or errors can be collected in a slice type\n\tvar diags diag.Diagnostics\n\tclient := 
(meta.(Client)).Client\n\tname := rdEntryStr(d, \"name\")\n\tid := rdEntryStr(d, \"id\")\n\terrMsgPrefix := getErrMsgPrefix(\"Image\", name, id, \"Delete\")\n\tcfg, err := getImage(client, name, id)\n\tif err != nil {\n\t\treturn diag.Errorf(\"%s Failed to get Image. err: %s\", errMsgPrefix, err.Error())\n\t}\n\tif cfg == nil {\n\t\tlog.Printf(\"%s Unexpected Error. nil config\", errMsgPrefix)\n\t\treturn diags\n\t}\n\tclient.XRequestIdPrefix = \"TF-image-delete\"\n\turlExtension := getImageUrl(name, id, \"delete\")\n\trspData := &swagger_models.ZsrvResponse{}\n\t_, err = client.SendReq(\"DELETE\", urlExtension, nil, rspData)\n\tif err != nil {\n\t\treturn diag.Errorf(\"%s. Request Failed. err: %s\", errMsgPrefix, err.Error())\n\t}\n\tlog.Printf(\"[INFO] Image %s(id:%s) Delete Successful.\", name, cfg.ID)\n\treturn diags\n}", "func (g *gcsclient) DeleteFile(ctx context.Context, path string) error {\n\treturn g.client.Bucket(g.bucket).Object(path).Delete(ctx)\n}", "func (media *SavedMedia) Delete() error {\n\tfor _, i := range media.Items {\n\t\terr := i.Media.Unsave()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (APIResourceBase) Delete(session *Session, url string, queries url.Values, body io.Reader) (APIStatus, interface{}) {\n\treturn FailSimple(http.StatusMethodNotAllowed), nil\n}", "func clearMediaStoreDB(ctx context.Context, a *arc.ARC) error {\n\ttesting.ContextLog(ctx, \"Starting to clear MediaStore\")\n\treturn a.Command(ctx, \"content\", \"delete\", \"--uri\", \"content://media/external/file?deletedata=false\").Run(testexec.DumpLogOnError)\n}", "func (obj *MessengerCloudFsFileInfoList) delete() {\n\tC.vssq_messenger_cloud_fs_file_info_list_delete(obj.cCtx)\n}", "func Delete(url string, r io.Reader, w io.Writer, clientGenerator func() *http.Client, reqTuner ...func(*http.Request)) error {\n\treturn Request(\"DELETE\", url, r, w, clientGenerator, reqTuner...)\n}", "func (a *UtilsApiService) DeleteApplicationUsingDelete(ctx context.Context, applicationId string) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\ta.client = NewAPIClient(&Configuration{\n\t\tBasePath: ctx.Value(\"BasePath\").(string),\n\t\tDefaultHeader: make(map[string]string),\n\t\tUserAgent: \"Swagger-Codegen/1.0.0/go\",\n\t})\n\tlocalVarPath := a.client.cfg.BasePath + \"/nucleus/v1/application/{application_id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"application_id\"+\"}\", fmt.Sprintf(\"%v\", applicationId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"*/*\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, 
localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (rb *RequestBuilder) Delete(url string) *Response {\n\treturn rb.DoRequest(http.MethodDelete, url, nil)\n}", "func (r *DeviceManagementExportJobRequest) Delete(ctx context.Context) error {\n\treturn r.JSONRequest(ctx, \"DELETE\", \"\", nil, nil)\n}", "func (sfs *SafeFileSet) Delete(metahash [32]byte) {\n\tsfs.filesSet.Delete(metahash)\n}" ]
[ "0.6664548", "0.6523098", "0.64310193", "0.6361053", "0.6320671", "0.63150233", "0.62969255", "0.62795895", "0.62649393", "0.6260926", "0.62259483", "0.62193996", "0.61149424", "0.6099652", "0.6080751", "0.6080232", "0.60775816", "0.60768515", "0.60654914", "0.6064816", "0.6049584", "0.60139465", "0.60053945", "0.600025", "0.59764165", "0.59378934", "0.59353346", "0.5931656", "0.58761054", "0.5866979", "0.5853663", "0.5836586", "0.5827172", "0.5813765", "0.580904", "0.58049494", "0.58049494", "0.58048725", "0.580144", "0.58000916", "0.5797911", "0.57878274", "0.5778497", "0.5775438", "0.57744193", "0.5773075", "0.57572204", "0.57548326", "0.57506275", "0.57456607", "0.57308555", "0.57304215", "0.57260245", "0.5725935", "0.57086277", "0.5695444", "0.569393", "0.5683352", "0.5676007", "0.5675479", "0.5664951", "0.5663067", "0.56630385", "0.5650464", "0.5647274", "0.56390244", "0.56368506", "0.5632667", "0.56322896", "0.56209", "0.5619561", "0.56144744", "0.5614005", "0.5608243", "0.55953926", "0.55937904", "0.5582158", "0.55803573", "0.5575691", "0.5573834", "0.556487", "0.5563358", "0.5558011", "0.5556405", "0.55509335", "0.55406624", "0.553944", "0.5535843", "0.5533685", "0.5527591", "0.5525404", "0.5520181", "0.55196667", "0.5513007", "0.5512719", "0.55092365", "0.5507594", "0.55067635", "0.5505144", "0.54989326" ]
0.82767993
0
TestCreateRetryUnrecoverable ensures that an attempt to create a mapping using failing registry update calls will return an error.
func TestCreateRetryUnrecoverable(t *testing.T) { registry := registryhostname.TestingRegistryHostnameRetriever(nil, "", testDefaultRegistryURL) restInstance := &REST{ strategy: NewStrategy(registry), imageRegistry: &fakeImageRegistry{ createImage: func(ctx context.Context, image *imageapi.Image) error { return nil }, }, imageStreamRegistry: &fakeImageStreamRegistry{ getImageStream: func(ctx context.Context, id string, options *metav1.GetOptions) (*imageapi.ImageStream, error) { return validImageStream(), nil }, listImageStreams: func(ctx context.Context, options *metainternal.ListOptions) (*imageapi.ImageStreamList, error) { s := validImageStream() return &imageapi.ImageStreamList{Items: []imageapi.ImageStream{*s}}, nil }, updateImageStreamStatus: func(ctx context.Context, repo *imageapi.ImageStream) (*imageapi.ImageStream, error) { return nil, errors.NewServiceUnavailable("unrecoverable error") }, }, } obj, err := restInstance.Create(apirequest.NewDefaultContext(), validNewMappingWithName(), rest.ValidateAllObjectFunc, false) if err == nil { t.Errorf("expected an error") } if obj != nil { t.Fatalf("expected a nil result") } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *ReconcileAerospikeCluster) recoverFailedCreate(aeroCluster *aerospikev1alpha1.AerospikeCluster) error {\n\tlogger := pkglog.New(log.Ctx{\"AerospikeCluster\": utils.ClusterNamespacedName(aeroCluster)})\n\tlogger.Info(\"Forcing a cluster recreate as status is nil. The cluster could be unreachable due to bad configuration.\")\n\n\t// Delete all statefulsets and everything related so that it can be properly created and updated in next run.\n\tstatefulSetList, err := r.getClusterStatefulSets(aeroCluster)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error getting statefulsets while forcing recreate of the cluster as status is nil: %v\", err)\n\t}\n\n\tlogger.Debug(\"Found statefulset for cluster. Need to delete them\", log.Ctx{\"nSTS\": len(statefulSetList.Items)})\n\tfor _, statefulset := range statefulSetList.Items {\n\t\tif err := r.deleteStatefulSet(aeroCluster, &statefulset); err != nil {\n\t\t\treturn fmt.Errorf(\"Error deleting statefulset while forcing recreate of the cluster as status is nil: %v\", err)\n\t\t}\n\t}\n\n\t// Clear pod status as well in status since we want to be re-initializing or cascade deleting devices if any.\n\t// This is not necessary since scale-up would cleanup danglin pod status. However done here for general\n\t// cleanliness.\n\trackStateList := getNewRackStateList(aeroCluster)\n\tfor _, state := range rackStateList {\n\t\tpods, err := r.getRackPodList(aeroCluster, state.Rack.ID)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed recover failed cluster: %v\", err)\n\t\t}\n\n\t\tnewPodNames := []string{}\n\t\tfor i := 0; i < len(pods.Items); i++ {\n\t\t\tnewPodNames = append(newPodNames, pods.Items[i].Name)\n\t\t}\n\n\t\terr = r.cleanupPods(aeroCluster, newPodNames, state)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed recover failed cluster: %v\", err)\n\t\t}\n\t}\n\n\treturn fmt.Errorf(\"Forcing recreate of the cluster as status is nil\")\n}", "func TestClient_CreateReplica_Err(t *testing.T) {\n\tc := OpenClient(0)\n\tdefer c.Close()\n\tc.Server.Handler.Broker().CreateReplica(123, &url.URL{Host: \"localhost\"})\n\tif err := c.CreateReplica(123, &url.URL{Host: \"localhost\"}); err == nil || err.Error() != `replica already exists` {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n}", "func TestAzureDevOpsServiceEndpointDockerRegistry_Create_DoesNotSwallowError(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tr := resourceServiceEndpointDockerRegistry()\n\tresourceData := schema.TestResourceDataRaw(t, r.Schema, nil)\n\tflattenServiceEndpointDockerRegistry(resourceData, &dockerRegistryTestServiceEndpoint, dockerRegistryTestServiceEndpointProjectID)\n\n\tbuildClient := azdosdkmocks.NewMockServiceendpointClient(ctrl)\n\tclients := &config.AggregatedClient{ServiceEndpointClient: buildClient, Ctx: context.Background()}\n\n\texpectedArgs := serviceendpoint.CreateServiceEndpointArgs{Endpoint: &dockerRegistryTestServiceEndpoint, Project: dockerRegistryTestServiceEndpointProjectID}\n\tbuildClient.\n\t\tEXPECT().\n\t\tCreateServiceEndpoint(clients.Ctx, expectedArgs).\n\t\tReturn(nil, errors.New(\"CreateServiceEndpoint() Failed\")).\n\t\tTimes(1)\n\n\terr := r.Create(resourceData, clients)\n\trequire.Contains(t, err.Error(), \"CreateServiceEndpoint() Failed\")\n}", "func TestReloadWithReadLock_GetNewVMsFails(t *testing.T) {\n\trequire := require.New(t)\n\n\tresources := initVMRegistryTest(t)\n\n\tresources.mockVMGetter.EXPECT().Get().Times(1).Return(nil, nil, errTest)\n\n\tinstalledVMs, failedVMs, err := 
resources.vmRegistry.ReloadWithReadLock(context.Background())\n\trequire.ErrorIs(err, errTest)\n\trequire.Empty(installedVMs)\n\trequire.Empty(failedVMs)\n}", "func TestCreateRetryConflictNoTagDiff(t *testing.T) {\n\tregistry := registryhostname.TestingRegistryHostnameRetriever(nil, \"\", testDefaultRegistryURL)\n\tfirstUpdate := true\n\trestInstance := &REST{\n\t\tstrategy: NewStrategy(registry),\n\t\timageRegistry: &fakeImageRegistry{\n\t\t\tcreateImage: func(ctx context.Context, image *imageapi.Image) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\timageStreamRegistry: &fakeImageStreamRegistry{\n\t\t\tgetImageStream: func(ctx context.Context, id string, options *metav1.GetOptions) (*imageapi.ImageStream, error) {\n\t\t\t\tstream := validImageStream()\n\t\t\t\tstream.Status = imageapi.ImageStreamStatus{\n\t\t\t\t\tTags: map[string]imageapi.TagEventList{\n\t\t\t\t\t\t\"latest\": {Items: []imageapi.TagEvent{{DockerImageReference: \"localhost:5000/someproject/somerepo:original\"}}},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\treturn stream, nil\n\t\t\t},\n\t\t\tupdateImageStreamStatus: func(ctx context.Context, repo *imageapi.ImageStream) (*imageapi.ImageStream, error) {\n\t\t\t\t// For the first update call, return a conflict to cause a retry of an\n\t\t\t\t// image stream whose tags haven't changed.\n\t\t\t\tif firstUpdate {\n\t\t\t\t\tfirstUpdate = false\n\t\t\t\t\treturn nil, errors.NewConflict(imagegroup.Resource(\"imagestreams\"), repo.Name, fmt.Errorf(\"resource modified\"))\n\t\t\t\t}\n\t\t\t\treturn repo, nil\n\t\t\t},\n\t\t},\n\t}\n\tobj, err := restInstance.Create(apirequest.NewDefaultContext(), validNewMappingWithName(), rest.ValidateAllObjectFunc, false)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error: %v\", err)\n\t}\n\tif obj == nil {\n\t\tt.Fatalf(\"expected a result\")\n\t}\n}", "func TestReload_GetNewVMsFails(t *testing.T) {\n\trequire := require.New(t)\n\n\tresources := initVMRegistryTest(t)\n\n\tresources.mockVMGetter.EXPECT().Get().Times(1).Return(nil, nil, errTest)\n\n\tinstalledVMs, failedVMs, err := resources.vmRegistry.Reload(context.Background())\n\trequire.ErrorIs(err, errTest)\n\trequire.Empty(installedVMs)\n\trequire.Empty(failedVMs)\n}", "func TestRecreateRunningWorkflowFails(t *testing.T) {\n\tr := task.NewTaskRegistry()\n\tr.AddOrchestratorN(\"SleepyWorkflow\", func(ctx *task.OrchestrationContext) (any, error) {\n\t\terr := ctx.CreateTimer(24 * time.Hour).Await(nil)\n\t\treturn nil, err\n\t})\n\n\tctx := context.Background()\n\tclient, engine := startEngine(ctx, t, r)\n\n\tfor _, opt := range GetTestOptions() {\n\t\tt.Run(opt(engine), func(t *testing.T) {\n\t\t\t// Start the first workflow, which will not complete\n\t\t\tvar metadata *api.OrchestrationMetadata\n\t\t\tid, err := client.ScheduleNewOrchestration(ctx, \"SleepyWorkflow\")\n\t\t\tif assert.NoError(t, err) {\n\t\t\t\tif metadata, err = client.WaitForOrchestrationStart(ctx, id); assert.NoError(t, err) {\n\t\t\t\t\tassert.False(t, metadata.IsComplete())\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Attempting to start a second workflow with the same ID should fail\n\t\t\t_, err = client.ScheduleNewOrchestration(ctx, \"SleepyWorkflow\", api.WithInstanceID(id))\n\t\t\trequire.Error(t, err)\n\t\t\t// We expect that the workflow instance ID is included in the error message\n\t\t\tassert.Contains(t, err.Error(), id)\n\t\t})\n\t}\n}", "func TestRetryNotRequired(t *testing.T) {\n\tcheck := assert.New(t)\n\tretryRequired := checkRetryRequired(http.StatusConflict)\n\tcheck.Equal(retryRequired, false)\n}", "func 
TestEmployeeManagerMapCreate_BadRequest(t *testing.T) {\n\tdb, _, err := sqlmock.New()\n\tif err != nil {\n\t\tt.Fatalf(\"an error '%s' was not expected when opening a stub database connection\", err)\n\t}\n\tdefer db.Close()\n\n\templyManagerMap := NewEmployeeManagerMapHandler(db)\n\n\tw := httptest.NewRecorder()\n\tvar jsonStr = []byte(`{\"invalidjson\":}`)\n\tr := httptest.NewRequest(\"POST\", \"http://localhost:9090/api/v1/emplymgrmap\", bytes.NewBuffer(jsonStr))\n\tr = r.WithContext(context.Background())\n\templyManagerMap.Create(w, r)\n\n\texpectedResponse := `{\"error_message\":\"Error:: Invalid Request\"}`\n\tassert.Equal(t, gohttp.StatusBadRequest, w.Code)\n\tassert.Equal(t, expectedResponse, w.Body.String())\n}", "func TestReloadWithReadLock_PartialRegisterFailure(t *testing.T) {\n\trequire := require.New(t)\n\n\tresources := initVMRegistryTest(t)\n\n\tfactory1 := vms.NewMockFactory(resources.ctrl)\n\tfactory2 := vms.NewMockFactory(resources.ctrl)\n\tfactory3 := vms.NewMockFactory(resources.ctrl)\n\tfactory4 := vms.NewMockFactory(resources.ctrl)\n\n\tregisteredVms := map[ids.ID]vms.Factory{\n\t\tid1: factory1,\n\t\tid2: factory2,\n\t}\n\n\tunregisteredVms := map[ids.ID]vms.Factory{\n\t\tid3: factory3,\n\t\tid4: factory4,\n\t}\n\n\tresources.mockVMGetter.EXPECT().\n\t\tGet().\n\t\tTimes(1).\n\t\tReturn(registeredVms, unregisteredVms, nil)\n\tresources.mockVMRegisterer.EXPECT().\n\t\tRegisterWithReadLock(gomock.Any(), id3, factory3).\n\t\tTimes(1).\n\t\tReturn(errTest)\n\tresources.mockVMRegisterer.EXPECT().\n\t\tRegisterWithReadLock(gomock.Any(), id4, factory4).\n\t\tTimes(1).\n\t\tReturn(nil)\n\n\tinstalledVMs, failedVMs, err := resources.vmRegistry.ReloadWithReadLock(context.Background())\n\trequire.NoError(err)\n\trequire.Len(failedVMs, 1)\n\trequire.ErrorIs(failedVMs[id3], errTest)\n\trequire.Len(installedVMs, 1)\n\trequire.Equal(id4, installedVMs[0])\n}", "func TestAppRecovery(t *testing.T) {\n\tserviceContext := entrypoint.StartAllServicesWithManualScheduler()\n\tproxy := serviceContext.RMProxy\n\n\t// Register RM\n\tconfigData := `\npartitions:\n - name: default\n queues:\n - name: root\n submitacl: \"*\"\n queues:\n - name: a\n resources:\n guaranteed:\n memory: 100\n vcore: 10\n max:\n memory: 150\n vcore: 20\n`\n\tconfigs.MockSchedulerConfigByData([]byte(configData))\n\tmockRM := NewMockRMCallbackHandler()\n\n\t_, err := proxy.RegisterResourceManager(\n\t\t&si.RegisterResourceManagerRequest{\n\t\t\tRmID: \"rm:123\",\n\t\t\tPolicyGroup: \"policygroup\",\n\t\t\tVersion: \"0.0.2\",\n\t\t}, mockRM)\n\n\tassert.NilError(t, err, \"RegisterResourceManager failed\")\n\n\tappID := \"app-1\"\n\t// Register nodes, and add apps\n\terr = proxy.Update(&si.UpdateRequest{\n\t\tNewSchedulableNodes: []*si.NewNodeInfo{\n\t\t\t{\n\t\t\t\tNodeID: \"node-1:1234\",\n\t\t\t\tAttributes: map[string]string{},\n\t\t\t\tSchedulableResource: &si.Resource{\n\t\t\t\t\tResources: map[string]*si.Quantity{\n\t\t\t\t\t\t\"memory\": {Value: 100},\n\t\t\t\t\t\t\"vcore\": {Value: 20},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tNodeID: \"node-2:1234\",\n\t\t\t\tAttributes: map[string]string{},\n\t\t\t\tSchedulableResource: &si.Resource{\n\t\t\t\t\tResources: map[string]*si.Quantity{\n\t\t\t\t\t\t\"memory\": {Value: 100},\n\t\t\t\t\t\t\"vcore\": {Value: 20},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tNewApplications: newAddAppRequest(map[string]string{appID: \"root.a\"}),\n\t\tRmID: \"rm:123\",\n\t})\n\n\tassert.NilError(t, err, \"UpdateRequest nodes and apps failed\")\n\n\t// waiting for 
recovery\n\tmockRM.waitForAcceptedNode(t, \"node-1:1234\", 1000)\n\tmockRM.waitForAcceptedNode(t, \"node-2:1234\", 1000)\n\n\tapp01 := serviceContext.Scheduler.GetClusterSchedulingContext().\n\t\tGetSchedulingApplication(appID, \"[rm:123]default\")\n\tassert.Assert(t, app01 != nil)\n\tassert.Equal(t, app01.ApplicationInfo.ApplicationID, appID)\n\tassert.Equal(t, app01.ApplicationInfo.QueueName, \"root.a\")\n}", "func (m *MockPartitions) FailCreateAfter(n int) *MockPartitions {\n\tm.failCreateAfter = n\n\treturn m\n}", "func TestAppRecoveryAlone(t *testing.T) {\n\tserviceContext := entrypoint.StartAllServicesWithManualScheduler()\n\tproxy := serviceContext.RMProxy\n\n\t// Register RM\n\tconfigData := `\npartitions:\n - name: default\n queues:\n - name: root\n submitacl: \"*\"\n queues:\n - name: a\n resources:\n guaranteed:\n memory: 100\n vcore: 10\n max:\n memory: 150\n vcore: 20\n`\n\tconfigs.MockSchedulerConfigByData([]byte(configData))\n\tmockRM := NewMockRMCallbackHandler()\n\n\t_, err := proxy.RegisterResourceManager(\n\t\t&si.RegisterResourceManagerRequest{\n\t\t\tRmID: \"rm:123\",\n\t\t\tPolicyGroup: \"policygroup\",\n\t\t\tVersion: \"0.0.2\",\n\t\t}, mockRM)\n\n\tassert.NilError(t, err, \"RegisterResourceManager failed\")\n\n\t// Register apps alone\n\tappID := \"app-1\"\n\terr = proxy.Update(&si.UpdateRequest{\n\t\tNewApplications: newAddAppRequest(map[string]string{appID: \"root.a\", \"app-2\": \"root.a\"}),\n\t\tRmID: \"rm:123\",\n\t})\n\n\tassert.NilError(t, err, \"UpdateRequest app failed\")\n\n\tmockRM.waitForAcceptedApplication(t, appID, 1000)\n\tmockRM.waitForAcceptedApplication(t, \"app-2\", 1000)\n\n\t// verify app state\n\tapps := serviceContext.Cache.GetPartition(\"[rm:123]default\").GetApplications()\n\tfound := 0\n\tfor _, app := range apps {\n\t\tif app.ApplicationID == appID || app.ApplicationID == \"app-2\" {\n\t\t\tassert.Equal(t, app.GetApplicationState(), cache.New.String())\n\t\t\tfound++\n\t\t}\n\t}\n\n\tassert.Equal(t, found, 2, \"did not find expected number of apps after recovery\")\n}", "func recreateConfigMapFunc(f *framework.Framework, tc *nodeConfigTestCase) error {\n\t// need to ignore NotFound error, since there could be cases where delete\n\t// fails during a retry because the delete in a previous attempt succeeded,\n\t// before some other error occurred.\n\terr := deleteConfigMapFunc(f, tc)\n\tif err != nil && !apierrors.IsNotFound(err) {\n\t\treturn err\n\t}\n\treturn createConfigMapFunc(f, tc)\n}", "func CreateTemplateFailErrMocked(t *testing.T, templateIn *types.Template) *types.Template {\n\n\tassert := assert.New(t)\n\n\t// wire up\n\tcs := &utils.MockConcertoService{}\n\tds, err := NewTemplateService(cs)\n\tassert.Nil(err, \"Couldn't load template service\")\n\tassert.NotNil(ds, \"Template service not instanced\")\n\n\t// convertMap\n\tmapIn, err := utils.ItemConvertParams(*templateIn)\n\tassert.Nil(err, \"Template test data corrupted\")\n\n\t// to json\n\tdOut, err := json.Marshal(templateIn)\n\tassert.Nil(err, \"Template test data corrupted\")\n\n\t// call service\n\tcs.On(\"Post\", \"/blueprint/templates/\", mapIn).Return(dOut, 200, fmt.Errorf(\"mocked error\"))\n\ttemplateOut, err := ds.CreateTemplate(mapIn)\n\tassert.NotNil(err, \"We are expecting an error\")\n\tassert.Nil(templateOut, \"Expecting nil output\")\n\tassert.Equal(err.Error(), \"mocked error\", \"Error should be 'mocked error'\")\n\n\treturn templateOut\n}", "func CreateTemplateFailStatusMocked(t *testing.T, templateIn *types.Template) *types.Template {\n\n\tassert := 
assert.New(t)\n\n\t// wire up\n\tcs := &utils.MockConcertoService{}\n\tds, err := NewTemplateService(cs)\n\tassert.Nil(err, \"Couldn't load template service\")\n\tassert.NotNil(ds, \"Template service not instanced\")\n\n\t// convertMap\n\tmapIn, err := utils.ItemConvertParams(*templateIn)\n\tassert.Nil(err, \"Template test data corrupted\")\n\n\t// to json\n\tdOut, err := json.Marshal(templateIn)\n\tassert.Nil(err, \"Template test data corrupted\")\n\n\t// call service\n\tcs.On(\"Post\", \"/blueprint/templates/\", mapIn).Return(dOut, 499, nil)\n\ttemplateOut, err := ds.CreateTemplate(mapIn)\n\tassert.NotNil(err, \"We are expecting a status code error\")\n\tassert.Nil(templateOut, \"Expecting nil output\")\n\tassert.Contains(err.Error(), \"499\", \"Error should contain http code 499\")\n\n\treturn templateOut\n}", "func TestServiceEndpointAzureRM_Create_DoesNotSwallowError(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tr := ResourceServiceEndpointAzureRM()\n\tfor _, resource := range azurermTestServiceEndpointsAzureRM {\n\t\tresourceData := getResourceData(t, resource)\n\t\tflattenServiceEndpointAzureRM(resourceData, &resource, azurermTestServiceEndpointAzureRMProjectID)\n\n\t\tbuildClient := azdosdkmocks.NewMockServiceendpointClient(ctrl)\n\t\tclients := &client.AggregatedClient{ServiceEndpointClient: buildClient, Ctx: context.Background()}\n\n\t\texpectedArgs := serviceendpoint.CreateServiceEndpointArgs{Endpoint: &resource}\n\n\t\tbuildClient.\n\t\t\tEXPECT().\n\t\t\tCreateServiceEndpoint(clients.Ctx, expectedArgs).\n\t\t\tReturn(nil, errors.New(\"CreateServiceEndpoint() Failed\")).\n\t\t\tTimes(1)\n\n\t\terr := r.Create(resourceData, clients)\n\t\trequire.Contains(t, err.Error(), \"CreateServiceEndpoint() Failed\")\n\n\t}\n}", "func (f *FailingKubeClient) Create(resources kube.ResourceList) (*kube.Result, error) {\n\tf.CreateCallsCnt++\n\treturn nil, f.CreateError\n}", "func TestImportProjectCreatedFailWhenAPIIsExisted(t *testing.T) {\n\tapim := apimClients[0]\n\tprojectName := \"OpenAPI3Project\"\n\tusername := superAdminUser\n\tpassword := superAdminPassword\n\n\targs := &testutils.InitTestArgs{\n\t\tCtlUser: testutils.Credentials{Username: username, Password: password},\n\t\tSrcAPIM: apim,\n\t\tInitFlag: projectName,\n\t\tOasFlag: utils.TestOpenAPI3DefinitionPath,\n\t\tForceFlag: false,\n\t}\n\n\t//Import API for the First time\n\ttestutils.ValidateImportInitializedProject(t, args)\n\n\t//Import API for the second time\n\ttestutils.ValidateImportFailedWithInitializedProject(t, args)\n\n}", "func TestSchedulerRecoveryWithoutAppInfo(t *testing.T) {\n\t// Register RM\n\tconfigData := `\npartitions:\n  -\n    name: default\n    queues:\n      - name: root\n        submitacl: \"*\"\n        queues:\n          - name: a\n            resources:\n              guaranteed:\n                memory: 100\n                vcore: 10\n              max:\n                memory: 150\n                vcore: 20\n`\n\tms := &mockScheduler{}\n\tdefer ms.Stop()\n\n\terr := ms.Init(configData, false)\n\tassert.NilError(t, err, \"RegisterResourceManager failed\")\n\n\t// Register nodes, and add apps\n\t// here we only report back existing allocations, without registering applications\n\terr = ms.proxy.Update(&si.UpdateRequest{\n\t\tNewSchedulableNodes: []*si.NewNodeInfo{\n\t\t\t{\n\t\t\t\tNodeID: \"node-1:1234\",\n\t\t\t\tAttributes: map[string]string{},\n\t\t\t\tSchedulableResource: &si.Resource{\n\t\t\t\t\tResources: map[string]*si.Quantity{\n\t\t\t\t\t\t\"memory\": {Value: 100},\n\t\t\t\t\t\t\"vcore\": {Value: 20},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tExistingAllocations: 
[]*si.Allocation{\n\t\t\t\t\t{\n\t\t\t\t\t\tAllocationKey: \"allocation-key-01\",\n\t\t\t\t\t\tUUID: \"UUID01\",\n\t\t\t\t\t\tApplicationID: \"app-01\",\n\t\t\t\t\t\tPartitionName: \"default\",\n\t\t\t\t\t\tQueueName: \"root.a\",\n\t\t\t\t\t\tNodeID: \"node-1:1234\",\n\t\t\t\t\t\tResourcePerAlloc: &si.Resource{\n\t\t\t\t\t\t\tResources: map[string]*si.Quantity{\n\t\t\t\t\t\t\t\tresources.MEMORY: {\n\t\t\t\t\t\t\t\t\tValue: 1024,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tresources.VCORE: {\n\t\t\t\t\t\t\t\t\tValue: 1,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tNodeID: \"node-2:1234\",\n\t\t\t\tAttributes: map[string]string{},\n\t\t\t\tSchedulableResource: &si.Resource{\n\t\t\t\t\tResources: map[string]*si.Quantity{\n\t\t\t\t\t\t\"memory\": {Value: 100},\n\t\t\t\t\t\t\"vcore\": {Value: 20},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tRmID: \"rm:123\",\n\t})\n\n\tassert.NilError(t, err, \"UpdateRequest nodes and apps failed\")\n\n\t// waiting for recovery\n\t// node-1 should be rejected as some of allocations cannot be recovered\n\tms.mockRM.waitForRejectedNode(t, \"node-1:1234\", 1000)\n\tms.mockRM.waitForAcceptedNode(t, \"node-2:1234\", 1000)\n\n\t// verify partitionInfo resources\n\tpartitionInfo := ms.clusterInfo.GetPartition(\"[rm:123]default\")\n\tassert.Equal(t, partitionInfo.GetTotalNodeCount(), 1)\n\tassert.Equal(t, partitionInfo.GetTotalApplicationCount(), 0)\n\tassert.Equal(t, partitionInfo.GetTotalAllocationCount(), 0)\n\tassert.Equal(t, partitionInfo.GetNode(\"node-2:1234\").GetAllocatedResource().Resources[resources.MEMORY],\n\t\tresources.Quantity(0))\n\n\t// register the node again, with application info attached\n\terr = ms.proxy.Update(&si.UpdateRequest{\n\t\tNewSchedulableNodes: []*si.NewNodeInfo{\n\t\t\t{\n\t\t\t\tNodeID: \"node-1:1234\",\n\t\t\t\tAttributes: map[string]string{},\n\t\t\t\tSchedulableResource: &si.Resource{\n\t\t\t\t\tResources: map[string]*si.Quantity{\n\t\t\t\t\t\t\"memory\": {Value: 100},\n\t\t\t\t\t\t\"vcore\": {Value: 20},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tExistingAllocations: []*si.Allocation{\n\t\t\t\t\t{\n\t\t\t\t\t\tAllocationKey: \"allocation-key-01\",\n\t\t\t\t\t\tUUID: \"UUID01\",\n\t\t\t\t\t\tApplicationID: \"app-01\",\n\t\t\t\t\t\tPartitionName: \"default\",\n\t\t\t\t\t\tQueueName: \"root.a\",\n\t\t\t\t\t\tNodeID: \"node-1:1234\",\n\t\t\t\t\t\tResourcePerAlloc: &si.Resource{\n\t\t\t\t\t\t\tResources: map[string]*si.Quantity{\n\t\t\t\t\t\t\t\tresources.MEMORY: {\n\t\t\t\t\t\t\t\t\tValue: 100,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tresources.VCORE: {\n\t\t\t\t\t\t\t\t\tValue: 1,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tNewApplications: newAddAppRequest(map[string]string{\"app-01\": \"root.a\"}),\n\t\tRmID: \"rm:123\",\n\t})\n\n\tassert.NilError(t, err, \"UpdateRequest re-register nodes and app failed\")\n\n\tms.mockRM.waitForAcceptedNode(t, \"node-1:1234\", 1000)\n\n\tassert.Equal(t, partitionInfo.GetTotalNodeCount(), 2)\n\tassert.Equal(t, partitionInfo.GetTotalApplicationCount(), 1)\n\tassert.Equal(t, partitionInfo.GetTotalAllocationCount(), 1)\n\tassert.Equal(t, partitionInfo.GetNode(\"node-1:1234\").GetAllocatedResource().Resources[resources.MEMORY], resources.Quantity(100))\n\tassert.Equal(t, partitionInfo.GetNode(\"node-1:1234\").GetAllocatedResource().Resources[resources.VCORE], resources.Quantity(1))\n\tassert.Equal(t, partitionInfo.GetNode(\"node-2:1234\").GetAllocatedResource().Resources[resources.MEMORY], 
resources.Quantity(0))\n\tassert.Equal(t, partitionInfo.GetNode(\"node-2:1234\").GetAllocatedResource().Resources[resources.VCORE], resources.Quantity(0))\n\n\tt.Log(\"verifying scheduling queues\")\n\trecoveredQueueRoot := ms.getSchedulingQueue(\"root\")\n\trecoveredQueue := ms.getSchedulingQueue(\"root.a\")\n\tassert.Equal(t, recoveredQueue.QueueInfo.GetAllocatedResource().Resources[resources.MEMORY], resources.Quantity(100))\n\tassert.Equal(t, recoveredQueue.QueueInfo.GetAllocatedResource().Resources[resources.VCORE], resources.Quantity(1))\n\tassert.Equal(t, recoveredQueueRoot.QueueInfo.GetAllocatedResource().Resources[resources.MEMORY], resources.Quantity(100))\n\tassert.Equal(t, recoveredQueueRoot.QueueInfo.GetAllocatedResource().Resources[resources.VCORE], resources.Quantity(1))\n}", "func TestReload_PartialRegisterFailure(t *testing.T) {\n\trequire := require.New(t)\n\n\tresources := initVMRegistryTest(t)\n\n\tfactory1 := vms.NewMockFactory(resources.ctrl)\n\tfactory2 := vms.NewMockFactory(resources.ctrl)\n\tfactory3 := vms.NewMockFactory(resources.ctrl)\n\tfactory4 := vms.NewMockFactory(resources.ctrl)\n\n\tregisteredVms := map[ids.ID]vms.Factory{\n\t\tid1: factory1,\n\t\tid2: factory2,\n\t}\n\n\tunregisteredVms := map[ids.ID]vms.Factory{\n\t\tid3: factory3,\n\t\tid4: factory4,\n\t}\n\n\tresources.mockVMGetter.EXPECT().\n\t\tGet().\n\t\tTimes(1).\n\t\tReturn(registeredVms, unregisteredVms, nil)\n\tresources.mockVMRegisterer.EXPECT().\n\t\tRegister(gomock.Any(), id3, factory3).\n\t\tTimes(1).\n\t\tReturn(errTest)\n\tresources.mockVMRegisterer.EXPECT().\n\t\tRegister(gomock.Any(), id4, factory4).\n\t\tTimes(1).\n\t\tReturn(nil)\n\n\tinstalledVMs, failedVMs, err := resources.vmRegistry.Reload(context.Background())\n\trequire.NoError(err)\n\trequire.Len(failedVMs, 1)\n\trequire.ErrorIs(failedVMs[id3], errTest)\n\trequire.Len(installedVMs, 1)\n\trequire.Equal(id4, installedVMs[0])\n}", "func ensureSubscriptionCreationFails(subscription *eventingv1alpha1.Subscription, ctx context.Context) {\n\tif subscription.Namespace != \"default\" {\n\t\tnamespace := fixtureNamespace(subscription.Namespace)\n\t\tif namespace.Name != \"default\" {\n\t\t\tExpect(k8sClient.Create(ctx, namespace)).Should(BeNil())\n\t\t}\n\t}\n\tExpect(k8sClient.Create(ctx, subscription)).Should(\n\t\tAnd(\n\t\t\t// prevent nil-pointer stacktrace\n\t\t\tNot(BeNil()),\n\t\t\ttesting.IsK8sUnprocessableEntity(),\n\t\t),\n\t)\n}", "func TestCmdDeploy_retryRejectNonFailed(t *testing.T) {\n\tvar existingDeployment *kapi.ReplicationController\n\n\tcommandClient := &deployCommandClientImpl{\n\t\tGetDeploymentFn: func(namespace, name string) (*kapi.ReplicationController, error) {\n\t\t\treturn existingDeployment, nil\n\t\t},\n\t\tUpdateDeploymentConfigFn: func(config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) {\n\t\t\tt.Fatalf(\"unexpected call to UpdateDeploymentConfig\")\n\t\t\treturn nil, nil\n\t\t},\n\t\tUpdateDeploymentFn: func(deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {\n\t\t\tt.Fatalf(\"unexpected call to UpdateDeployment\")\n\t\t\treturn nil, nil\n\t\t},\n\t\tListDeployerPodsForFn: func(namespace, deploymentName string) (*kapi.PodList, error) {\n\t\t\tt.Fatalf(\"unexpected call to ListDeployerPodsFor\")\n\t\t\treturn nil, nil\n\t\t},\n\t\tDeletePodFn: func(pod *kapi.Pod) error {\n\t\t\tt.Fatalf(\"unexpected call to DeletePod\")\n\t\t\treturn nil\n\t\t},\n\t}\n\n\tc := &retryDeploymentCommand{client: commandClient}\n\n\tinvalidStatusList := 
[]deployapi.DeploymentStatus{\n\t\tdeployapi.DeploymentStatusNew,\n\t\tdeployapi.DeploymentStatusPending,\n\t\tdeployapi.DeploymentStatusRunning,\n\t\tdeployapi.DeploymentStatusComplete,\n\t}\n\n\tfor _, status := range invalidStatusList {\n\t\tconfig := deploytest.OkDeploymentConfig(1)\n\t\texistingDeployment = deploymentFor(config, status)\n\t\terr := c.retry(config, ioutil.Discard)\n\t\tif err == nil {\n\t\t\tt.Errorf(\"expected an error retrying deployment with status %s\", status)\n\t\t}\n\t}\n}", "func crashAndFastRestore(t *testing.T) {\n\tvar err error\n\tproxyURL := tutils.RandomProxyURL(t)\n\tsmap := tutils.GetClusterMap(t, proxyURL)\n\ttlog.Logf(\"targets: %d, proxies: %d\\n\", smap.CountActiveTargets(), smap.CountActiveProxies())\n\n\t// Make sure proxyURL is not primary URL.\n\t_, proxyURL, err = chooseNextProxy(smap)\n\ttassert.CheckFatal(t, err)\n\toldPrimaryID := smap.Primary.ID()\n\ttlog.Logf(\"The current primary %s, Smap version %d\\n\", oldPrimaryID, smap.Version)\n\n\tcmd, err := tutils.KillNode(smap.Primary)\n\ttassert.CheckFatal(t, err)\n\n\t// quick crash and recover\n\ttime.Sleep(2 * time.Second)\n\terr = tutils.RestoreNode(cmd, true, \"proxy (primary)\")\n\ttassert.CheckFatal(t, err)\n\n\ttlog.Logf(\"The %s is currently restarting\\n\", oldPrimaryID)\n\n\t// NOTE: using (version - 1) because the primary will restart with its old version,\n\t// there will be no version change for this restore, so force beginning version to 1 less\n\t// than the original version in order to use WaitForClusterState.\n\tsmap, err = tutils.WaitForClusterState(proxyURL, \"restore\", smap.Version-1, 0, 0)\n\ttassert.CheckFatal(t, err)\n\n\tif smap.Primary.ID() != oldPrimaryID {\n\t\tt.Fatalf(\"Wrong primary proxy: %s, expecting: %s\", smap.Primary.ID(), oldPrimaryID)\n\t}\n}", "func TestReplicaNetwork_Failure_Resilience(t *testing.T) {\n\tnew(test.Consistency).Run(t, newReplicatedNetwork(network.NewNodeDownEvent(5, 30)))\n}", "func TestActiveReplicatorReconnectOnStart(t *testing.T) {\n\tbase.RequireNumTestBuckets(t, 2)\n\n\tif testing.Short() {\n\t\tt.Skipf(\"Test skipped in short mode\")\n\t}\n\n\ttests := []struct {\n\t\tname string\n\t\tusernameOverride string\n\t\tremoteURLHostOverride string\n\t\texpectedErrorContains string\n\t\texpectedErrorIsConnectionRefused bool\n\t}{\n\t\t{\n\t\t\tname: \"wrong user\",\n\t\t\tusernameOverride: \"bob\",\n\t\t\texpectedErrorContains: \"unexpected status code 401 from target database\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid port\", // fails faster than unroutable address (connection refused vs. connect timeout)\n\t\t\tremoteURLHostOverride: \"127.0.0.1:1234\",\n\t\t\texpectedErrorIsConnectionRefused: true,\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\n\t\t\tvar abortTimeout = time.Millisecond * 500\n\t\t\tif runtime.GOOS == \"windows\" {\n\t\t\t\t// A longer timeout is required on Windows as connection refused errors take approx 2 seconds vs. instantaneous on Linux.\n\t\t\t\tabortTimeout = time.Second * 5\n\t\t\t}\n\t\t\t// test cases with and without a timeout. 
Ensure replicator retry loop is stopped in both cases.\n\t\t\ttimeoutVals := []time.Duration{\n\t\t\t\t0,\n\t\t\t\tabortTimeout,\n\t\t\t}\n\n\t\t\tfor _, timeoutVal := range timeoutVals {\n\t\t\t\tt.Run(test.name+\" with timeout \"+timeoutVal.String(), func(t *testing.T) {\n\n\t\t\t\t\tbase.SetUpTestLogging(t, logger.LevelDebug, logger.KeyAll)\n\n\t\t\t\t\t// Passive\n\t\t\t\t\ttb2 := base.GetTestBucket(t)\n\t\t\t\t\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\t\t\tTestBucket: tb2,\n\t\t\t\t\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\t\t\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\t\t\t\t\"alice\": {\n\t\t\t\t\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\t\t\t\t\tExplicitChannels: utils.SetOf(\"alice\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}},\n\t\t\t\t\t})\n\t\t\t\t\tdefer rt2.Close()\n\n\t\t\t\t\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1\n\t\t\t\t\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\t\t\t\t\tdefer srv.Close()\n\n\t\t\t\t\t// Build remoteDBURL with basic auth creds\n\t\t\t\t\tremoteDBURL, err := url.Parse(srv.URL + \"/db\")\n\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\t// Add basic auth creds to target db URL\n\t\t\t\t\tusername := \"alice\"\n\t\t\t\t\tif test.usernameOverride != \"\" {\n\t\t\t\t\t\tusername = test.usernameOverride\n\t\t\t\t\t}\n\t\t\t\t\tremoteDBURL.User = url.UserPassword(username, \"pass\")\n\n\t\t\t\t\tif test.remoteURLHostOverride != \"\" {\n\t\t\t\t\t\tremoteDBURL.Host = test.remoteURLHostOverride\n\t\t\t\t\t}\n\n\t\t\t\t\t// Active\n\t\t\t\t\ttb1 := base.GetTestBucket(t)\n\t\t\t\t\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\t\t\tTestBucket: tb1,\n\t\t\t\t\t})\n\t\t\t\t\tdefer rt1.Close()\n\n\t\t\t\t\tid, err := base.GenerateRandomID()\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\tarConfig := db.ActiveReplicatorConfig{\n\t\t\t\t\t\tID: id,\n\t\t\t\t\t\tDirection: db.ActiveReplicatorTypePush,\n\t\t\t\t\t\tRemoteDBURL: remoteDBURL,\n\t\t\t\t\t\tActiveDB: &db.Database{\n\t\t\t\t\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tContinuous: true,\n\t\t\t\t\t\t// aggressive reconnect intervals for testing purposes\n\t\t\t\t\t\tInitialReconnectInterval: time.Millisecond,\n\t\t\t\t\t\tMaxReconnectInterval: time.Millisecond * 50,\n\t\t\t\t\t\tTotalReconnectTimeout: timeoutVal,\n\t\t\t\t\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t\t\t\t\t}\n\n\t\t\t\t\t// Create the first active replicator to pull from seq:0\n\t\t\t\t\tar := db.NewActiveReplicator(&arConfig)\n\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\tassert.Equal(t, int64(0), ar.Push.GetStats().NumConnectAttempts.Value())\n\n\t\t\t\t\terr = ar.Start()\n\t\t\t\t\tassert.Error(t, err, \"expecting ar.Start() to return error, but it didn't\")\n\n\t\t\t\t\tif test.expectedErrorIsConnectionRefused {\n\t\t\t\t\t\tassert.True(t, base.IsConnectionRefusedError(err))\n\t\t\t\t\t}\n\n\t\t\t\t\tif test.expectedErrorContains != \"\" {\n\t\t\t\t\t\tassert.True(t, strings.Contains(err.Error(), test.expectedErrorContains))\n\t\t\t\t\t}\n\n\t\t\t\t\t// wait for an arbitrary number of reconnect attempts\n\t\t\t\t\twaitAndRequireCondition(t, func() bool {\n\t\t\t\t\t\treturn ar.Push.GetStats().NumConnectAttempts.Value() > 2\n\t\t\t\t\t}, \"Expecting NumConnectAttempts > 2\")\n\n\t\t\t\t\tif timeoutVal > 0 {\n\t\t\t\t\t\ttime.Sleep(timeoutVal + time.Millisecond*250)\n\t\t\t\t\t\t// wait for the retry loop to 
hit the TotalReconnectTimeout and give up retrying\n\t\t\t\t\t\twaitAndRequireCondition(t, func() bool {\n\t\t\t\t\t\t\treturn ar.Push.GetStats().NumReconnectsAborted.Value() > 0\n\t\t\t\t\t\t}, \"Expecting NumReconnectsAborted > 0\")\n\t\t\t\t\t}\n\n\t\t\t\t\tassert.NoError(t, ar.Stop())\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}", "func TestFailbackRetryFailed10Times(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tinvoker := mock.NewMockInvoker(ctrl)\n\tclusterInvoker := registerFailback(invoker).(*failbackClusterInvoker)\n\tclusterInvoker.maxRetries = 10\n\n\tinvoker.EXPECT().IsAvailable().Return(true).AnyTimes()\n\tinvoker.EXPECT().GetURL().Return(failbackUrl).AnyTimes()\n\n\t// The first 10 tasks should fail.\n\tmockFailedResult := &protocol.RPCResult{Err: perrors.New(\"error\")}\n\tinvoker.EXPECT().Invoke(gomock.Any(), gomock.Any()).Return(mockFailedResult).Times(10)\n\n\t// The 10 tasks should retry and fail again.\n\tvar wg sync.WaitGroup\n\twg.Add(10)\n\tnow := time.Now()\n\tinvoker.EXPECT().Invoke(gomock.Any(), gomock.Any()).DoAndReturn(func(context.Context, protocol.Invocation) protocol.Result {\n\t\tdelta := time.Since(now).Nanoseconds() / int64(time.Second)\n\t\tassert.True(t, delta >= 5)\n\t\twg.Done()\n\t\treturn mockFailedResult\n\t}).Times(10)\n\n\tfor i := 0; i < 10; i++ {\n\t\tresult := clusterInvoker.Invoke(context.Background(), &invocation.RPCInvocation{})\n\t\tassert.Nil(t, result.Error())\n\t\tassert.Nil(t, result.Result())\n\t\tassert.Equal(t, 0, len(result.Attachments()))\n\t}\n\n\twg.Wait()\n\ttime.Sleep(time.Second) // in order to ensure checkRetry has completed\n\tassert.Equal(t, int64(10), clusterInvoker.taskList.Len())\n\n\tinvoker.EXPECT().Destroy().Return()\n\tclusterInvoker.Destroy()\n\n\tassert.Equal(t, int64(0), clusterInvoker.taskList.Len())\n}", "func createFallbackSnapshot() cache.Snapshot {\n\treturn fallbackSnapshot(fallbackBindAddr, fallbackBindPort, fallbackStatusCode)\n}", "func TestReconcileClusterServiceBrokerFailureOnFinalRetry(t *testing.T) {\n\tfakeKubeClient, fakeCatalogClient, fakeClusterServiceBrokerClient, testController, _ := newTestController(t, fakeosb.FakeClientConfiguration{\n\t\tCatalogReaction: &fakeosb.CatalogReaction{\n\t\t\tError: errors.New(\"ooops\"),\n\t\t},\n\t})\n\n\tbroker := getTestClusterServiceBroker()\n\tstartTime := metav1.NewTime(time.Now().Add(-7 * 24 * time.Hour))\n\tbroker.Status.OperationStartTime = &startTime\n\n\tif err := reconcileClusterServiceBroker(t, testController, broker); err != nil {\n\t\tt.Fatalf(\"Should have returned no error because the retry duration has elapsed: %v\", err)\n\t}\n\n\tbrokerActions := fakeClusterServiceBrokerClient.Actions()\n\tassertNumberOfClusterServiceBrokerActions(t, brokerActions, 1)\n\tassertGetCatalog(t, brokerActions[0])\n\n\tactions := fakeCatalogClient.Actions()\n\tassertNumberOfActions(t, actions, 2)\n\n\tupdatedClusterServiceBroker := assertUpdateStatus(t, actions[0], broker)\n\tassertClusterServiceBrokerReadyFalse(t, updatedClusterServiceBroker)\n\n\tupdatedClusterServiceBroker = assertUpdateStatus(t, actions[1], broker)\n\tassertClusterServiceBrokerCondition(t, updatedClusterServiceBroker, v1beta1.ServiceBrokerConditionFailed, v1beta1.ConditionTrue)\n\tassertClusterServiceBrokerOperationStartTimeSet(t, updatedClusterServiceBroker, false)\n\n\tassertNumberOfActions(t, fakeKubeClient.Actions(), 0)\n\n\tevents := getRecordedEvents(testController)\n\n\texpectedEventPrefixes := 
[]string{\n\t\twarningEventBuilder(errorFetchingCatalogReason).String(),\n\t\twarningEventBuilder(errorReconciliationRetryTimeoutReason).String(),\n\t}\n\n\tif err := checkEventPrefixes(events, expectedEventPrefixes); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func patchOrCreate(mapping *meta.RESTMapping, config *rest.Config, group string,\n\tversion string, namespace string, name string, data []byte) error {\n\tlog.Infof(\"Applying resource configuration for %v\", name)\n\terr := getResource(mapping, config, group, version, namespace, name)\n\tif err != nil {\n\t\tlog.Infof(\"getResource error, treating as not found: %v\", err)\n\t\terr = createResource(mapping, config, group, version, namespace, data)\n\t} else {\n\t\tlog.Infof(\"getResource succeeds, treating as found.\")\n\t\terr = patchResource(mapping, config, group, version, namespace, data)\n\t}\n\n\tfor i := 1; i < maxRetries && k8serrors.IsConflict(err); i++ {\n\t\ttime.Sleep(backoffInterval)\n\n\t\tlog.Infof(\"Retrying patchOrCreate at %v attempt ...\", i)\n\t\terr = getResource(mapping, config, group, version, namespace, name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = patchResource(mapping, config, group, version, namespace, data)\n\t}\n\n\tif err != nil && (k8serrors.IsConflict(err) || k8serrors.IsInvalid(err) ||\n\t\tk8serrors.IsMethodNotSupported(err)) {\n\t\tlog.Infof(\"Trying delete and create as last resort ...\")\n\t\tif err = deleteResource(mapping, config, group, version, namespace, name); err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = createResource(mapping, config, group, version, namespace, data)\n\t}\n\treturn err\n}", "func TestInitializeRecovery(t *testing.T) {\n\tempty := \"\"\n\tconfig := &drivers.OntapStorageDriverConfig{\n\t\tCommonStorageDriverConfig: &drivers.CommonStorageDriverConfig{\n\t\t\tVersion: 1,\n\t\t\tStorageDriverName: \"ontap-nas\",\n\t\t\tStoragePrefixRaw: json.RawMessage(\"{}\"),\n\t\t\tStoragePrefix: &empty,\n\t\t},\n\t\t// These should be bogus yet valid connection parameters\n\t\tManagementLIF: \"127.0.0.1\",\n\t\tDataLIF: \"127.0.0.1\",\n\t\tIgroupName: \"nonexistent\",\n\t\tSVM: \"nonexistent\",\n\t\tUsername: \"none\",\n\t\tPassword: \"none\",\n\t}\n\tmarshaledJSON, err := json.Marshal(config)\n\tif err != nil {\n\t\tt.Fatal(\"Unable to marshal ONTAP config: \", err)\n\t}\n\n\tcommonConfig, configInJSON, err := ValidateCommonSettings(context.Background(), string(marshaledJSON))\n\tif err != nil {\n\t\tt.Error(\"Failed to validate settings for configuration.\")\n\t}\n\n\t_, err = NewStorageBackendForConfig(context.Background(), configInJSON, \"fakeConfigRef\", uuid.New().String(),\n\t\tcommonConfig, nil)\n\tif err == nil {\n\t\tt.Error(\"Failed to get error for incorrect configuration.\")\n\t}\n}", "func TestCreateOrUpdateResource(t *testing.T) {\n\tt.Run(\"ready status unknown\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: 
\"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\t\tg.Expect(err.Error()).To(ContainSubstring(\"ready status unknown\"))\n\t})\n\n\tt.Run(\"create resource that doesn't already exist\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.EXPECT().Parameters(gomockinternal.AContext(), gomock.Nil()).Return(&asoresourcesv1.ResourceGroup{\n\t\t\tSpec: asoresourcesv1.ResourceGroup_Spec{\n\t\t\t\tLocation: ptr.To(\"location\"),\n\t\t\t},\n\t\t}, nil)\n\n\t\tctx := context.Background()\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\t\tg.Expect(azure.IsOperationNotDoneError(err)).To(BeTrue())\n\t\tvar recerr azure.ReconcileError\n\t\tg.Expect(errors.As(err, &recerr)).To(BeTrue())\n\t\tg.Expect(recerr.IsTransient()).To(BeTrue())\n\n\t\tcreated := &asoresourcesv1.ResourceGroup{}\n\t\tg.Expect(c.Get(ctx, types.NamespacedName{Name: \"name\", Namespace: \"namespace\"}, created)).To(Succeed())\n\t\tg.Expect(created.Name).To(Equal(\"name\"))\n\t\tg.Expect(created.Namespace).To(Equal(\"namespace\"))\n\t\tg.Expect(created.Labels).To(Equal(map[string]string{\n\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t}))\n\t\tg.Expect(created.Annotations).To(Equal(map[string]string{\n\t\t\tReconcilePolicyAnnotation: ReconcilePolicySkip,\n\t\t\tSecretNameAnnotation: \"cluster-aso-secret\",\n\t\t}))\n\t\tg.Expect(created.Spec).To(Equal(asoresourcesv1.ResourceGroup_Spec{\n\t\t\tLocation: ptr.To(\"location\"),\n\t\t}))\n\t})\n\n\tt.Run(\"resource is not ready in non-terminal state\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionFalse,\n\t\t\t\t\t\tSeverity: 
conditions.ConditionSeverityInfo,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\t\tg.Expect(err.Error()).To(ContainSubstring(\"resource is not Ready\"))\n\t\tvar recerr azure.ReconcileError\n\t\tg.Expect(errors.As(err, &recerr)).To(BeTrue())\n\t\tg.Expect(recerr.IsTransient()).To(BeTrue())\n\t})\n\n\tt.Run(\"resource is not ready in reconciling state\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionFalse,\n\t\t\t\t\t\tSeverity: conditions.ConditionSeverityInfo,\n\t\t\t\t\t\tReason: conditions.ReasonReconciling.Name,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(azure.IsOperationNotDoneError(err)).To(BeTrue())\n\t})\n\n\tt.Run(\"resource is not ready in terminal state\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionFalse,\n\t\t\t\t\t\tSeverity: conditions.ConditionSeverityError,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\t\tg.Expect(err.Error()).To(ContainSubstring(\"resource is not Ready\"))\n\t\tvar recerr azure.ReconcileError\n\t\tg.Expect(errors.As(err, &recerr)).To(BeTrue())\n\t\tg.Expect(recerr.IsTerminal()).To(BeTrue())\n\t})\n\n\tt.Run(\"error getting 
existing resource\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(ErroringGetClient{Client: c, err: errors.New(\"an error\")}, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\n\t\tctx := context.Background()\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\t\tg.Expect(err.Error()).To(ContainSubstring(\"failed to get existing resource\"))\n\t})\n\n\tt.Run(\"begin an update\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.EXPECT().Parameters(gomockinternal.AContext(), gomock.Not(gomock.Nil())).DoAndReturn(func(_ context.Context, object genruntime.MetaObject) (genruntime.MetaObject, error) {\n\t\t\tgroup := object.DeepCopyObject().(*asoresourcesv1.ResourceGroup)\n\t\t\tgroup.Spec.Location = ptr.To(\"location\")\n\t\t\treturn group, nil\n\t\t})\n\t\tspecMock.EXPECT().WasManaged(gomock.Any()).Return(false)\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\t})\n\n\tt.Run(\"adopt managed resource in not found state\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\tclusterName := \"cluster\"\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.EXPECT().Parameters(gomockinternal.AContext(), gomock.Not(gomock.Nil())).DoAndReturn(func(_ context.Context, object genruntime.MetaObject) (genruntime.MetaObject, error) {\n\t\t\treturn object, nil\n\t\t})\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, 
&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\tReconcilePolicyAnnotation: ReconcilePolicySkip,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionFalse,\n\t\t\t\t\t\tReason: conditions.ReasonAzureResourceNotFound.Name,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\n\t\tupdated := &asoresourcesv1.ResourceGroup{}\n\t\tg.Expect(c.Get(ctx, types.NamespacedName{Name: \"name\", Namespace: \"namespace\"}, updated)).To(Succeed())\n\t\tg.Expect(updated.Annotations).To(Equal(map[string]string{\n\t\t\tReconcilePolicyAnnotation: ReconcilePolicyManage,\n\t\t\tSecretNameAnnotation: \"cluster-aso-secret\",\n\t\t}))\n\t})\n\n\tt.Run(\"adopt previously managed resource\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\tclusterName := \"cluster\"\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.EXPECT().Parameters(gomockinternal.AContext(), gomock.Not(gomock.Nil())).DoAndReturn(func(_ context.Context, object genruntime.MetaObject) (genruntime.MetaObject, error) {\n\t\t\treturn nil, nil\n\t\t})\n\t\tspecMock.EXPECT().WasManaged(gomock.Any()).Return(true)\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\tReconcilePolicyAnnotation: ReconcilePolicySkip,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\n\t\tupdated := &asoresourcesv1.ResourceGroup{}\n\t\tg.Expect(c.Get(ctx, types.NamespacedName{Name: \"name\", Namespace: \"namespace\"}, updated)).To(Succeed())\n\t\tg.Expect(updated.Annotations).To(Equal(map[string]string{\n\t\t\tReconcilePolicyAnnotation: ReconcilePolicyManage,\n\t\t\tSecretNameAnnotation: \"cluster-aso-secret\",\n\t\t}))\n\t})\n\n\tt.Run(\"Parameters error\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, 
clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.EXPECT().Parameters(gomockinternal.AContext(), gomock.Not(gomock.Nil())).Return(nil, errors.New(\"parameters error\"))\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\t\tg.Expect(err.Error()).To(ContainSubstring(\"parameters error\"))\n\t})\n\n\tt.Run(\"skip update for unmanaged resource\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).NotTo(BeNil())\n\t\tg.Expect(err).To(BeNil())\n\t})\n\n\tt.Run(\"resource up to date\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.EXPECT().Parameters(gomockinternal.AContext(), gomock.Any()).DoAndReturn(func(_ context.Context, object genruntime.MetaObject) (genruntime.MetaObject, error) {\n\t\t\treturn nil, nil\n\t\t})\n\t\tspecMock.EXPECT().WasManaged(gomock.Any()).Return(false)\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: 
map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\tReconcilePolicyAnnotation: ReconcilePolicyManage,\n\t\t\t\t\tSecretNameAnnotation: \"cluster-aso-secret\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: asoresourcesv1.ResourceGroup_Spec{\n\t\t\t\tLocation: ptr.To(\"location\"),\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).NotTo(BeNil())\n\t\tg.Expect(err).To(BeNil())\n\n\t\tg.Expect(result.GetName()).To(Equal(\"name\"))\n\t\tg.Expect(result.GetNamespace()).To(Equal(\"namespace\"))\n\t\tg.Expect(result.(*asoresourcesv1.ResourceGroup).Spec.Location).To(Equal(ptr.To(\"location\")))\n\t})\n\n\tt.Run(\"error updating\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(ErroringPatchClient{Client: c, err: errors.New(\"an error\")}, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.EXPECT().Parameters(gomockinternal.AContext(), gomock.Any()).DoAndReturn(func(_ context.Context, object genruntime.MetaObject) (genruntime.MetaObject, error) {\n\t\t\tgroup := object.DeepCopyObject().(*asoresourcesv1.ResourceGroup)\n\t\t\tgroup.Spec.Location = ptr.To(\"location\")\n\t\t\treturn group, nil\n\t\t})\n\t\tspecMock.EXPECT().WasManaged(gomock.Any()).Return(false)\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\t\tg.Expect(err.Error()).To(ContainSubstring(\"failed to update resource\"))\n\t})\n\n\tt.Run(\"with tags success\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := struct {\n\t\t\t*mock_azure.MockASOResourceSpecGetter\n\t\t\t*mock_aso.MockTagsGetterSetter\n\t\t}{\n\t\t\tMockASOResourceSpecGetter: mock_azure.NewMockASOResourceSpecGetter(mockCtrl),\n\t\t\tMockTagsGetterSetter: 
mock_aso.NewMockTagsGetterSetter(mockCtrl),\n\t\t}\n\t\tspecMock.MockASOResourceSpecGetter.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.MockASOResourceSpecGetter.EXPECT().Parameters(gomockinternal.AContext(), gomock.Any()).DoAndReturn(func(_ context.Context, object genruntime.MetaObject) (genruntime.MetaObject, error) {\n\t\t\treturn nil, nil\n\t\t})\n\t\tspecMock.MockASOResourceSpecGetter.EXPECT().WasManaged(gomock.Any()).Return(false)\n\n\t\tspecMock.MockTagsGetterSetter.EXPECT().GetActualTags(gomock.Any()).Return(nil)\n\t\tspecMock.MockTagsGetterSetter.EXPECT().GetAdditionalTags().Return(nil)\n\t\tspecMock.MockTagsGetterSetter.EXPECT().GetDesiredTags(gomock.Any()).Return(nil)\n\t\tspecMock.MockTagsGetterSetter.EXPECT().SetTags(gomock.Any(), gomock.Any())\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\tReconcilePolicyAnnotation: ReconcilePolicyManage,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(azure.IsOperationNotDoneError(err)).To(BeTrue())\n\n\t\tupdated := &asoresourcesv1.ResourceGroup{}\n\t\tg.Expect(c.Get(ctx, types.NamespacedName{Name: \"name\", Namespace: \"namespace\"}, updated)).To(Succeed())\n\t\tg.Expect(updated.Annotations).To(HaveKey(tagsLastAppliedAnnotation))\n\t})\n\n\tt.Run(\"with tags failure\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := struct {\n\t\t\t*mock_azure.MockASOResourceSpecGetter\n\t\t\t*mock_aso.MockTagsGetterSetter\n\t\t}{\n\t\t\tMockASOResourceSpecGetter: mock_azure.NewMockASOResourceSpecGetter(mockCtrl),\n\t\t\tMockTagsGetterSetter: mock_aso.NewMockTagsGetterSetter(mockCtrl),\n\t\t}\n\t\tspecMock.MockASOResourceSpecGetter.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.MockASOResourceSpecGetter.EXPECT().Parameters(gomockinternal.AContext(), gomock.Any()).DoAndReturn(func(_ context.Context, object genruntime.MetaObject) (genruntime.MetaObject, error) {\n\t\t\treturn nil, nil\n\t\t})\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\tReconcilePolicyAnnotation: ReconcilePolicyManage,\n\t\t\t\t\ttagsLastAppliedAnnotation: \"{\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: 
asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err.Error()).To(ContainSubstring(\"failed to reconcile tags\"))\n\t})\n\n\tt.Run(\"reconcile policy annotation resets after un-pause\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.EXPECT().Parameters(gomockinternal.AContext(), gomock.Any()).DoAndReturn(func(_ context.Context, object genruntime.MetaObject) (genruntime.MetaObject, error) {\n\t\t\treturn nil, nil\n\t\t})\n\t\tspecMock.EXPECT().WasManaged(gomock.Any()).Return(false)\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\tPrePauseReconcilePolicyAnnotation: ReconcilePolicyManage,\n\t\t\t\t\tReconcilePolicyAnnotation: ReconcilePolicySkip,\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: asoresourcesv1.ResourceGroup_Spec{\n\t\t\t\tLocation: ptr.To(\"location\"),\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(azure.IsOperationNotDoneError(err)).To(BeTrue())\n\n\t\tupdated := &asoresourcesv1.ResourceGroup{}\n\t\tg.Expect(c.Get(ctx, types.NamespacedName{Name: \"name\", Namespace: \"namespace\"}, updated)).To(Succeed())\n\t\tg.Expect(updated.Annotations).NotTo(HaveKey(PrePauseReconcilePolicyAnnotation))\n\t\tg.Expect(updated.Annotations).To(HaveKeyWithValue(ReconcilePolicyAnnotation, ReconcilePolicyManage))\n\t})\n}", "func CreateTemplateFailJSONMocked(t *testing.T, templateIn *types.Template) *types.Template {\n\n\tassert := assert.New(t)\n\n\t// wire up\n\tcs := &utils.MockConcertoService{}\n\tds, err := NewTemplateService(cs)\n\tassert.Nil(err, \"Couldn't load template service\")\n\tassert.NotNil(ds, \"Template service not instanced\")\n\n\t// convertMap\n\tmapIn, err := utils.ItemConvertParams(*templateIn)\n\tassert.Nil(err, \"Template test data corrupted\")\n\n\t// wrong json\n\tdOut := []byte{10, 20, 30}\n\n\t// call service\n\tcs.On(\"Post\", \"/blueprint/templates/\", mapIn).Return(dOut, 200, nil)\n\ttemplateOut, err := ds.CreateTemplate(mapIn)\n\tassert.NotNil(err, \"We are expecting a marshalling error\")\n\tassert.Nil(templateOut, \"Expecting nil output\")\n\tassert.Contains(err.Error(), \"invalid character\", \"Error message should 
include the string 'invalid character'\")\n\n\treturn templateOut\n}", "func TestCheckRequiredTemplate_Create_DoesNotSwallowError(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tr := ResourceCheckRequiredTemplate()\n\tresourceData := schema.TestResourceDataRaw(t, r.Schema, nil)\n\tflattenErr := flattenCheckRequiredTemplate(resourceData, &requiredTemplateCheckTest, requiredTemplateCheckProjectID)\n\n\tpipelinesChecksClient := azdosdkmocks.NewMockPipelineschecksextrasClient(ctrl)\n\tclients := &client.AggregatedClient{PipelinesChecksClientExtras: pipelinesChecksClient, Ctx: context.Background()}\n\n\texpectedArgs := pipelineschecksextras.AddCheckConfigurationArgs{Configuration: &requiredTemplateCheckTest, Project: &requiredTemplateCheckProjectID}\n\tpipelinesChecksClient.\n\t\tEXPECT().\n\t\tAddCheckConfiguration(clients.Ctx, expectedArgs).\n\t\tReturn(nil, errors.New(\"AddCheckConfiguration() Failed\")).\n\t\tTimes(1)\n\n\terr := r.Create(resourceData, clients)\n\trequire.Contains(t, err.Error(), \"AddCheckConfiguration() Failed\")\n\trequire.Nil(t, flattenErr)\n}", "func TestCreateFails(t *testing.T) {\n\toptions := &sharetypes.CreateOpts{\n\t\tName: \"my_new_share_type\",\n\t}\n\n\t_, err := sharetypes.Create(client.ServiceClient(), options).Extract()\n\tif _, ok := err.(gophercloud.ErrMissingInput); !ok {\n\t\tt.Fatal(\"ErrMissingInput was expected to occur\")\n\t}\n\n\textraSpecs := sharetypes.ExtraSpecsOpts{\n\t\tDriverHandlesShareServers: true,\n\t}\n\n\toptions = &sharetypes.CreateOpts{\n\t\tExtraSpecs: extraSpecs,\n\t}\n\n\t_, err = sharetypes.Create(client.ServiceClient(), options).Extract()\n\tif _, ok := err.(gophercloud.ErrMissingInput); !ok {\n\t\tt.Fatal(\"ErrMissingInput was expected to occur\")\n\t}\n}", "func (pas *PodAutoscalerStatus) MarkResourceFailedCreation(kind, name string) {\n\tpas.MarkInactive(\"FailedCreate\",\n\t\tfmt.Sprintf(\"Failed to create %s %q.\", kind, name))\n}", "func (suite *TaskFailRetryTestSuite) TestTaskFailRetryFailedPatch() {\n\ttaskConfig := pbtask.TaskConfig{\n\t\tRestartPolicy: &pbtask.RestartPolicy{\n\t\t\tMaxFailures: 3,\n\t\t},\n\t}\n\n\tsuite.cachedTask.EXPECT().\n\t\tID().\n\t\tReturn(uint32(0)).\n\t\tAnyTimes()\n\n\tsuite.jobFactory.EXPECT().\n\t\tGetJob(suite.jobID).Return(suite.cachedJob)\n\n\tsuite.cachedJob.EXPECT().\n\t\tGetTask(suite.instanceID).Return(suite.cachedTask)\n\n\tsuite.cachedJob.EXPECT().\n\t\tID().Return(suite.jobID)\n\n\tsuite.cachedTask.EXPECT().\n\t\tGetRuntime(gomock.Any()).Return(suite.taskRuntime, nil)\n\n\tsuite.taskConfigV2Ops.EXPECT().\n\t\tGetTaskConfig(gomock.Any(), suite.jobID, suite.instanceID, gomock.Any()).\n\t\tReturn(&taskConfig, &models.ConfigAddOn{}, nil)\n\n\tsuite.cachedJob.EXPECT().\n\t\tPatchTasks(gomock.Any(), gomock.Any(), false).\n\t\tReturn(nil, nil, fmt.Errorf(\"patch error\"))\n\n\terr := TaskFailRetry(context.Background(), suite.taskEnt)\n\tsuite.Error(err)\n}", "func TestCreateRetryConflictTagDiff(t *testing.T) {\n\tfirstGet := true\n\tfirstUpdate := true\n\trestInstance := &REST{\n\t\tstrategy: NewStrategy(registryhostname.TestingRegistryHostnameRetriever(nil, \"\", testDefaultRegistryURL)),\n\t\timageRegistry: &fakeImageRegistry{\n\t\t\tcreateImage: func(ctx context.Context, image *imageapi.Image) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\timageStreamRegistry: &fakeImageStreamRegistry{\n\t\t\tgetImageStream: func(ctx context.Context, id string, options *metav1.GetOptions) (*imageapi.ImageStream, error) {\n\t\t\t\t// For the first get, return a 
stream with a latest tag pointing to \"original\"\n\t\t\t\tif firstGet {\n\t\t\t\t\tfirstGet = false\n\t\t\t\t\tstream := validImageStream()\n\t\t\t\t\tstream.Status = imageapi.ImageStreamStatus{\n\t\t\t\t\t\tTags: map[string]imageapi.TagEventList{\n\t\t\t\t\t\t\t\"latest\": {Items: []imageapi.TagEvent{{DockerImageReference: \"localhost:5000/someproject/somerepo:original\"}}},\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t\treturn stream, nil\n\t\t\t\t}\n\t\t\t\t// For subsequent gets, return a stream with the latest tag changed to \"newer\"\n\t\t\t\tstream := validImageStream()\n\t\t\t\tstream.Status = imageapi.ImageStreamStatus{\n\t\t\t\t\tTags: map[string]imageapi.TagEventList{\n\t\t\t\t\t\t\"latest\": {Items: []imageapi.TagEvent{{DockerImageReference: \"localhost:5000/someproject/somerepo:newer\"}}},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\treturn stream, nil\n\t\t\t},\n\t\t\tupdateImageStreamStatus: func(ctx context.Context, repo *imageapi.ImageStream) (*imageapi.ImageStream, error) {\n\t\t\t\t// For the first update, return a conflict so that the stream\n\t\t\t\t// get/compare is retried.\n\t\t\t\tif firstUpdate {\n\t\t\t\t\tfirstUpdate = false\n\t\t\t\t\treturn nil, errors.NewConflict(imagegroup.Resource(\"imagestreams\"), repo.Name, fmt.Errorf(\"resource modified\"))\n\t\t\t\t}\n\t\t\t\treturn repo, nil\n\t\t\t},\n\t\t},\n\t}\n\tobj, err := restInstance.Create(apirequest.NewDefaultContext(), validNewMappingWithName(), rest.ValidateAllObjectFunc, false)\n\tif err == nil {\n\t\tt.Fatalf(\"expected an error\")\n\t}\n\tif !errors.IsConflict(err) {\n\t\tt.Errorf(\"expected a conflict error, got %v\", err)\n\t}\n\tif obj != nil {\n\t\tt.Fatalf(\"expected a nil result\")\n\t}\n}", "func SKIPPEDTestAccessAfterUnmap(t *testing.T) {\n\ttmpDir, _ := ioutil.TempDir(\"\", \"mossMMap\")\n\tdefer os.RemoveAll(tmpDir)\n\n\tf, err := os.Create(tmpDir + string(os.PathSeparator) + \"test.file\")\n\tif err != nil {\n\t\tt.Errorf(\"expected open file to work, err: %v\", err)\n\t}\n\n\tdefer f.Close()\n\n\toffset := 1024 * 1024 * 1024 // 1 GB.\n\n\tf.WriteAt([]byte(\"hello\"), int64(offset))\n\n\tvar mm mmap.MMap\n\n\tmm, err = mmap.Map(f, mmap.RDONLY, 0)\n\tif err != nil {\n\t\tt.Errorf(\"expected mmap to work, err: %v\", err)\n\t}\n\n\tx := mm[offset : offset+5]\n\n\tif string(x) != \"hello\" {\n\t\tt.Errorf(\"expected hello\")\n\t}\n\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tfmt.Println(\"Recovered in f\", r)\n\t\t} else {\n\t\t\tt.Errorf(\"expected recover from panic\")\n\t\t}\n\t}()\n\n\tmm.Unmap()\n\n\t/*\n\t\t\tThe following access of x results in a segfault, like...\n\n\t\t\t\tunexpected fault address 0x4060c000\n\t\t\t\tfatal error: fault\n\t\t\t\t[signal 0xb code=0x1 addr=0x4060c000 pc=0xb193f]\n\n\t\t The recover() machinery doesn't handle this situation, however,\n\t\t as it's not a normal kind of panic()\n\t*/\n\tif x[0] != 'h' {\n\t\tt.Errorf(\"expected h, but actually expected a segfault\")\n\t}\n\n\tt.Errorf(\"expected segfault, but instead unmmapped mem access worked\")\n}", "func TestFailbackRetryFailed(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tinvoker := mock.NewMockInvoker(ctrl)\n\tclusterInvoker := registerFailback(invoker).(*failbackClusterInvoker)\n\n\tinvoker.EXPECT().GetURL().Return(failbackUrl).AnyTimes()\n\tinvoker.EXPECT().IsAvailable().Return(true).AnyTimes()\n\n\tmockFailedResult := &protocol.RPCResult{Err: perrors.New(\"error\")}\n\tinvoker.EXPECT().Invoke(gomock.Any(), gomock.Any()).Return(mockFailedResult)\n\n\t//\n\tvar wg 
sync.WaitGroup\n\tretries := 2\n\twg.Add(retries)\n\tnow := time.Now()\n\n\t// add retry call that eventually failed.\n\tfor i := 0; i < retries; i++ {\n\t\tj := i + 1\n\t\tinvoker.EXPECT().Invoke(gomock.Any(), gomock.Any()).DoAndReturn(func(context.Context, protocol.Invocation) protocol.Result {\n\t\t\tdelta := time.Since(now).Nanoseconds() / int64(time.Second)\n\t\t\tassert.True(t, delta >= int64(5*j))\n\t\t\twg.Done()\n\t\t\treturn mockFailedResult\n\t\t})\n\t}\n\n\t// first call should failed.\n\tresult := clusterInvoker.Invoke(context.Background(), &invocation.RPCInvocation{})\n\tassert.Nil(t, result.Error())\n\tassert.Nil(t, result.Result())\n\tassert.Equal(t, 0, len(result.Attachments()))\n\n\twg.Wait()\n\ttime.Sleep(time.Second)\n\tassert.Equal(t, int64(1), clusterInvoker.taskList.Len())\n\n\tinvoker.EXPECT().Destroy().Return()\n\tclusterInvoker.Destroy()\n\t// after destroy, the taskList will be empty\n\tassert.Equal(t, int64(0), clusterInvoker.taskList.Len())\n}", "func TestRedisCannotBePinged(t *testing.T) {\n\tr := newTestRedis()\n\tr.On(\"Ping\").\n\t\tReturn(redis.NewStatusResult(\"\", errors.New(\"server not available\")))\n\n\tassert.False(t, RedisIsAvailable(r))\n}", "func (t *trial) recover() error {\n\trunID, restarts, err := t.db.TrialRunIDAndRestarts(t.id)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"restoring old trial state\")\n\t}\n\tt.runID = runID\n\tt.restarts = restarts\n\treturn nil\n}", "func registerFailback(invoker *mock.MockInvoker) protocol.Invoker {\n\textension.SetLoadbalance(\"random\", random.NewRandomLoadBalance)\n\tfailbackCluster := newFailbackCluster()\n\n\tvar invokers []protocol.Invoker\n\tinvokers = append(invokers, invoker)\n\n\tinvoker.EXPECT().GetURL().Return(failbackUrl).AnyTimes()\n\n\tstaticDir := static.NewDirectory(invokers)\n\tclusterInvoker := failbackCluster.Join(staticDir)\n\treturn clusterInvoker\n}", "func TestRecreate(t *testing.T) {\n\tdb, err := Open(db_filename, \"c\")\n\n\tif err != nil {\n\t\tt.Error(\"Couldn't create new database\")\n\t}\n\n\tdb.Close()\n\n\tf, err := Open(db_filename, \"r\")\n\n\tdefer f.Close()\n\tdefer os.Remove(db_filename)\n\tif err != nil {\n\t\tt.Error(\"Couldn't open new database\")\n\t}\n\n\tif os.IsExist(err) {\n\t\tt.Error(\"Database wasn't actually created\")\n\t}\n\n\t// Validate that there are no keys in the DB\n\t// If err was not found, a key was returned\n\t_, err = f.FirstKey()\n\tif err == nil {\n\t\tt.Error(\"Database was not emptied of keys\")\n\t}\n}", "func (m *CloudWatchLogsServiceMock) CreateNewServiceIfUnHealthy() {\n\n}", "func (suite *podActionsTestSuite) TestClientPodRestartFail() {\n\tsuite.podClient.EXPECT().\n\t\tRestartPod(gomock.Any(), gomock.Any()).\n\t\tReturn(nil, yarpcerrors.InternalErrorf(\"test error\"))\n\n\tsuite.Error(suite.client.PodRestartAction(testPodName))\n}", "func TestAzureDevOpsServiceEndpointDockerRegistry_Update_DoesNotSwallowError(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tr := resourceServiceEndpointDockerRegistry()\n\tresourceData := schema.TestResourceDataRaw(t, r.Schema, nil)\n\tflattenServiceEndpointDockerRegistry(resourceData, &dockerRegistryTestServiceEndpoint, dockerRegistryTestServiceEndpointProjectID)\n\n\tbuildClient := azdosdkmocks.NewMockServiceendpointClient(ctrl)\n\tclients := &config.AggregatedClient{ServiceEndpointClient: buildClient, Ctx: context.Background()}\n\n\texpectedArgs := serviceendpoint.UpdateServiceEndpointArgs{\n\t\tEndpoint: &dockerRegistryTestServiceEndpoint,\n\t\tEndpointId: 
dockerRegistryTestServiceEndpoint.Id,\n\t\tProject: dockerRegistryTestServiceEndpointProjectID,\n\t}\n\n\tbuildClient.\n\t\tEXPECT().\n\t\tUpdateServiceEndpoint(clients.Ctx, expectedArgs).\n\t\tReturn(nil, errors.New(\"UpdateServiceEndpoint() Failed\")).\n\t\tTimes(1)\n\n\terr := r.Update(resourceData, clients)\n\trequire.Contains(t, err.Error(), \"UpdateServiceEndpoint() Failed\")\n}", "func (suite *TaskFailRetryTestSuite) TestLostTaskRetry() {\n\ttaskConfig := pbtask.TaskConfig{\n\t\tRestartPolicy: &pbtask.RestartPolicy{\n\t\t\tMaxFailures: 3,\n\t\t},\n\t}\n\n\tsuite.cachedTask.EXPECT().\n\t\tID().\n\t\tReturn(uint32(0)).\n\t\tAnyTimes()\n\n\tsuite.jobFactory.EXPECT().\n\t\tGetJob(suite.jobID).Return(suite.cachedJob)\n\n\tsuite.cachedJob.EXPECT().\n\t\tGetTask(suite.instanceID).Return(suite.cachedTask)\n\n\tsuite.cachedJob.EXPECT().\n\t\tID().Return(suite.jobID)\n\n\tsuite.cachedTask.EXPECT().\n\t\tGetRuntime(gomock.Any()).Return(suite.lostTaskRuntime, nil)\n\n\tsuite.taskConfigV2Ops.EXPECT().\n\t\tGetTaskConfig(gomock.Any(), suite.jobID, suite.instanceID, gomock.Any()).\n\t\tReturn(&taskConfig, &models.ConfigAddOn{}, nil)\n\n\tsuite.cachedJob.EXPECT().\n\t\tPatchTasks(gomock.Any(), gomock.Any(), false).\n\t\tDo(func(ctx context.Context,\n\t\t\truntimeDiffs map[uint32]jobmgrcommon.RuntimeDiff,\n\t\t\t_ bool) {\n\t\t\truntimeDiff := runtimeDiffs[suite.instanceID]\n\t\t\tsuite.True(\n\t\t\t\truntimeDiff[jobmgrcommon.MesosTaskIDField].(*mesosv1.TaskID).GetValue() != suite.mesosTaskID)\n\t\t\tsuite.True(\n\t\t\t\truntimeDiff[jobmgrcommon.PrevMesosTaskIDField].(*mesosv1.TaskID).GetValue() == suite.mesosTaskID)\n\t\t\tsuite.True(\n\t\t\t\truntimeDiff[jobmgrcommon.StateField].(pbtask.TaskState) == pbtask.TaskState_INITIALIZED)\n\t\t}).Return(nil, nil, nil)\n\n\tsuite.cachedJob.EXPECT().\n\t\tGetJobType().Return(pbjob.JobType_BATCH)\n\n\tsuite.taskGoalStateEngine.EXPECT().\n\t\tEnqueue(gomock.Any(), gomock.Any()).\n\t\tReturn()\n\n\tsuite.jobGoalStateEngine.EXPECT().\n\t\tEnqueue(gomock.Any(), gomock.Any()).\n\t\tReturn()\n\n\terr := TaskFailRetry(context.Background(), suite.taskEnt)\n\tsuite.NoError(err)\n}", "func TestCreateAndApplyTransactionPanic(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\n\t// Create invalid update that triggers a panic.\n\tupdate := writeaheadlog.Update{\n\t\tName: \"invalid name\",\n\t}\n\n\t// Declare a helper to check for a panic.\n\tassertRecover := func() {\n\t\tif r := recover(); r == nil {\n\t\t\tt.Fatalf(\"Expected a panic\")\n\t\t}\n\t}\n\n\t// Run the test for both the method and function\n\tsiadir, err := newTestDir(t.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfunc() {\n\t\tdefer assertRecover()\n\t\t_ = siadir.createAndApplyTransaction(update)\n\t}()\n\tfunc() {\n\t\tdefer assertRecover()\n\t\t_ = CreateAndApplyTransaction(siadir.wal, update)\n\t}()\n}", "func TestKubeletRestartsAndRestoresMap(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {\n\tbyteLen := 64\n\tseed := time.Now().UTC().UnixNano()\n\n\tginkgo.By(\"Writing to the volume.\")\n\tCheckWriteToPath(f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed)\n\n\tginkgo.By(\"Restarting kubelet\")\n\tKubeletCommand(ctx, KRestart, c, clientPod)\n\n\tginkgo.By(\"Testing that written pv is accessible.\")\n\tCheckReadFromPath(f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed)\n\n\tframework.Logf(\"Volume map detected on pod %s and written data %s is readable 
post-restart.\", clientPod.Name, volumePath)\n}", "func TestPutNewPresentationInvalidtemplatePassword(t *testing.T) {\n request := createPutNewPresentationRequest()\n request.templatePassword = invalidizeTestParamValue(request.templatePassword, \"templatePassword\", \"string\").(string)\n e := initializeTest(\"PutNewPresentation\", \"templatePassword\", request.templatePassword)\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n r, _, e := getTestApiClient().DocumentApi.PutNewPresentation(request)\n assertError(t, \"PutNewPresentation\", \"templatePassword\", r.Code, e)\n}", "func (rest *TestResourceREST) TestFailRegisterResourceNonServiceAccount() {\n\tsa := account.Identity{\n\t\tUsername: \"unknown-account\",\n\t}\n\tservice, controller := rest.SecuredController(sa)\n\n\tresourceDescription := \"Resource description\"\n\tresourceID := \"\"\n\tresourceScopes := []string{}\n\n\tresourceOwnerID := rest.testIdentity.ID\n\n\tpayload := &app.RegisterResourcePayload{\n\t\tDescription: &resourceDescription,\n\t\tName: \"My new resource\",\n\t\tParentResourceID: nil,\n\t\tResourceScopes: resourceScopes,\n\t\tResourceID: &resourceID,\n\t\tResourceOwnerID: resourceOwnerID.String(),\n\t\tType: \"Area\",\n\t}\n\n\ttest.RegisterResourceUnauthorized(rest.T(), service.Context, service, controller, payload)\n}", "func proxyCrash(t *testing.T) {\n\tproxyURL := tutils.RandomProxyURL(t)\n\tsmap := tutils.GetClusterMap(t, proxyURL)\n\ttlog.Logf(\"targets: %d, proxies: %d\\n\", smap.CountActiveTargets(), smap.CountActiveProxies())\n\n\tprimaryURL, primaryID := smap.Primary.URL(cmn.NetworkPublic), smap.Primary.ID()\n\ttlog.Logf(\"Primary proxy: %s\\n\", primaryURL)\n\n\tvar (\n\t\tsecondURL string\n\t\tsecondID string\n\t\tsecondNode *cluster.Snode\n\t\torigProxyCount = smap.CountActiveProxies()\n\t)\n\n\t// Select a random non-primary proxy\n\tfor k, v := range smap.Pmap {\n\t\tif k != primaryID {\n\t\t\tsecondURL = v.URL(cmn.NetworkPublic)\n\t\t\tsecondID = v.ID()\n\t\t\tsecondNode = v\n\t\t\tbreak\n\t\t}\n\t}\n\n\ttlog.Logf(\"Killing non-primary proxy: %s - %s\\n\", secondURL, secondID)\n\tsecondCmd, err := tutils.KillNode(secondNode)\n\ttassert.CheckFatal(t, err)\n\n\tsmap, err = tutils.WaitForClusterState(primaryURL, \"propagate new Smap\",\n\t\tsmap.Version, origProxyCount-1, 0)\n\ttassert.CheckFatal(t, err)\n\n\terr = tutils.RestoreNode(secondCmd, false, \"proxy\")\n\ttassert.CheckFatal(t, err)\n\n\tsmap, err = tutils.WaitForClusterState(primaryURL, \"restore\", smap.Version, origProxyCount, 0)\n\ttassert.CheckFatal(t, err)\n\n\tif _, ok := smap.Pmap[secondID]; !ok {\n\t\tt.Fatalf(\"Non-primary proxy did not rejoin the cluster.\")\n\t}\n}", "func TestReadOnlyTransaction_RecoverFromFailure(t *testing.T) {\n\tt.Parallel()\n\tctx := context.Background()\n\tserver, client, teardown := setupMockedTestServer(t)\n\tdefer teardown()\n\n\ttxn := client.ReadOnlyTransaction()\n\tdefer txn.Close()\n\n\t// First request will fail.\n\terrUsr := gstatus.Error(codes.Unknown, \"error\")\n\tserver.TestSpanner.PutExecutionTime(MethodBeginTransaction,\n\t\tSimulatedExecutionTime{\n\t\t\tErrors: []error{errUsr},\n\t\t})\n\n\t_, _, e := txn.acquire(ctx)\n\tif wantErr := ToSpannerError(errUsr); !testEqual(e, wantErr) {\n\t\tt.Fatalf(\"Acquire for multi use, got %v, want %v.\", e, wantErr)\n\t}\n\t_, _, e = txn.acquire(ctx)\n\tif e != nil {\n\t\tt.Fatalf(\"Acquire for multi use, got %v, want nil.\", e)\n\t}\n}", "func TestK8gbRepeatedlyRecreatedFromIngress(t *testing.T) {\n\tt.Parallel()\n\t// name of ingress and 
gslb\n\tconst name = \"test-gslb-failover-simple\"\n\n\tassertStrategy := func(t *testing.T, options *k8s.KubectlOptions) {\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.splitBrainThresholdSeconds\", \"300\")\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.dnsTtlSeconds\", \"30\")\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.primaryGeoTag\", settings.PrimaryGeoTag)\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.type\", \"failover\")\n\t}\n\n\t// Path to the Kubernetes resource config we will test\n\tingressResourcePath, err := filepath.Abs(\"../examples/ingress-annotation-failover-simple.yaml\")\n\trequire.NoError(t, err)\n\n\t// To ensure we can reuse the resource config on the same cluster to test different scenarios, we setup a unique\n\t// namespace for the resources for this test.\n\t// Note that namespaces must be lowercase.\n\tnamespaceName := fmt.Sprintf(\"k8gb-test-repeatedly-recreated-from-ingress-%s\", strings.ToLower(random.UniqueId()))\n\n\t// Here we choose to use the defaults, which is:\n\t// - HOME/.kube/config for the kubectl config file\n\t// - Current context of the kubectl config file\n\t// - Random namespace\n\toptions := k8s.NewKubectlOptions(\"\", \"\", namespaceName)\n\n\tk8s.CreateNamespace(t, options, namespaceName)\n\n\tdefer k8s.DeleteNamespace(t, options, namespaceName)\n\n\tdefer k8s.KubectlDelete(t, options, ingressResourcePath)\n\n\tutils.CreateGslb(t, options, settings, ingressResourcePath)\n\n\tk8s.WaitUntilIngressAvailable(t, options, name, 60, 1*time.Second)\n\n\tingress := k8s.GetIngress(t, options, name)\n\n\trequire.Equal(t, ingress.Name, name)\n\n\t// assert Gslb strategy has expected values\n\tassertStrategy(t, options)\n\n\tk8s.KubectlDelete(t, options, ingressResourcePath)\n\n\tutils.AssertGslbDeleted(t, options, ingress.Name)\n\n\t// recreate ingress\n\tutils.CreateGslb(t, options, settings, ingressResourcePath)\n\n\tk8s.WaitUntilIngressAvailable(t, options, name, 60, 1*time.Second)\n\n\tingress = k8s.GetIngress(t, options, name)\n\n\trequire.Equal(t, ingress.Name, name)\n\t// assert Gslb strategy has expected values\n\tassertStrategy(t, options)\n}", "func targetMapVersionMismatch(getNum func(int) int, t *testing.T, proxyURL string) {\n\tsmap := tutils.GetClusterMap(t, proxyURL)\n\ttlog.Logf(\"targets: %d, proxies: %d\\n\", smap.CountActiveTargets(), smap.CountActiveProxies())\n\n\tsmap.Version++\n\tjsonMap, err := jsoniter.Marshal(smap)\n\ttassert.CheckFatal(t, err)\n\n\tn := getNum(smap.CountActiveTargets() + smap.CountActiveProxies() - 1)\n\tfor _, v := range smap.Tmap {\n\t\tif n == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tbaseParams := tutils.BaseAPIParams(v.URL(cmn.NetworkPublic))\n\t\tbaseParams.Method = http.MethodPut\n\t\terr = api.DoHTTPRequest(api.ReqParams{\n\t\t\tBaseParams: baseParams,\n\t\t\tPath: cmn.URLPathDaemon.Join(cmn.SyncSmap),\n\t\t\tBody: jsonMap,\n\t\t})\n\t\ttassert.CheckFatal(t, err)\n\t\tn--\n\t}\n\tkillRestorePrimary(t, proxyURL, false, nil)\n}", "func Test_GetOrCreateAccessKey_Create(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\takService := getMockTokenServiceServer(ctrl)\n\tc := New(db, bdl, akService, &fakeClusterServiceServer{}, nil, &mock.MockPipelineServiceServer{})\n\n\tmonkey.PatchInstanceMethod(reflect.TypeOf(c), \"CheckCluster\", func(_ *Clusters, _ context.Context, _ string) error {\n\t\treturn nil\n\t})\n\n\tdefer monkey.UnpatchAll()\n\n\takResp, err := c.GetOrCreateAccessKey(context.Background(), 
fakeCluster)\n\tassert.NoError(t, err)\n\tassert.Equal(t, akResp, fakeAkItem)\n}", "func Retries500Test() Test {\n\tvar (\n\t\tmtx sync.Mutex\n\t\taccept bool\n\t\tts int64\n\t)\n\n\treturn Test{\n\t\tName: \"Retries500\",\n\t\tMetrics: metricHandler(prometheus.NewGaugeFunc(prometheus.GaugeOpts{\n\t\t\tName: \"now\",\n\t\t}, func() float64 {\n\t\t\treturn float64(time.Now().Unix() * 1000)\n\t\t})),\n\t\tWrites: func(next http.Handler) http.Handler {\n\t\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tmtx.Lock()\n\t\t\t\tdefer mtx.Unlock()\n\n\t\t\t\tif accept {\n\t\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// We're going to pick a timestamp from this batch, and then make sure\n\t\t\t\t// it gets resent. First we need to decode this batch.\n\t\t\t\tts = getFirstTimestamp(w, r)\n\t\t\t\taccept = true\n\t\t\t\thttp.Error(w, \"internal server error\", http.StatusInternalServerError)\n\t\t\t})\n\n\t\t},\n\t\tExpected: func(t *testing.T, bs []Batch) {\n\t\t\tfound := false\n\t\t\tforAllSamples(bs, func(s sample) {\n\t\t\t\tif labelsContain(s.l, labels.FromStrings(\"__name__\", \"now\")) && s.t == ts {\n\t\t\t\t\tfound = true\n\t\t\t\t}\n\t\t\t})\n\t\t\trequire.True(t, found, `failed to find sample that should have been retried`)\n\t\t},\n\t}\n}", "func (o InstanceGroupManagerActionsSummaryResponsePtrOutput) CreatingWithoutRetries() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *InstanceGroupManagerActionsSummaryResponse) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.CreatingWithoutRetries\n\t}).(pulumi.IntPtrOutput)\n}", "func (c *Controller) shouldContinueOnCreateFailed() error {\n\t// Check configuration option regarding should we continue when errors met on the way\n\t// c.chopConfig.OnStatefulSetUpdateFailureAction\n\tvar continueUpdate = false\n\tif continueUpdate {\n\t\t// Continue update\n\t\treturn nil\n\t}\n\n\t// Do not continue update\n\treturn errors.New(fmt.Sprintf(\"Create stopped due to previous errors\"))\n}", "func TestAfPacketModifyRecreateNotFound(t *testing.T) {\n\tctx, plugin, _ := afPacketTestSetup(t)\n\tdefer afPacketTestTeardown(ctx)\n\n\t// Data\n\toldData := getTestAfPacketData(\"if1\", []string{\"10.0.0.1/24\"}, \"host1\")\n\tnewData := getTestAfPacketData(\"if1\", []string{\"10.0.0.1/24\"}, \"host2\")\n\n\t// Test af packet modify\n\trecreate, err := plugin.ModifyAfPacketInterface(newData, oldData)\n\tExpect(err).To(BeNil())\n\tExpect(recreate).To(BeTrue())\n}", "func TestNoRetry(t *testing.T) {\n\tconst attempts = 2\n\n\tvar (\n\t\treq, _ = http.NewRequest(\"GET\", \"http://example/test\", nil)\n\t\tnext = &testRoundTrip{err: nil, resp: &http.Response{StatusCode: 200}}\n\t\ttrans = Transport{\n\t\t\tRetry: All(Errors(), Max(attempts)),\n\t\t\tNext: next,\n\t\t}\n\t)\n\n\tresp, err := trans.RoundTrip(req)\n\n\tif err != nil {\n\t\tt.Fatalf(\"expected error to be nil but got: %s\", err.Error())\n\t}\n\n\tif resp == nil {\n\t\tt.Fatalf(\"expected to obtain non-nil response\")\n\t}\n}", "func (suite *TaskFailRetryTestSuite) TestTaskFailSystemFailure() {\n\tsuite.taskRuntime.Reason = mesosv1.TaskStatus_REASON_CONTAINER_LAUNCH_FAILED.String()\n\n\ttestTable := []*pbtask.RuntimeInfo{\n\t\t{\n\t\t\tMesosTaskId: &mesosv1.TaskID{Value: &suite.mesosTaskID},\n\t\t\tState: pbtask.TaskState_FAILED,\n\t\t\tGoalState: pbtask.TaskState_SUCCEEDED,\n\t\t\tConfigVersion: 1,\n\t\t\tMessage: \"testFailure\",\n\t\t\tReason: 
mesosv1.TaskStatus_REASON_CONTAINER_LAUNCH_FAILED.String(),\n\t\t},\n\t\t{\n\t\t\tMesosTaskId: &mesosv1.TaskID{Value: &suite.mesosTaskID},\n\t\t\tState: pbtask.TaskState_FAILED,\n\t\t\tGoalState: pbtask.TaskState_SUCCEEDED,\n\t\t\tConfigVersion: 1,\n\t\t\tMessage: \"Container terminated with signal Broken pipe\",\n\t\t\tReason: mesosv1.TaskStatus_REASON_COMMAND_EXECUTOR_FAILED.String(),\n\t\t},\n\t}\n\n\ttaskConfig := pbtask.TaskConfig{\n\t\tRestartPolicy: &pbtask.RestartPolicy{\n\t\t\tMaxFailures: 0,\n\t\t},\n\t}\n\n\tsuite.cachedTask.EXPECT().\n\t\tID().\n\t\tReturn(uint32(0)).\n\t\tAnyTimes()\n\n\tfor _, taskRuntime := range testTable {\n\n\t\tsuite.jobFactory.EXPECT().\n\t\t\tGetJob(suite.jobID).Return(suite.cachedJob)\n\n\t\tsuite.cachedJob.EXPECT().\n\t\t\tGetTask(suite.instanceID).Return(suite.cachedTask)\n\n\t\tsuite.cachedJob.EXPECT().\n\t\t\tID().Return(suite.jobID)\n\n\t\tsuite.cachedTask.EXPECT().\n\t\t\tGetRuntime(gomock.Any()).Return(taskRuntime, nil)\n\n\t\tsuite.taskConfigV2Ops.EXPECT().\n\t\t\tGetTaskConfig(gomock.Any(), suite.jobID, suite.instanceID, gomock.Any()).\n\t\t\tReturn(&taskConfig, &models.ConfigAddOn{}, nil)\n\n\t\tsuite.cachedJob.EXPECT().\n\t\t\tPatchTasks(gomock.Any(), gomock.Any(), false).\n\t\t\tDo(func(ctx context.Context,\n\t\t\t\truntimeDiffs map[uint32]jobmgrcommon.RuntimeDiff,\n\t\t\t\t_ bool) {\n\t\t\t\truntimeDiff := runtimeDiffs[suite.instanceID]\n\t\t\t\tsuite.True(\n\t\t\t\t\truntimeDiff[jobmgrcommon.MesosTaskIDField].(*mesosv1.TaskID).GetValue() != suite.mesosTaskID)\n\t\t\t\tsuite.True(\n\t\t\t\t\truntimeDiff[jobmgrcommon.PrevMesosTaskIDField].(*mesosv1.TaskID).GetValue() == suite.mesosTaskID)\n\t\t\t\tsuite.True(\n\t\t\t\t\truntimeDiff[jobmgrcommon.StateField].(pbtask.TaskState) == pbtask.TaskState_INITIALIZED)\n\t\t\t}).Return(nil, nil, nil)\n\n\t\tsuite.cachedJob.EXPECT().\n\t\t\tGetJobType().Return(pbjob.JobType_BATCH)\n\n\t\tsuite.taskGoalStateEngine.EXPECT().\n\t\t\tEnqueue(gomock.Any(), gomock.Any()).\n\t\t\tReturn()\n\n\t\tsuite.jobGoalStateEngine.EXPECT().\n\t\t\tEnqueue(gomock.Any(), gomock.Any()).\n\t\t\tReturn()\n\n\t\terr := TaskFailRetry(context.Background(), suite.taskEnt)\n\t\tsuite.NoError(err)\n\t}\n}", "func TestClientRetryNonTxn(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\n\t// Set up a command filter which tracks which one of our test keys have\n\t// been attempted to be pushed.\n\tmu := struct {\n\t\tsyncutil.Mutex\n\t\tm map[string]struct{}\n\t}{\n\t\tm: make(map[string]struct{}),\n\t}\n\tfilter := func(args storagebase.FilterArgs) *roachpb.Error {\n\t\tmu.Lock()\n\t\tdefer mu.Unlock()\n\t\tpushArg, ok := args.Req.(*roachpb.PushTxnRequest)\n\t\tif !ok || !strings.HasPrefix(string(pushArg.PusheeTxn.Key), \"key-\") {\n\t\t\treturn nil\n\t\t}\n\t\tmu.m[string(pushArg.PusheeTxn.Key)] = struct{}{}\n\t\treturn nil\n\t}\n\targs := base.TestServerArgs{\n\t\tKnobs: base.TestingKnobs{\n\t\t\tStore: &storage.StoreTestingKnobs{\n\t\t\t\tTestingCommandFilter: filter,\n\t\t\t},\n\t\t},\n\t}\n\ts, _, _ := serverutils.StartServer(t, args)\n\tdefer s.Stopper().Stop()\n\n\ttestCases := []struct {\n\t\targs roachpb.Request\n\t\tisolation enginepb.IsolationType\n\t\tcanPush bool\n\t\texpAttempts int\n\t}{\n\t\t// Write/write conflicts.\n\t\t{&roachpb.PutRequest{}, enginepb.SNAPSHOT, true, 2},\n\t\t{&roachpb.PutRequest{}, enginepb.SERIALIZABLE, true, 2},\n\t\t{&roachpb.PutRequest{}, enginepb.SNAPSHOT, false, 1},\n\t\t{&roachpb.PutRequest{}, enginepb.SERIALIZABLE, false, 1},\n\t\t// Read/write conflicts.\n\t\t{&roachpb.GetRequest{}, 
enginepb.SNAPSHOT, true, 1},\n\t\t{&roachpb.GetRequest{}, enginepb.SERIALIZABLE, true, 2},\n\t\t{&roachpb.GetRequest{}, enginepb.SNAPSHOT, false, 1},\n\t\t{&roachpb.GetRequest{}, enginepb.SERIALIZABLE, false, 1},\n\t}\n\t// Lay down a write intent using a txn and attempt to access the same\n\t// key from our test client, with priorities set up so that the Push\n\t// succeeds iff the test dicates that it do.\n\tfor i, test := range testCases {\n\t\tkey := roachpb.Key(fmt.Sprintf(\"key-%d\", i))\n\t\tvar txnPri int32 = 1\n\t\tvar clientPri roachpb.UserPriority = 1\n\t\tif test.canPush {\n\t\t\tclientPri = 2\n\t\t} else {\n\t\t\ttxnPri = 2\n\t\t}\n\n\t\tdb, sender := createTestNotifyClient(t, s.Stopper(), s.ServingAddr(), -clientPri)\n\n\t\t// doneCall signals when the non-txn read or write has completed.\n\t\tdoneCall := make(chan error)\n\t\tcount := 0 // keeps track of retries\n\t\terr := db.Txn(context.TODO(), func(txn *client.Txn) error {\n\t\t\tif test.isolation == enginepb.SNAPSHOT {\n\t\t\t\tif err := txn.SetIsolation(enginepb.SNAPSHOT); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\ttxn.InternalSetPriority(txnPri)\n\n\t\t\tcount++\n\t\t\t// Lay down the intent.\n\t\t\tif err := txn.Put(key, \"txn-value\"); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// On the first true, send the non-txn put or get.\n\t\t\tif count == 1 {\n\t\t\t\t// We use a \"notifying\" sender here, which allows us to know exactly when the\n\t\t\t\t// call has been processed; otherwise, we'd be dependent on timing.\n\t\t\t\t// The channel lets us pause txn until after the non-txn method has run once.\n\t\t\t\t// Use a channel length of size 1 to guarantee a notification through a\n\t\t\t\t// non-blocking send.\n\t\t\t\tnotify := make(chan struct{}, 1)\n\t\t\t\tsender.reset(notify)\n\t\t\t\t// We must try the non-txn put or get in a goroutine because\n\t\t\t\t// it might have to retry and will only succeed immediately in\n\t\t\t\t// the event we can push.\n\t\t\t\tgo func() {\n\t\t\t\t\tvar err error\n\t\t\t\t\t//for {\n\t\t\t\t\tif _, ok := test.args.(*roachpb.GetRequest); ok {\n\t\t\t\t\t\t_, err = db.Get(key)\n\t\t\t\t\t} else {\n\t\t\t\t\t\terr = db.Put(key, \"value\")\n\t\t\t\t\t}\n\t\t\t\t\tdoneCall <- errors.Wrapf(\n\t\t\t\t\t\terr, \"%d: expected success on non-txn call to %s\",\n\t\t\t\t\t\ti, test.args.Method())\n\t\t\t\t}()\n\t\t\t\t// Block until the non-transactional client has pushed us at\n\t\t\t\t// least once.\n\t\t\t\tutil.SucceedsSoon(t, func() error {\n\t\t\t\t\tmu.Lock()\n\t\t\t\t\tdefer mu.Unlock()\n\t\t\t\t\tif _, ok := mu.m[string(key)]; ok {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t\treturn errors.New(\"non-transactional client has not pushed txn yet\")\n\t\t\t\t})\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%d: expected success writing transactionally; got %s\", i, err)\n\t\t}\n\n\t\t// Make sure non-txn put or get has finished.\n\t\tif err := <-doneCall; err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\t// Get the current value to verify whether the txn happened first.\n\t\tgr, err := db.Get(key)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%d: expected success getting %q: %s\", i, key, err)\n\t\t}\n\n\t\tif _, isGet := test.args.(*roachpb.GetRequest); isGet || test.canPush {\n\t\t\tif !bytes.Equal(gr.ValueBytes(), []byte(\"txn-value\")) {\n\t\t\t\tt.Errorf(\"%d: expected \\\"txn-value\\\"; got %q\", i, gr.ValueBytes())\n\t\t\t}\n\t\t} else {\n\t\t\tif !bytes.Equal(gr.ValueBytes(), []byte(\"value\")) {\n\t\t\t\tt.Errorf(\"%d: expected 
\\\"value\\\"; got %q\", i, gr.ValueBytes())\n\t\t\t}\n\t\t}\n\t\tif count != test.expAttempts {\n\t\t\tt.Errorf(\"%d: expected %d attempt(s); got %d\", i, test.expAttempts, count)\n\t\t}\n\t}\n}", "func TestDeployRouterInvalidConfig(t *testing.T) {\n\t// Read existing router that MUST have already exists from previous create router e2e test\n\t// Router name is assumed to follow this format: e2e-experiment-{{.TestID}}\n\trouterName := \"e2e-experiment-\" + globalTestContext.TestID\n\tt.Log(fmt.Sprintf(\"Retrieving router with name '%s' created from previous test step\", routerName))\n\texistingRouter, err := getRouterByName(\n\t\tglobalTestContext.httpClient, globalTestContext.APIBasePath, globalTestContext.ProjectID, routerName)\n\trequire.NoError(t, err)\n\n\t// Deploy router version\n\turl := fmt.Sprintf(\n\t\t\"%s/projects/%d/routers/%d/versions/2/deploy\",\n\t\tglobalTestContext.APIBasePath,\n\t\tglobalTestContext.ProjectID, existingRouter.ID,\n\t)\n\tt.Log(\"Deploying router: POST \" + url)\n\treq, err := http.NewRequestWithContext(context.Background(), http.MethodPost, url, nil)\n\trequire.NoError(t, err)\n\tresponse, err := globalTestContext.httpClient.Do(req)\n\trequire.NoError(t, err)\n\tdefer response.Body.Close()\n\tassert.Equal(t, http.StatusAccepted, response.StatusCode)\n\n\t// Wait for the version status to to change to success/failed deployment\n\tt.Log(\"Waiting for router to deploy\")\n\terr = waitDeployVersion(\n\t\tglobalTestContext.httpClient,\n\t\tglobalTestContext.APIBasePath,\n\t\tglobalTestContext.ProjectID,\n\t\tint(existingRouter.ID),\n\t\t2,\n\t)\n\trequire.NoError(t, err)\n\n\t// Test router version configuration\n\tt.Log(\"Testing GET router version\")\n\trouterVersion, err := getRouterVersion(\n\t\tglobalTestContext.httpClient,\n\t\tglobalTestContext.APIBasePath,\n\t\tglobalTestContext.ProjectID,\n\t\tint(existingRouter.ID),\n\t\t2,\n\t)\n\trequire.NoError(t, err)\n\tassert.Equal(t, models.RouterVersionStatusFailed, routerVersion.Status)\n\n\t// Test router configuration\n\tt.Log(\"Testing GET router\")\n\trouter, err := getRouter(\n\t\tglobalTestContext.httpClient,\n\t\tglobalTestContext.APIBasePath,\n\t\tglobalTestContext.ProjectID,\n\t\tint(existingRouter.ID),\n\t)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, router.CurrRouterVersion)\n\t// the expected version 1 is the valid version that the deployment fallback to due to invalid config\n\tassert.Equal(t, uint(1), router.CurrRouterVersion.Version)\n\tassert.Equal(t, models.RouterVersionStatusUndeployed, router.CurrRouterVersion.Status)\n\tassert.Equal(t, models.RouterStatusUndeployed, router.Status)\n}", "func TestContainerCreationConflict(t *testing.T) {\n\tsConfig := makeSandboxConfig(\"foo\", \"bar\", \"1\", 0)\n\tconfig := makeContainerConfig(sConfig, \"pause\", \"iamimage\", 0, map[string]string{}, map[string]string{})\n\tcontainerName := makeContainerName(sConfig, config)\n\tconst sandboxId = \"sandboxid\"\n\tconst containerId = \"containerid\"\n\tconflictError := fmt.Errorf(\"Error response from daemon: Conflict. The name \\\"/%s\\\" is already in use by container %s. 
You have to remove (or rename) that container to be able to reuse that name.\",\n\t\tcontainerName, containerId)\n\tnoContainerError := fmt.Errorf(\"Error response from daemon: No such container: %s\", containerId)\n\trandomError := fmt.Errorf(\"random error\")\n\n\tfor desc, test := range map[string]struct {\n\t\tcreateError error\n\t\tremoveError error\n\t\texpectError error\n\t\texpectCalls []string\n\t\texpectFields int\n\t}{\n\t\t\"no create error\": {\n\t\t\texpectCalls: []string{\"create\"},\n\t\t\texpectFields: 6,\n\t\t},\n\t\t\"random create error\": {\n\t\t\tcreateError: randomError,\n\t\t\texpectError: randomError,\n\t\t\texpectCalls: []string{\"create\"},\n\t\t},\n\t\t\"conflict create error with successful remove\": {\n\t\t\tcreateError: conflictError,\n\t\t\texpectError: conflictError,\n\t\t\texpectCalls: []string{\"create\", \"remove\"},\n\t\t},\n\t\t\"conflict create error with random remove error\": {\n\t\t\tcreateError: conflictError,\n\t\t\tremoveError: randomError,\n\t\t\texpectError: conflictError,\n\t\t\texpectCalls: []string{\"create\", \"remove\"},\n\t\t},\n\t\t\"conflict create error with no such container remove error\": {\n\t\t\tcreateError: conflictError,\n\t\t\tremoveError: noContainerError,\n\t\t\texpectCalls: []string{\"create\", \"remove\", \"create\"},\n\t\t\texpectFields: 7,\n\t\t},\n\t} {\n\t\tt.Logf(\"TestCase: %s\", desc)\n\t\tds, fDocker, _ := newTestDockerService()\n\n\t\tif test.createError != nil {\n\t\t\tfDocker.InjectError(\"create\", test.createError)\n\t\t}\n\t\tif test.removeError != nil {\n\t\t\tfDocker.InjectError(\"remove\", test.removeError)\n\t\t}\n\t\tid, err := ds.CreateContainer(sandboxId, config, sConfig)\n\t\trequire.Equal(t, test.expectError, err)\n\t\tassert.NoError(t, fDocker.AssertCalls(test.expectCalls))\n\t\tif err == nil {\n\t\t\tc, err := fDocker.InspectContainer(id)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Len(t, strings.Split(c.Name, nameDelimiter), test.expectFields)\n\t\t}\n\t}\n}", "func (m *RegeneratingDiscoveryRESTMapper) RegenerateMappings() error {\n\tresources, err := restmapper.GetAPIGroupResources(m.discoveryClient)\n\tif err != nil {\n\t\treturn err\n\t}\n\tnewDelegate := restmapper.NewDiscoveryRESTMapper(resources)\n\n\t// don't lock until we're ready to replace\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tm.delegate = newDelegate\n\n\treturn nil\n}", "func discoveryAndOrigPrimaryProxiesCrash(t *testing.T) {\n\tvar (\n\t\tconfig = tutils.GetClusterConfig(t)\n\t\trestoreCmd = make([]tutils.RestoreCmd, 0, 3)\n\t\tconfigDiscovery, _ = cos.ParseURL(config.Proxy.DiscoveryURL)\n\t\tproxyURL string\n\t\trandomKilled bool\n\t)\n\n\t// Make sure primary is same config\n\tsmap := primarySetToOriginal(t)\n\torigProxyCnt := smap.CountActiveProxies()\n\torigTargetCnt := smap.CountActiveTargets()\n\n\tfor _, si := range smap.Pmap {\n\t\tif smap.IsPrimary(si) {\n\t\t\tcontinue\n\t\t}\n\t\tpublicURL, _ := cos.ParseURL(si.URL(cmn.NetworkPublic))\n\t\tif publicURL.Host == configDiscovery.Host || configDiscovery.Port() == publicURL.Port() {\n\t\t\tcmd, err := tutils.KillNode(si)\n\t\t\ttassert.CheckFatal(t, err)\n\t\t\trestoreCmd = append(restoreCmd, cmd)\n\t\t\tcontinue\n\t\t}\n\t\tif randomKilled {\n\t\t\t// Set proxyURL - used to get latest smap\n\t\t\tproxyURL = si.URL(cmn.NetworkPublic)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Kill a random non primary proxy\n\t\tcmd, err := tutils.KillNode(si)\n\t\ttassert.CheckFatal(t, err)\n\t\trestoreCmd = append(restoreCmd, cmd)\n\t\trandomKilled = true\n\t}\n\n\t// Kill a random target\n\ttarget, 
err := smap.GetRandTarget()\n\ttassert.CheckFatal(t, err)\n\tcmd, err := tutils.KillNode(target)\n\ttassert.CheckFatal(t, err)\n\trestoreCmd = append(restoreCmd, cmd)\n\n\t// Kill original primary\n\tcmd, err = tutils.KillNode(smap.Primary)\n\ttassert.CheckFatal(t, err)\n\trestoreCmd = append(restoreCmd, cmd)\n\n\tproxyCnt, targetCnt := origProxyCnt-3, origTargetCnt-1\n\tsmap, err = tutils.WaitForClusterState(proxyURL, \"kill proxies and target\", smap.Version, proxyCnt, targetCnt)\n\ttassert.CheckFatal(t, err)\n\n\t// Restore all killed nodes\n\tfor _, cmd := range restoreCmd {\n\t\tif cmd.Node.IsProxy() {\n\t\t\tproxyCnt++\n\t\t}\n\t\tif cmd.Node.IsTarget() {\n\t\t\ttargetCnt++\n\t\t}\n\t\ttutils.RestoreNode(cmd, false, cmd.Node.Type())\n\t\t_, err = tutils.WaitForClusterState(proxyURL, \"restore \"+cmd.Node.ID(), smap.Version,\n\t\t\tproxyCnt, targetCnt)\n\t\ttassert.CheckError(t, err)\n\t}\n\n\ttutils.WaitForRebalanceToComplete(t, tutils.BaseAPIParams(proxyURL))\n}", "func (suite *TestManagerSuite) TestManagerCreateWithExisting() {\n\terr := suite.m.UpdateStatus(\"tid001\", job.SuccessStatus.String(), 2000)\n\trequire.NoError(suite.T(), err)\n\n\trp := &scan.Report{\n\t\tDigest: \"d1000\",\n\t\tRegistrationUUID: \"ruuid\",\n\t\tMimeType: v1.MimeTypeNativeReport,\n\t\tTrackID: \"tid002\",\n\t}\n\n\tuuid, err := suite.m.Create(rp)\n\trequire.NoError(suite.T(), err)\n\trequire.NotEmpty(suite.T(), uuid)\n\n\tassert.NotEqual(suite.T(), suite.rpUUID, uuid)\n\tsuite.rpUUID = uuid\n}", "func TestRecoverDoublePendingConfig(t *testing.T) {\n\tfunc() {\n\t\tdefer func() {\n\t\t\tif err := recover(); err == nil {\n\t\t\t\tt.Errorf(\"expect panic, but nothing happens\")\n\t\t\t}\n\t\t}()\n\t\tr := newTestRaft(1, []uint64{1, 2}, 10, 1, NewMemoryStorage())\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.appendEntry(pb.Entry{Type: pb.EntryConfChange})\n\t\tr.appendEntry(pb.Entry{Type: pb.EntryConfChange})\n\t\tr.becomeCandidate()\n\t\tr.becomeLeader()\n\t}()\n}", "func TestCreateIfNotExists(t *testing.T) {\n\toriginCtrl := gomock.NewController(t)\n\tdefer originCtrl.Finish()\n\tmockOrigin := mocks.NewMockConnector(originCtrl)\n\n\tvalues := map[string]dosa.FieldValue{}\n\tmockOrigin.EXPECT().CreateIfNotExists(context.TODO(), testEi, values).Return(nil)\n\n\tconnector := NewConnector(mockOrigin, nil, NewJSONEncoder(), nil, cacheableEntities...)\n\tconnector.setSynchronousMode(true)\n\terr := connector.CreateIfNotExists(context.TODO(), testEi, values)\n\tassert.NoError(t, err)\n}", "func Retries400Test() Test {\n\tvar (\n\t\tmtx sync.Mutex\n\t\taccept bool\n\t\tts int64\n\t)\n\n\treturn Test{\n\t\tName: \"Retries400\",\n\t\tMetrics: metricHandler(prometheus.NewGaugeFunc(prometheus.GaugeOpts{\n\t\t\tName: \"now\",\n\t\t}, func() float64 {\n\t\t\treturn float64(time.Now().Unix() * 1000)\n\t\t})),\n\t\tWrites: func(next http.Handler) http.Handler {\n\t\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tmtx.Lock()\n\t\t\t\tdefer mtx.Unlock()\n\n\t\t\t\tif accept {\n\t\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// We're going to pick a timestamp from this batch, and then make sure\n\t\t\t\t// it gets resent. 
First we need to decode this batch.\n\t\t\t\tts = getFirstTimestamp(w, r)\n\t\t\t\taccept = true\n\t\t\t\thttp.Error(w, \"bad request\", http.StatusBadRequest)\n\t\t\t})\n\n\t\t},\n\t\tExpected: func(t *testing.T, bs []Batch) {\n\t\t\tfound := false\n\t\t\tforAllSamples(bs, func(s sample) {\n\t\t\t\tif labelsContain(s.l, labels.FromStrings(\"__name__\", \"now\")) && s.t == ts {\n\t\t\t\t\tfound = true\n\t\t\t\t}\n\t\t\t})\n\t\t\trequire.False(t, found, `found sample that should not have been retried`)\n\t\t},\n\t}\n}", "func CreateRecovery(cluster, namespace, volumeName, deploymentName, mountPath, pathRestic string) {\n\tvar recovery map[string]interface{}\n\tvar nameRecovery string\n\t// TODO Backend -> local, s3, glusterFS, ...\n\tif cluster == \"ClusterFrom\" {\n\t\trecovery = utils.ReadJson(\"templates/recovery\", \"recovery_s3_template_from\")\n\t\tnameRecovery= \"recoveryFrom\"\n\t} else {\n\t\trecovery = utils.ReadJson(\"templates/recovery\", \"recovery_s3_template_to\")\n\t\tnameRecovery = \"recoveryTo\"\n\t}\n\n\t// Change namespace, name,\n\tauxName := \"recovery-\" + deploymentName\n\tauxName = deploymentName\n\trecovery[\"metadata\"].(map[string]interface{})[\"name\"] = auxName\n\trecovery[\"metadata\"].(map[string]interface{})[\"namespace\"] = namespace\n\trecovery[\"spec\"].(map[string]interface{})[\"workload\"].(map[string]interface{})[\"name\"] = deploymentName\n\trecovery[\"spec\"].(map[string]interface{})[\"paths\"].([]interface{})[0] = mountPath\n\trecovery[\"spec\"].(map[string]interface{})[\"recoveredVolumes\"].([]interface{})[0].(map[string]interface{})[\"mountPath\"] = mountPath\n\n\terr := utils.WriteJson(pathRestic, nameRecovery, recovery)\n\tif err != nil {\n\t\tfmt.Println(\"Error creating \" + auxName)\n\t}\n}", "func (suite *TaskFailRetryTestSuite) TestTaskFailRetry() {\n\ttaskConfig := pbtask.TaskConfig{\n\t\tRestartPolicy: &pbtask.RestartPolicy{\n\t\t\tMaxFailures: 3,\n\t\t},\n\t}\n\n\tsuite.cachedTask.EXPECT().\n\t\tID().\n\t\tReturn(uint32(0)).\n\t\tAnyTimes()\n\n\tsuite.jobFactory.EXPECT().\n\t\tGetJob(suite.jobID).Return(suite.cachedJob)\n\n\tsuite.cachedJob.EXPECT().\n\t\tGetTask(suite.instanceID).Return(suite.cachedTask)\n\n\tsuite.cachedJob.EXPECT().\n\t\tID().Return(suite.jobID)\n\n\tsuite.cachedTask.EXPECT().\n\t\tGetRuntime(gomock.Any()).Return(suite.taskRuntime, nil)\n\n\tsuite.taskConfigV2Ops.EXPECT().\n\t\tGetTaskConfig(gomock.Any(), suite.jobID, suite.instanceID, gomock.Any()).\n\t\tReturn(&taskConfig, &models.ConfigAddOn{}, nil)\n\n\tsuite.cachedJob.EXPECT().\n\t\tPatchTasks(gomock.Any(), gomock.Any(), false).\n\t\tDo(func(ctx context.Context,\n\t\t\truntimeDiffs map[uint32]jobmgrcommon.RuntimeDiff,\n\t\t\t_ bool) {\n\t\t\truntimeDiff := runtimeDiffs[suite.instanceID]\n\t\t\tsuite.True(\n\t\t\t\truntimeDiff[jobmgrcommon.MesosTaskIDField].(*mesosv1.TaskID).GetValue() != suite.mesosTaskID)\n\t\t\tsuite.True(\n\t\t\t\truntimeDiff[jobmgrcommon.PrevMesosTaskIDField].(*mesosv1.TaskID).GetValue() == suite.mesosTaskID)\n\t\t\tsuite.True(\n\t\t\t\truntimeDiff[jobmgrcommon.StateField].(pbtask.TaskState) == pbtask.TaskState_INITIALIZED)\n\t\t}).Return(nil, nil, nil)\n\n\tsuite.cachedJob.EXPECT().\n\t\tGetJobType().Return(pbjob.JobType_BATCH)\n\n\tsuite.taskGoalStateEngine.EXPECT().\n\t\tEnqueue(gomock.Any(), gomock.Any()).\n\t\tReturn()\n\n\tsuite.jobGoalStateEngine.EXPECT().\n\t\tEnqueue(gomock.Any(), gomock.Any()).\n\t\tReturn()\n\n\terr := TaskFailRetry(context.Background(), suite.taskEnt)\n\tsuite.NoError(err)\n}", "func primaryAndNextCrash(t 
*testing.T) {\n\tproxyURL := tutils.RandomProxyURL(t)\n\tsmap := tutils.GetClusterMap(t, proxyURL)\n\torigProxyCount := smap.CountActiveProxies()\n\n\tif origProxyCount < 4 {\n\t\tt.Skip(\"The test requires at least 4 proxies, found only \", origProxyCount)\n\t}\n\n\t// get next primary\n\tfirstPrimaryID, firstPrimaryURL, err := chooseNextProxy(smap)\n\ttassert.CheckFatal(t, err)\n\t// Cluster map is re-read to have a clone of original smap that the test\n\t// can modify in any way it needs. Because original smap got must be preserved\n\tsmapNext := tutils.GetClusterMap(t, proxyURL)\n\t// get next next primary\n\tfirstPrimary := smapNext.Pmap[firstPrimaryID]\n\tdelete(smapNext.Pmap, firstPrimaryID)\n\tfinalPrimaryID, finalPrimaryURL, err := chooseNextProxy(smapNext)\n\ttassert.CheckFatal(t, err)\n\n\t// kill the current primary\n\toldPrimaryURL, oldPrimaryID := smap.Primary.URL(cmn.NetworkPublic), smap.Primary.ID()\n\ttlog.Logf(\"Killing primary proxy: %s - %s\\n\", oldPrimaryURL, oldPrimaryID)\n\tcmdFirst, err := tutils.KillNode(smap.Primary)\n\ttassert.CheckFatal(t, err)\n\n\t// kill the next primary\n\ttlog.Logf(\"Killing next to primary proxy: %s - %s\\n\", firstPrimaryID, firstPrimaryURL)\n\tcmdSecond, errSecond := tutils.KillNode(firstPrimary)\n\t// if kill fails it does not make sense to wait for the cluster is stable\n\tif errSecond == nil {\n\t\t// the cluster should vote, so the smap version should be increased at\n\t\t// least by 100, that is why +99\n\t\tsmap, err = tutils.WaitForClusterState(finalPrimaryURL, \"designate new primary\",\n\t\t\tsmap.Version+99, origProxyCount-2, 0)\n\t\ttassert.CheckFatal(t, err)\n\t}\n\n\ttlog.Logf(\"Checking current primary, %s\\n\", smap.StringEx())\n\tif smap.Primary.ID() != finalPrimaryID {\n\t\tt.Errorf(\"Expected primary %s but real primary is %s\", finalPrimaryID, smap.Primary.ID())\n\t}\n\n\t// restore next and prev primaries in the reversed order\n\terr = tutils.RestoreNode(cmdSecond, false, \"proxy (next primary)\")\n\ttassert.CheckFatal(t, err)\n\tsmap, err = tutils.WaitForClusterState(finalPrimaryURL, \"restore next primary\",\n\t\tsmap.Version, origProxyCount-1, 0)\n\ttassert.CheckFatal(t, err)\n\n\terr = tutils.RestoreNode(cmdFirst, false, \"proxy (prev primary)\")\n\ttassert.CheckFatal(t, err)\n\t_, err = tutils.WaitForClusterState(finalPrimaryURL, \"restore prev primary\",\n\t\tsmap.Version, origProxyCount, 0)\n\ttassert.CheckFatal(t, err)\n}", "func _TestRegisterDontAddIfError(t *testing.T) {\n\tfor i := 0; i < 10; i++ {\n\t\tsendNoEnoughNodesRequest(t)\n\t}\n}", "func TestServiceCreateUserWeakPassword(t *testing.T) {\n\tks := CreateTestKeystore(t)\n\n\t{\n\t\treply := CreateUserReply{}\n\t\terr := ks.CreateUser(nil, &CreateUserArgs{\n\t\t\tUsername: \"bob\",\n\t\t\tPassword: \"weak\",\n\t\t}, &reply)\n\n\t\tif err != errWeakPassword {\n\t\t\tt.Error(\"Unexpected error occurred when testing weak password:\", err)\n\t\t}\n\n\t\tif reply.Success {\n\t\t\tt.Fatal(\"User was created when it should have been rejected due to weak password\")\n\t\t}\n\t}\n}", "func primaryAndProxyCrash(t *testing.T) {\n\tvar (\n\t\tproxyURL = tutils.RandomProxyURL(t)\n\t\tsmap = tutils.GetClusterMap(t, proxyURL)\n\t\torigProxyCount = smap.CountActiveProxies()\n\t\toldPrimaryURL, oldPrimaryID = smap.Primary.URL(cmn.NetworkPublic), smap.Primary.ID()\n\t\tsecondNode *cluster.Snode\n\t\tsecondURL, secondID string\n\t)\n\ttlog.Logf(\"targets: %d, proxies: %d\\n\", smap.CountActiveTargets(), smap.CountActiveProxies())\n\n\tnewPrimaryID, newPrimaryURL, err := 
chooseNextProxy(smap)\n\ttassert.CheckFatal(t, err)\n\n\ttlog.Logf(\"Killing primary: %s - %s\\n\", oldPrimaryURL, oldPrimaryID)\n\tcmd, err := tutils.KillNode(smap.Primary)\n\ttassert.CheckFatal(t, err)\n\n\t// Do not choose the next primary in line, or the current primary proxy\n\t// This is because the system currently cannot recover if the next proxy in line is\n\t// also killed (TODO)\n\tfor k, v := range smap.Pmap {\n\t\tif k != newPrimaryID && k != oldPrimaryID {\n\t\t\tsecondNode = v\n\t\t\tsecondURL, secondID = secondNode.URL(cmn.NetworkPublic), secondNode.ID()\n\t\t\tbreak\n\t\t}\n\t}\n\ttassert.Errorf(t, secondID != \"\", \"not enough proxies (%d)\", origProxyCount)\n\tn := cos.NowRand().Intn(20)\n\ttime.Sleep(time.Duration(n+1) * time.Second)\n\n\ttlog.Logf(\"Killing non-primary proxy: %s - %s\\n\", secondURL, secondID)\n\tsecondCmd, err := tutils.KillNode(secondNode)\n\ttassert.CheckFatal(t, err)\n\n\tsmap, err = tutils.WaitForClusterState(newPrimaryURL, \"elect new primary\",\n\t\tsmap.Version, origProxyCount-2, 0)\n\ttassert.CheckFatal(t, err)\n\n\terr = tutils.RestoreNode(cmd, true, \"previous primary \"+oldPrimaryID)\n\ttassert.CheckFatal(t, err)\n\n\tsmap, err = tutils.WaitForClusterState(newPrimaryURL, \"join back previous primary \"+oldPrimaryID,\n\t\tsmap.Version, origProxyCount-1, 0)\n\ttassert.CheckFatal(t, err)\n\n\terr = tutils.RestoreNode(secondCmd, false, \"proxy\")\n\ttassert.CheckFatal(t, err)\n\n\tsmap, err = tutils.WaitForClusterState(newPrimaryURL, \"join back non-primary \"+secondID,\n\t\tsmap.Version, origProxyCount, 0)\n\ttassert.CheckFatal(t, err)\n\n\tif smap.Primary.ID() != newPrimaryID {\n\t\tt.Fatalf(\"Wrong primary proxy: %s, expecting: %s\", smap.Primary.ID(), newPrimaryID)\n\t}\n\n\tif _, ok := smap.Pmap[oldPrimaryID]; !ok {\n\t\tt.Fatalf(\"Previous primary proxy %s did not rejoin the cluster\", oldPrimaryID)\n\t}\n\n\tif _, ok := smap.Pmap[secondID]; !ok {\n\t\tt.Fatalf(\"Second proxy %s did not rejoin the cluster\", secondID)\n\t}\n}", "func TestInsertNewUserServiceAlreadyExists (t *testing.T){\n\terr := PostNewUserService(user_01)\n\tassert.Equal(t, 409, err.HTTPStatus)\n}", "func TestAzureDevOpsServiceEndpointDockerRegistry_Read_DoesNotSwallowError(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tr := resourceServiceEndpointDockerRegistry()\n\tresourceData := schema.TestResourceDataRaw(t, r.Schema, nil)\n\tflattenServiceEndpointDockerRegistry(resourceData, &dockerRegistryTestServiceEndpoint, dockerRegistryTestServiceEndpointProjectID)\n\n\tbuildClient := azdosdkmocks.NewMockServiceendpointClient(ctrl)\n\tclients := &config.AggregatedClient{ServiceEndpointClient: buildClient, Ctx: context.Background()}\n\n\texpectedArgs := serviceendpoint.GetServiceEndpointDetailsArgs{EndpointId: dockerRegistryTestServiceEndpoint.Id, Project: dockerRegistryTestServiceEndpointProjectID}\n\tbuildClient.\n\t\tEXPECT().\n\t\tGetServiceEndpointDetails(clients.Ctx, expectedArgs).\n\t\tReturn(nil, errors.New(\"GetServiceEndpoint() Failed\")).\n\t\tTimes(1)\n\n\terr := r.Read(resourceData, clients)\n\trequire.Contains(t, err.Error(), \"GetServiceEndpoint() Failed\")\n}", "func (suite *podActionsTestSuite) TestClientPodRefreshFail() {\n\tsuite.podClient.EXPECT().\n\t\tRefreshPod(gomock.Any(), gomock.Any()).\n\t\tReturn(nil, yarpcerrors.InternalErrorf(\"test error\"))\n\n\tsuite.Error(suite.client.PodRefreshAction(testPodName))\n}", "func TestRetryRequired(t *testing.T) {\n\tcheck := assert.New(t)\n\tretryRequired := 
checkRetryRequired(http.StatusServiceUnavailable)\n\tcheck.Equal(retryRequired, true)\n}", "func TestCreateSubscriptionRollbackOnFailure(t *testing.T) {\n\trepository, mock := initTest(t)\n\n\t// subscription to create\n\tsubscription := models.Subscription{\n\t\tCallbackURL: \"url\",\n\t\tCallbackType: models.HTTP,\n\t\tFilters: map[models.EventType]models.Filter{\n\t\t\tmodels.DirectoryBlockCommit: {Filtering: fmt.Sprintf(\"filtering 1\")},\n\t\t},\n\t}\n\tsubscriptionContext := &models.SubscriptionContext{\n\t\tSubscription: subscription,\n\t\tFailures: 0,\n\t}\n\n\tmock.ExpectBegin()\n\tmock.ExpectPrepare(`INSERT INTO subscriptions \\(failures, callback, callback_type, status, info, access_token, username, password\\) VALUES\\(\\?, \\?, \\?, \\?, \\?, \\?, \\?, \\?\\);`)\n\tmock.ExpectExec(`INSERT INTO subscriptions`).WithArgs(subscriptionContext.Failures, subscription.CallbackURL, subscription.CallbackType, subscription.SubscriptionStatus, subscription.SubscriptionInfo, subscription.Credentials.AccessToken, subscription.Credentials.BasicAuthUsername, subscription.Credentials.BasicAuthPassword).WillReturnResult(sqlmock.NewResult(1, 1))\n\tmock.ExpectPrepare(`INSERT INTO filters \\(subscription, event_type, filtering\\) VALUES\\(\\?, \\?, \\?\\);`)\n\tmock.ExpectExec(`INSERT INTO filters`).WithArgs(1, models.DirectoryBlockCommit, subscription.Filters[models.DirectoryBlockCommit].Filtering).\n\t\tWillReturnError(fmt.Errorf(\"some error\"))\n\tmock.ExpectRollback()\n\n\t// now we execute our method\n\tif _, err := repository.CreateSubscription(subscriptionContext); err == nil {\n\t\tt.Errorf(\"was expecting an error, but there was none\")\n\t}\n\n\t// we make sure that all expectations were met\n\tif err := mock.ExpectationsWereMet(); err != nil {\n\t\tt.Errorf(\"there were unfulfilled expectations: %s\", err)\n\t}\n}", "func (ts *tester) createConfigMap() error {\n\tts.cfg.Logger.Info(\"creating config map\")\n\n\tb, err := ioutil.ReadFile(ts.cfg.EKSConfig.KubeConfigPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tctx, cancel := context.WithTimeout(context.Background(), time.Minute)\n\t_, err = ts.cfg.K8SClient.KubernetesClientSet().\n\t\tCoreV1().\n\t\tConfigMaps(ts.cfg.EKSConfig.AddOnStresserRemote.Namespace).\n\t\tCreate(\n\t\t\tctx,\n\t\t\t&v1.ConfigMap{\n\t\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\t\tAPIVersion: \"v1\",\n\t\t\t\t\tKind: \"ConfigMap\",\n\t\t\t\t},\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: stresserKubeConfigConfigMapName,\n\t\t\t\t\tNamespace: ts.cfg.EKSConfig.AddOnStresserRemote.Namespace,\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"name\": stresserKubeConfigConfigMapName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tData: map[string]string{\n\t\t\t\t\tstresserKubeConfigConfigMapFileName: string(b),\n\t\t\t\t},\n\t\t\t},\n\t\t\tmetav1.CreateOptions{},\n\t\t)\n\tcancel()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tts.cfg.Logger.Info(\"created config map\")\n\tts.cfg.EKSConfig.Sync()\n\treturn nil\n}", "func TestUpdateRegistryInvalidCached(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\n\tdeps := dependencies.NewDependencyRegistryUpdateNoOp()\n\tdeps.Disable()\n\twt, err := newWorkerTesterCustomDependency(t.Name(), modules.ProdDependencies, deps)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer func() {\n\t\tif err := wt.Close(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}()\n\n\t// Create a registry value.\n\tsk, pk := crypto.GenerateKeyPair()\n\tvar tweak crypto.Hash\n\tfastrand.Read(tweak[:])\n\tdata := 
fastrand.Bytes(modules.RegistryDataSize)\n\trev := fastrand.Uint64n(1000) + 1\n\tspk := types.SiaPublicKey{\n\t\tAlgorithm: types.SignatureEd25519,\n\t\tKey: pk[:],\n\t}\n\trv := modules.NewRegistryValue(tweak, data, rev).Sign(sk)\n\n\t// Run the UpdateRegistry job.\n\terr = wt.UpdateRegistry(context.Background(), spk, rv)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Run the UpdateRegistry job again. This time it's a no-op. The renter\n\t// won't know and increment the revision in the cache.\n\trv.Revision++\n\trv = rv.Sign(sk)\n\tdeps.Enable()\n\terr = wt.UpdateRegistry(context.Background(), spk, rv)\n\tdeps.Disable()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Run the UpdateRegistry job again with a lower rev num than the initial\n\t// one. Causing a ErrLowerRevNumError. The host will use the latest revision\n\t// it knows for the proof which is lower than the one in the worker cache.\n\trv.Revision -= 2\n\trv = rv.Sign(sk)\n\terr = wt.UpdateRegistry(context.Background(), spk, rv)\n\tif !errors.Contains(err, errHostLowerRevisionThanCache) {\n\t\tt.Fatal(err)\n\t}\n\n\t// Make sure there is a recent error and cooldown.\n\twt.staticJobUpdateRegistryQueue.mu.Lock()\n\tif !errors.Contains(wt.staticJobUpdateRegistryQueue.recentErr, errHostLowerRevisionThanCache) {\n\t\tt.Fatal(\"wrong recent error\", wt.staticJobUpdateRegistryQueue.recentErr)\n\t}\n\tif wt.staticJobUpdateRegistryQueue.cooldownUntil == (time.Time{}) {\n\t\tt.Fatal(\"cooldownUntil is not set\")\n\t}\n\twt.staticJobUpdateRegistryQueue.mu.Unlock()\n}", "func (o InstanceGroupManagerActionsSummaryResponseOutput) CreatingWithoutRetries() pulumi.IntOutput {\n\treturn o.ApplyT(func(v InstanceGroupManagerActionsSummaryResponse) int { return v.CreatingWithoutRetries }).(pulumi.IntOutput)\n}", "func createConfigMapFunc(f *framework.Framework, tc *nodeConfigTestCase) error {\n\ttc.configMap.ResourceVersion = \"\"\n\tcm, err := f.ClientSet.CoreV1().ConfigMaps(tc.configMap.Namespace).Create(tc.configMap)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// update tc.configMap's UID and ResourceVersion to match the new ConfigMap, this makes\n\t// sure our derived status checks have up-to-date information\n\ttc.configMap.UID = cm.UID\n\ttc.configMap.ResourceVersion = cm.ResourceVersion\n\treturn nil\n}", "func TestReloadWithReadLock_Success(t *testing.T) {\n\trequire := require.New(t)\n\n\tresources := initVMRegistryTest(t)\n\n\tfactory1 := vms.NewMockFactory(resources.ctrl)\n\tfactory2 := vms.NewMockFactory(resources.ctrl)\n\tfactory3 := vms.NewMockFactory(resources.ctrl)\n\tfactory4 := vms.NewMockFactory(resources.ctrl)\n\n\tregisteredVms := map[ids.ID]vms.Factory{\n\t\tid1: factory1,\n\t\tid2: factory2,\n\t}\n\n\tunregisteredVms := map[ids.ID]vms.Factory{\n\t\tid3: factory3,\n\t\tid4: factory4,\n\t}\n\n\tresources.mockVMGetter.EXPECT().\n\t\tGet().\n\t\tTimes(1).\n\t\tReturn(registeredVms, unregisteredVms, nil)\n\tresources.mockVMRegisterer.EXPECT().\n\t\tRegisterWithReadLock(gomock.Any(), id3, factory3).\n\t\tTimes(1).\n\t\tReturn(nil)\n\tresources.mockVMRegisterer.EXPECT().\n\t\tRegisterWithReadLock(gomock.Any(), id4, factory4).\n\t\tTimes(1).\n\t\tReturn(nil)\n\n\tinstalledVMs, failedVMs, err := resources.vmRegistry.ReloadWithReadLock(context.Background())\n\trequire.NoError(err)\n\trequire.ElementsMatch([]ids.ID{id3, id4}, installedVMs)\n\trequire.Empty(failedVMs)\n}", "func (suite *TaskFailRetryTestSuite) TestTaskFailNoRetry() {\n\n\ttaskConfig := pbtask.TaskConfig{\n\t\tRestartPolicy: &pbtask.RestartPolicy{\n\t\t\tMaxFailures: 
0,\n\t\t},\n\t}\n\n\tsuite.jobFactory.EXPECT().\n\t\tGetJob(suite.jobID).Return(suite.cachedJob)\n\n\tsuite.cachedJob.EXPECT().\n\t\tGetTask(suite.instanceID).Return(suite.cachedTask)\n\n\tsuite.cachedTask.EXPECT().\n\t\tGetRuntime(gomock.Any()).Return(suite.taskRuntime, nil)\n\n\tsuite.taskConfigV2Ops.EXPECT().\n\t\tGetTaskConfig(gomock.Any(), suite.jobID, suite.instanceID, gomock.Any()).\n\t\tReturn(&taskConfig, &models.ConfigAddOn{}, nil)\n\n\terr := TaskFailRetry(context.Background(), suite.taskEnt)\n\tsuite.NoError(err)\n}", "func TestClient_CreateReplica(t *testing.T) {\n\tc := OpenClient(0)\n\tdefer c.Close()\n\n\t// Create replica through client.\n\tif err := c.CreateReplica(123, &url.URL{Host: \"localhost\"}); err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\t// Verify replica was created.\n\tif r := c.Server.Handler.Broker().Replica(123); r == nil {\n\t\tt.Fatalf(\"replica not created\")\n\t}\n}", "func createNewRCWithRetries(f *framework.Framework, name, namespace string, rcObj *v1.ReplicationController) (rc *v1.ReplicationController, err error) {\n\tfor retryCount := 0; retryCount < maxRetries; retryCount++ {\n\t\tif rc, err = f.ClientSet.Core().ReplicationControllers(namespace).Create(rcObj); err == nil {\n\t\t\tframework.Logf(\"Created replication controller %q\", name)\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}", "func createOrFail(t test.Failer, store model.ConfigStoreController, cfg config.Config) {\n\tif _, err := store.Create(cfg); err != nil {\n\t\tt.Fatalf(\"failed creating %s/%s: %v\", cfg.Namespace, cfg.Name, err)\n\t}\n}", "func (m *messagingSuite) TestJoinFirstNodeRetryWithErrors() {\n\tclientPort := freeport.MustNext()\n\tnodeID := api.NewNodeId()\n\tserverAddr := m.addr\n\trequire := m.Require()\n\n\tview := NewView(m.k, nil, nil)\n\trequire.NoError(view.RingAdd(m.ctx, serverAddr, nodeID))\n\n\tm.createAndStartMembershipService(\"first\", serverAddr, view)\n\n\t// Try with the same host details as the server\n\tclientAddr1, client1 := m.makeClient(\"client1\", m.serverPort)\n\tdefer client1.Close()\n\n\tresp, err := m.sendPreJoinMessage(client1, serverAddr, clientAddr1, api.NewNodeId())\n\trequire.NoError(err)\n\trequire.NotNil(resp)\n\trequire.Equal(remoting.JoinStatusCode_HOSTNAME_ALREADY_IN_RING, resp.GetStatusCode())\n\trequire.Equal(m.k, len(resp.GetEndpoints()))\n\trequire.Empty(resp.GetIdentifiers())\n\n\t// Try again with a different port, this should fail because we're using the same\n\t// uuid as the server.\n\tclientAddr2, client2 := m.makeClient(\"client2\", clientPort)\n\tdefer client2.Close()\n\n\tresp2, err := m.sendPreJoinMessage(client2, serverAddr, clientAddr2, nodeID)\n\trequire.NoError(err)\n\trequire.NotNil(resp2)\n\trequire.Equal(remoting.JoinStatusCode_UUID_ALREADY_IN_RING, resp2.GetStatusCode())\n\trequire.Empty(resp2.GetEndpoints())\n\trequire.Empty(resp2.GetIdentifiers())\n}", "func TestRepeatedCrash(t *testing.T) {\n\truntime.GOMAXPROCS(4)\n\n\ttag := \"rc\"\n\tvshost := port(tag+\"v\", 1)\n\tviewservice.StartServer(vshost)\n\ttime.Sleep(time.Second)\n\tvck := viewservice.MakeClerk(\"\", vshost)\n\n\tfmt.Printf(\"Test: Repeated failures/restarts ...\\n\")\n\n\tconst nservers = 4\n\tvar sa [nservers]*PBServer\n\tfor i := 0; i < nservers; i++ {\n\t\tsa[i] = StartServer(vshost, port(tag, i+1))\n\t}\n\n\tfor i := 0; i < viewservice.DeadPings; i++ {\n\t\tv, _ := vck.Get()\n\t\tif v.Primary != \"\" && v.Backup[0] != \"\" && v.Backup[1] != \"\" {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(viewservice.PingInterval)\n\t}\n\n\t// wait a bit 
for primary to initialize backup\n\ttime.Sleep(viewservice.DeadPings * viewservice.PingInterval)\n\n\tdone := false\n\n\tgo func() {\n\t\t// killPBServer and restart servers\n\t\trr := rand.New(rand.NewSource(int64(os.Getpid())))\n\t\tfor done == false {\n\t\t\ti := rr.Int() % nservers\n\t\t\t// fmt.Printf(\"%v killPBServering %v\\n\", ts(), 5001+i)\n\t\t\tsa[i].killPBServer()\n\n\t\t\t// wait long enough for new view to form, backup to be initialized\n\t\t\ttime.Sleep(2 * viewservice.PingInterval * viewservice.DeadPings)\n\n\t\t\tsa[i] = StartServer(vshost, port(tag, i+1))\n\n\t\t\t// wait long enough for new view to form, backup to be initialized\n\t\t\ttime.Sleep(2 * viewservice.PingInterval * viewservice.DeadPings)\n\t\t}\n\t}()\n\n\tconst nth = 2\n\tvar cha [nth]chan bool\n\tfor xi := 0; xi < nth; xi++ {\n\t\tcha[xi] = make(chan bool)\n\t\tgo func(i int) {\n\t\t\tok := false\n\t\t\tdefer func() { cha[i] <- ok }()\n\t\t\tck := MakeClerk(vshost, \"\")\n\t\t\tdata := map[string]string{}\n\t\t\trr := rand.New(rand.NewSource(int64(os.Getpid() + i)))\n\t\t\tfor done == false {\n\t\t\t\tk := strconv.Itoa((i * 1000000) + (rr.Int() % 10))\n\t\t\t\twanted, ok := data[k]\n\t\t\t\tif ok {\n\t\t\t\t\tv := ck.Get(k)\n\t\t\t\t\tif v != wanted {\n\t\t\t\t\t\tt.Fatalf(\"key=%v wanted=%v got=%v\", k, wanted, v)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tnv := strconv.Itoa(rr.Int())\n\t\t\t\tck.Put(k, nv)\n\t\t\t\tdata[k] = nv\n\t\t\t\t// if no sleep here, then server tick() threads do not get\n\t\t\t\t// enough time to Ping the viewserver.\n\t\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\t}\n\t\t\tok = true\n\t\t}(xi)\n\t}\n\n\ttime.Sleep(20 * time.Second)\n\tdone = true\n\n\tfor i := 0; i < nth; i++ {\n\t\tok := <-cha[i]\n\t\tif ok == false {\n\t\t\tt.Fatal(\"child failed\")\n\t\t}\n\t}\n\n\tck := MakeClerk(vshost, \"\")\n\tck.Put(\"aaa\", \"bbb\")\n\tif v := ck.Get(\"aaa\"); v != \"bbb\" {\n\t\tt.Fatalf(\"final Put/Get failed\")\n\t}\n\n\tfmt.Printf(\" ... 
Passed\\n\")\n\n\tfor i := 0; i < nservers; i++ {\n\t\tsa[i].killPBServer()\n\t}\n\ttime.Sleep(time.Second)\n}", "func TestDeleteWithRetry(t *testing.T) {\n\tscheme, codecs := testScheme(t)\n\tcodec := apitesting.TestCodec(codecs, examplev1.SchemeGroupVersion)\n\tserver := etcdtesting.NewEtcdTestClientServer(t)\n\tdefer server.Terminate(t)\n\tprefix := path.Join(\"/\", etcdtest.PathPrefix())\n\n\tobj := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: \"foo\", UID: \"A\"}}\n\t// fakeGet returns a large ModifiedIndex to emulate the case that another\n\t// party has updated the object.\n\tfakeGet := func(ctx context.Context, key string, opts *etcd.GetOptions) (*etcd.Response, error) {\n\t\tdata, _ := runtime.Encode(codec, obj)\n\t\treturn &etcd.Response{Node: &etcd.Node{Value: defaultPrefixValue(data), ModifiedIndex: 99}}, nil\n\t}\n\texpectedRetries := 3\n\thelper := newEtcdHelper(server.Client, scheme, codec, prefix)\n\tfake := &fakeDeleteKeysAPI{KeysAPI: helper.etcdKeysAPI, fakeGetCap: expectedRetries, fakeGetFunc: fakeGet}\n\thelper.etcdKeysAPI = fake\n\n\treturnedObj := &example.Pod{}\n\terr := helper.Create(context.TODO(), \"/some/key\", obj, returnedObj, 0)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error %#v\", err)\n\t}\n\n\terr = helper.Delete(context.TODO(), \"/some/key\", obj, storage.NewUIDPreconditions(\"A\"))\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error %#v\", err)\n\t}\n\tif fake.getCount != expectedRetries {\n\t\tt.Errorf(\"Expect %d retries, got %d\", expectedRetries, fake.getCount)\n\t}\n\terr = helper.Get(context.TODO(), \"/some/key\", \"\", obj, false)\n\tif !storage.IsNotFound(err) {\n\t\tt.Errorf(\"Expect an NotFound error, got %v\", err)\n\t}\n}", "func TestCreateNewFabricCAClientCertFilesMissingFailure(t *testing.T) {\n\tmockCtrl := gomock.NewController(t)\n\tdefer mockCtrl.Finish()\n\tmockConfig := mock_apiconfig.NewMockConfig(mockCtrl)\n\tmockConfig.EXPECT().CAConfig(org1).Return(&config.CAConfig{URL: \"\"}, nil)\n\tmockConfig.EXPECT().CAServerCertPaths(org1).Return(nil, errors.New(\"CAServerCertPaths error\"))\n\t_, err := NewFabricCAClient(org1, mockConfig, cryptoSuiteProvider)\n\tif err.Error() != \"CAServerCertPaths error\" {\n\t\tt.Fatalf(\"Expected error from CAServerCertPaths. 
Got: %s\", err.Error())\n\t}\n}", "func testFailWithTweak(key *Key, data *metadata.WrappedKeyData, tweak []byte) error {\n\ttweak[0]++\n\tkey, err := Unwrap(key, data)\n\tif err == nil {\n\t\tkey.Wipe()\n\t}\n\ttweak[0]--\n\treturn err\n}", "func TestFailureReasonsCompatibilityMap(t *testing.T) {\n\tf := newFailureReasonMapper(nil)\n\trequire.Equal(t, failureReasonsCompatibilityMap, f.compatibilityMap)\n\n\tfor _, r := range allFailureReasons {\n\t\tt.Run(string(r), func(t *testing.T) {\n\t\t\tf.Map(r)\n\t\t\tassert.NoError(t, f.err)\n\t\t})\n\t}\n}", "func rollbackKeyCreation(iamSvc *iam.IAM, accessKey *iam.AccessKey) {\n\taccessKeyId := aws.StringValue(accessKey.AccessKeyId)\n\terr := deleteAccessKey(iamSvc, accessKeyId)\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to delete new AccessKey, there are now probably 2 access keys for this user\")\n\t} else {\n\t\tlog.Errorf(\"Rollbacked new AccessKey (%s)\", accessKeyId)\n\t}\n}", "func TestPostNonRetriable(t *testing.T) {\n\tstatus := http.StatusBadRequest\n\ttries := 0\n\tts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(status)\n\t\tif tries++; tries > 1 {\n\t\t\tt.Errorf(\"expected client to not retry after receiving status code %d\", status)\n\t\t}\n\t}))\n\n\tdefer ts.Close()\n\n\tc := &APIClient{\n\t\tBaseURL: ts.URL,\n\t\tClient: ts.Client(),\n\t}\n\n\terr := c.PingSuccess(TestUUID, nil)\n\tif err == nil {\n\t\tt.Errorf(\"expected PingSuccess to return non-nil error after non-retriable API response\")\n\t}\n}" ]
[ "0.58306473", "0.5762952", "0.5743948", "0.5601782", "0.540478", "0.5353772", "0.5273277", "0.5259641", "0.5246419", "0.5239281", "0.52142185", "0.5167846", "0.51596874", "0.5153592", "0.51423925", "0.5119595", "0.5094918", "0.50900286", "0.50418925", "0.5036021", "0.503334", "0.5009414", "0.5004293", "0.49870953", "0.49803707", "0.49609673", "0.49524903", "0.49451122", "0.49361807", "0.49275044", "0.49261567", "0.49181536", "0.4918133", "0.49046865", "0.49021044", "0.48959675", "0.489396", "0.48897323", "0.4883946", "0.48797292", "0.48570895", "0.48539707", "0.48482946", "0.4844088", "0.48392338", "0.4837833", "0.4832156", "0.48295745", "0.48260185", "0.48222804", "0.4796398", "0.47825098", "0.47776535", "0.47771037", "0.4776948", "0.47644752", "0.47565523", "0.47541565", "0.47448835", "0.474115", "0.47361472", "0.47352764", "0.4727085", "0.47263265", "0.4721003", "0.47168818", "0.4714499", "0.47121182", "0.47033548", "0.4703062", "0.46993184", "0.46959385", "0.46803558", "0.467889", "0.46741244", "0.46733588", "0.46653807", "0.4662756", "0.46585202", "0.46553597", "0.46421173", "0.46416306", "0.46415874", "0.46386352", "0.46321842", "0.46284872", "0.46203533", "0.46203128", "0.46106416", "0.460773", "0.46054965", "0.4602915", "0.4601074", "0.4600097", "0.45979604", "0.45840716", "0.45840064", "0.45814615", "0.45811212", "0.45783272" ]
0.75010645
0
TestCreateRetryConflictNoTagDiff ensures that attempts to create a mapping that result in resource conflicts that do NOT include tag diffs cause the create to be retried successfully.
func TestCreateRetryConflictNoTagDiff(t *testing.T) {
	registry := registryhostname.TestingRegistryHostnameRetriever(nil, "", testDefaultRegistryURL)
	firstUpdate := true
	restInstance := &REST{
		strategy: NewStrategy(registry),
		imageRegistry: &fakeImageRegistry{
			createImage: func(ctx context.Context, image *imageapi.Image) error {
				return nil
			},
		},
		imageStreamRegistry: &fakeImageStreamRegistry{
			getImageStream: func(ctx context.Context, id string, options *metav1.GetOptions) (*imageapi.ImageStream, error) {
				stream := validImageStream()
				stream.Status = imageapi.ImageStreamStatus{
					Tags: map[string]imageapi.TagEventList{
						"latest": {Items: []imageapi.TagEvent{{DockerImageReference: "localhost:5000/someproject/somerepo:original"}}},
					},
				}
				return stream, nil
			},
			updateImageStreamStatus: func(ctx context.Context, repo *imageapi.ImageStream) (*imageapi.ImageStream, error) {
				// For the first update call, return a conflict to cause a retry of an
				// image stream whose tags haven't changed.
				if firstUpdate {
					firstUpdate = false
					return nil, errors.NewConflict(imagegroup.Resource("imagestreams"), repo.Name, fmt.Errorf("resource modified"))
				}
				return repo, nil
			},
		},
	}
	obj, err := restInstance.Create(apirequest.NewDefaultContext(), validNewMappingWithName(), rest.ValidateAllObjectFunc, false)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if obj == nil {
		t.Fatalf("expected a result")
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestCreateRetryConflictTagDiff(t *testing.T) {\n\tfirstGet := true\n\tfirstUpdate := true\n\trestInstance := &REST{\n\t\tstrategy: NewStrategy(registryhostname.TestingRegistryHostnameRetriever(nil, \"\", testDefaultRegistryURL)),\n\t\timageRegistry: &fakeImageRegistry{\n\t\t\tcreateImage: func(ctx context.Context, image *imageapi.Image) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\timageStreamRegistry: &fakeImageStreamRegistry{\n\t\t\tgetImageStream: func(ctx context.Context, id string, options *metav1.GetOptions) (*imageapi.ImageStream, error) {\n\t\t\t\t// For the first get, return a stream with a latest tag pointing to \"original\"\n\t\t\t\tif firstGet {\n\t\t\t\t\tfirstGet = false\n\t\t\t\t\tstream := validImageStream()\n\t\t\t\t\tstream.Status = imageapi.ImageStreamStatus{\n\t\t\t\t\t\tTags: map[string]imageapi.TagEventList{\n\t\t\t\t\t\t\t\"latest\": {Items: []imageapi.TagEvent{{DockerImageReference: \"localhost:5000/someproject/somerepo:original\"}}},\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t\treturn stream, nil\n\t\t\t\t}\n\t\t\t\t// For subsequent gets, return a stream with the latest tag changed to \"newer\"\n\t\t\t\tstream := validImageStream()\n\t\t\t\tstream.Status = imageapi.ImageStreamStatus{\n\t\t\t\t\tTags: map[string]imageapi.TagEventList{\n\t\t\t\t\t\t\"latest\": {Items: []imageapi.TagEvent{{DockerImageReference: \"localhost:5000/someproject/somerepo:newer\"}}},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\treturn stream, nil\n\t\t\t},\n\t\t\tupdateImageStreamStatus: func(ctx context.Context, repo *imageapi.ImageStream) (*imageapi.ImageStream, error) {\n\t\t\t\t// For the first update, return a conflict so that the stream\n\t\t\t\t// get/compare is retried.\n\t\t\t\tif firstUpdate {\n\t\t\t\t\tfirstUpdate = false\n\t\t\t\t\treturn nil, errors.NewConflict(imagegroup.Resource(\"imagestreams\"), repo.Name, fmt.Errorf(\"resource modified\"))\n\t\t\t\t}\n\t\t\t\treturn repo, nil\n\t\t\t},\n\t\t},\n\t}\n\tobj, err := restInstance.Create(apirequest.NewDefaultContext(), validNewMappingWithName(), rest.ValidateAllObjectFunc, false)\n\tif err == nil {\n\t\tt.Fatalf(\"expected an error\")\n\t}\n\tif !errors.IsConflict(err) {\n\t\tt.Errorf(\"expected a conflict error, got %v\", err)\n\t}\n\tif obj != nil {\n\t\tt.Fatalf(\"expected a nil result\")\n\t}\n}", "func TestCreateRetryUnrecoverable(t *testing.T) {\n\tregistry := registryhostname.TestingRegistryHostnameRetriever(nil, \"\", testDefaultRegistryURL)\n\trestInstance := &REST{\n\t\tstrategy: NewStrategy(registry),\n\t\timageRegistry: &fakeImageRegistry{\n\t\t\tcreateImage: func(ctx context.Context, image *imageapi.Image) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\timageStreamRegistry: &fakeImageStreamRegistry{\n\t\t\tgetImageStream: func(ctx context.Context, id string, options *metav1.GetOptions) (*imageapi.ImageStream, error) {\n\t\t\t\treturn validImageStream(), nil\n\t\t\t},\n\t\t\tlistImageStreams: func(ctx context.Context, options *metainternal.ListOptions) (*imageapi.ImageStreamList, error) {\n\t\t\t\ts := validImageStream()\n\t\t\t\treturn &imageapi.ImageStreamList{Items: []imageapi.ImageStream{*s}}, nil\n\t\t\t},\n\t\t\tupdateImageStreamStatus: func(ctx context.Context, repo *imageapi.ImageStream) (*imageapi.ImageStream, error) {\n\t\t\t\treturn nil, errors.NewServiceUnavailable(\"unrecoverable error\")\n\t\t\t},\n\t\t},\n\t}\n\tobj, err := restInstance.Create(apirequest.NewDefaultContext(), validNewMappingWithName(), rest.ValidateAllObjectFunc, false)\n\tif err == nil {\n\t\tt.Errorf(\"expected an 
error\")\n\t}\n\tif obj != nil {\n\t\tt.Fatalf(\"expected a nil result\")\n\t}\n}", "func TestContainerCreationConflict(t *testing.T) {\n\tsConfig := makeSandboxConfig(\"foo\", \"bar\", \"1\", 0)\n\tconfig := makeContainerConfig(sConfig, \"pause\", \"iamimage\", 0, map[string]string{}, map[string]string{})\n\tcontainerName := makeContainerName(sConfig, config)\n\tconst sandboxId = \"sandboxid\"\n\tconst containerId = \"containerid\"\n\tconflictError := fmt.Errorf(\"Error response from daemon: Conflict. The name \\\"/%s\\\" is already in use by container %s. You have to remove (or rename) that container to be able to reuse that name.\",\n\t\tcontainerName, containerId)\n\tnoContainerError := fmt.Errorf(\"Error response from daemon: No such container: %s\", containerId)\n\trandomError := fmt.Errorf(\"random error\")\n\n\tfor desc, test := range map[string]struct {\n\t\tcreateError error\n\t\tremoveError error\n\t\texpectError error\n\t\texpectCalls []string\n\t\texpectFields int\n\t}{\n\t\t\"no create error\": {\n\t\t\texpectCalls: []string{\"create\"},\n\t\t\texpectFields: 6,\n\t\t},\n\t\t\"random create error\": {\n\t\t\tcreateError: randomError,\n\t\t\texpectError: randomError,\n\t\t\texpectCalls: []string{\"create\"},\n\t\t},\n\t\t\"conflict create error with successful remove\": {\n\t\t\tcreateError: conflictError,\n\t\t\texpectError: conflictError,\n\t\t\texpectCalls: []string{\"create\", \"remove\"},\n\t\t},\n\t\t\"conflict create error with random remove error\": {\n\t\t\tcreateError: conflictError,\n\t\t\tremoveError: randomError,\n\t\t\texpectError: conflictError,\n\t\t\texpectCalls: []string{\"create\", \"remove\"},\n\t\t},\n\t\t\"conflict create error with no such container remove error\": {\n\t\t\tcreateError: conflictError,\n\t\t\tremoveError: noContainerError,\n\t\t\texpectCalls: []string{\"create\", \"remove\", \"create\"},\n\t\t\texpectFields: 7,\n\t\t},\n\t} {\n\t\tt.Logf(\"TestCase: %s\", desc)\n\t\tds, fDocker, _ := newTestDockerService()\n\n\t\tif test.createError != nil {\n\t\t\tfDocker.InjectError(\"create\", test.createError)\n\t\t}\n\t\tif test.removeError != nil {\n\t\t\tfDocker.InjectError(\"remove\", test.removeError)\n\t\t}\n\t\tid, err := ds.CreateContainer(sandboxId, config, sConfig)\n\t\trequire.Equal(t, test.expectError, err)\n\t\tassert.NoError(t, fDocker.AssertCalls(test.expectCalls))\n\t\tif err == nil {\n\t\t\tc, err := fDocker.InspectContainer(id)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Len(t, strings.Split(c.Name, nameDelimiter), test.expectFields)\n\t\t}\n\t}\n}", "func TestNoConflicts(t *testing.T) {\n\ttestDB(t, func(db *bolt.DB) {\n\t\tbucketName := []byte(\"testBucket\")\n\n\t\tif err := db.Update(func(tx *bolt.Tx) error {\n\t\t\tb, err := tx.CreateBucket(bucketName)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tc := b.Cursor()\n\t\t\tfor _, test := range matchTests {\n\t\t\t\tpathB := []byte(test.path)\n\t\t\t\tif k, _ := SeekPathConflict(c, pathB); k != nil {\n\t\t\t\t\tt.Errorf(\"unexpected conflict with %q: %s\", test.path, string(k))\n\t\t\t\t}\n\n\t\t\t\tif err := b.Put(pathB, []byte{}); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t}); err != nil {\n\t\t\tt.Fatal(\"failed to insert paths:\", err)\n\t\t}\n\t})\n}", "func TestCommitConflictRepeat4A(t *testing.T) {\n}", "func TestConflictResolution(t *testing.T) {\n\tpoolB := mkPool(poolBUID, \"pool-b\", []string{\"10.0.10.0/24\", \"FF::0/48\"})\n\tpoolB.CreationTimestamp = meta_v1.Date(2022, 10, 16, 13, 30, 00, 0, time.UTC)\n\tfixture := 
mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t\tpoolB,\n\t}, true, false, nil)\n\n\tawait := fixture.AwaitPool(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != poolResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tpool := fixture.PatchedPool(action)\n\n\t\tif pool.Name != \"pool-b\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif !isPoolConflicting(pool) {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Pool B has not been marked conflicting\")\n\t}\n\n\t// All ranges of a conflicting pool must be disabled\n\tpoolBRanges, _ := fixture.lbIPAM.rangesStore.GetRangesForPool(\"pool-b\")\n\tfor _, r := range poolBRanges {\n\t\tif !r.internallyDisabled {\n\t\t\tt.Fatalf(\"Range '%s' from pool B hasn't been disabled\", ipNetStr(r.allocRange.CIDR()))\n\t\t}\n\t}\n\n\t// Phase 2, resolving the conflict\n\n\tawait = fixture.AwaitPool(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != poolResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tpool := fixture.PatchedPool(action)\n\n\t\tif pool.Name != \"pool-b\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif isPoolConflicting(pool) {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tpoolB, err := fixture.poolClient.Get(context.Background(), \"pool-b\", meta_v1.GetOptions{})\n\tif err != nil {\n\t\tt.Fatal(poolB)\n\t}\n\n\t// Remove the conflicting range\n\tpoolB.Spec.Cidrs = []cilium_api_v2alpha1.CiliumLoadBalancerIPPoolCIDRBlock{\n\t\t{\n\t\t\tCidr: cilium_api_v2alpha1.IPv4orIPv6CIDR(\"FF::0/48\"),\n\t\t},\n\t}\n\n\t_, err = fixture.poolClient.Update(context.Background(), poolB, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Pool b has not de-conflicted\")\n\t}\n}", "func TestDefaultConflictResolverWithTombstoneRemote(t *testing.T) {\n\tbase.RequireNumTestBuckets(t, 2)\n\tif !base.TestUseXattrs() {\n\t\tt.Skip(\"This test only works with XATTRS enabled\")\n\t}\n\tbase.SetUpTestLogging(t, logger.LevelInfo, logger.KeyAll)\n\n\tdefaultConflictResolverWithTombstoneTests := []struct {\n\t\tname string // A unique name to identify the unit test.\n\t\tlocalBodyValues []string // Controls the local revision generation.\n\t\texpectedRevID string // Expected document revision ID.\n\t}{\n\t\t{\n\t\t\t// Revision tie with remote digest is lower than the local digest.\n\t\t\t// local generation = remote generation:\n\t\t\t//\t- e.g. local is 3-b, remote is 3-a(T)\n\t\t\tname: \"revGenTieRemoteDigestLower\",\n\t\t\tlocalBodyValues: []string{\"baz\", \"EADGBE\"},\n\t\t\texpectedRevID: \"4-0748692c1535b62f59b2c276cc2a8bda\",\n\t\t},\n\t\t{\n\t\t\t// Revision tie with remote digest is higher than the local digest.\n\t\t\t// local generation = remote generation:\n\t\t\t//\t- e.g. local is 3-b, remote is 3-c(T)\n\t\t\tname: \"revGenTieRemoteDigestHigher\",\n\t\t\tlocalBodyValues: []string{\"baz\", \"qux\"},\n\t\t\texpectedRevID: \"4-5afdb61ba968c9eaa7599e727c4c1b53\",\n\t\t},\n\t\t{\n\t\t\t// Local revision generation is higher than remote revision generation.\n\t\t\t// local generation > remote generation:\n\t\t\t// - e.g. 
local is 4-b, remote is 3-a(T)\n\t\t\tname: \"revGenRemoteLower\",\n\t\t\tlocalBodyValues: []string{\"baz\", \"qux\", \"grunt\"},\n\t\t\texpectedRevID: \"5-962dc965fd8e7fd2bc3ffbcab85d53ba\",\n\t\t},\n\t\t{\n\t\t\t// Local revision generation is lower than remote revision generation.\n\t\t\t// local generation < remote generation:\n\t\t\t//\t- e.g. local is 2-b, remote is 3-a(T)\n\t\t\tname: \"revGenRemoteHigher\",\n\t\t\tlocalBodyValues: []string{\"grunt\"},\n\t\t\texpectedRevID: \"3-cd4c29d9c84fc8b2a51c50e1234252c9\",\n\t\t},\n\t}\n\n\tfor _, test := range defaultConflictResolverWithTombstoneTests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\t// Passive\n\t\t\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\tTestBucket: base.GetTestBucket(t),\n\t\t\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\t\t\"alice\": {\n\t\t\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\t\t\tExplicitChannels: utils.SetOf(\"alice\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t})\n\t\t\tdefer rt2.Close()\n\n\t\t\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1\n\t\t\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\t\t\tdefer srv.Close()\n\n\t\t\t// Build passiveDBURL with basic auth creds\n\t\t\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\t\t\trequire.NoError(t, err)\n\t\t\tpassiveDBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\t\t\t// Active\n\t\t\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\tTestBucket: base.GetTestBucket(t),\n\t\t\t})\n\t\t\tdefer rt1.Close()\n\n\t\t\tdefaultConflictResolver, err := db.NewCustomConflictResolver(\n\t\t\t\t`function(conflict) { return defaultPolicy(conflict); }`)\n\t\t\trequire.NoError(t, err, \"Error creating custom conflict resolver\")\n\n\t\t\tconfig := db.ActiveReplicatorConfig{\n\t\t\t\tID: t.Name(),\n\t\t\t\tDirection: db.ActiveReplicatorTypePushAndPull,\n\t\t\t\tRemoteDBURL: passiveDBURL,\n\t\t\t\tActiveDB: &db.Database{\n\t\t\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t\t\t},\n\t\t\t\tContinuous: true,\n\t\t\t\tConflictResolverFunc: defaultConflictResolver,\n\t\t\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t\t\t}\n\n\t\t\t// Create the first revision of the document on rt2.\n\t\t\tdocID := test.name + \"foo\"\n\t\t\trt2RevIDCreated := createOrUpdateDoc(t, rt2, docID, \"\", \"foo\")\n\n\t\t\t// Create active replicator and start replication.\n\t\t\tar := db.NewActiveReplicator(&config)\n\t\t\trequire.NoError(t, ar.Start(), \"Error starting replication\")\n\t\t\tdefer func() { require.NoError(t, ar.Stop(), \"Error stopping replication\") }()\n\n\t\t\t// Wait for the original document revision written to rt2 to arrive at rt1.\n\t\t\trt1RevIDCreated := rt2RevIDCreated\n\t\t\trequire.NoError(t, rt1.WaitForCondition(func() bool {\n\t\t\t\tdoc, _ := rt1.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\t\t\t\treturn doc != nil && len(doc.Body()) > 0\n\t\t\t}))\n\t\t\trequireRevID(t, rt1, docID, rt1RevIDCreated)\n\n\t\t\t// Stop replication.\n\t\t\trequire.NoError(t, ar.Stop(), \"Error stopping replication\")\n\n\t\t\t// Update the document on rt2 to build a revision history.\n\t\t\trt2RevIDUpdated := createOrUpdateDoc(t, rt2, docID, rt2RevIDCreated, \"bar\")\n\n\t\t\t// Tombstone the document on rt2 to mark the tip of the revision history for deletion.\n\t\t\tresp := rt2.SendAdminRequest(http.MethodDelete, 
\"/db/\"+docID+\"?rev=\"+rt2RevIDUpdated, ``)\n\t\t\tassertStatus(t, resp, http.StatusOK)\n\t\t\trt2RevID := respRevID(t, resp)\n\t\t\tlog.Printf(\"rt2RevID: %s\", rt2RevID)\n\n\t\t\t// Ensure that the tombstone revision is written to rt2 bucket with an empty body.\n\t\t\twaitForTombstone(t, rt2, docID)\n\n\t\t\t// Update the document on rt1 with the specified body values.\n\t\t\trt1RevID := rt1RevIDCreated\n\t\t\tfor _, bodyValue := range test.localBodyValues {\n\t\t\t\trt1RevID = createOrUpdateDoc(t, rt1, docID, rt1RevID, bodyValue)\n\t\t\t}\n\n\t\t\t// Start replication.\n\t\t\trequire.NoError(t, ar.Start(), \"Error starting replication\")\n\n\t\t\t// Wait for default conflict resolution policy to be applied through replication and\n\t\t\t// the winning revision to be written to both rt1 and rt2 buckets. Check whether the\n\t\t\t// winning revision is a tombstone; tombstone revision wins over non-tombstone revision.\n\t\t\twaitForTombstone(t, rt1, docID)\n\t\t\twaitForTombstone(t, rt2, docID)\n\n\t\t\trequireRevID(t, rt1, docID, test.expectedRevID)\n\t\t\t// Wait for conflict resolved doc (tombstone) to be pulled to passive bucket\n\t\t\t// Then require it is the expected rev\n\t\t\trequire.NoError(t, rt2.WaitForCondition(func() bool {\n\t\t\t\tdoc, _ := rt2.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\t\t\t\treturn doc != nil && doc.SyncData.CurrentRev == test.expectedRevID\n\t\t\t}))\n\n\t\t\t// Ensure that the document body of the winning tombstone revision written to both\n\t\t\t// rt1 and rt2 is empty, i.e., An attempt to read the document body of a tombstone\n\t\t\t// revision via SDK should return a \"key not found\" error.\n\t\t\trequireErrorKeyNotFound(t, rt2, docID)\n\t\t\trequireErrorKeyNotFound(t, rt1, docID)\n\t\t})\n\t}\n}", "func TestDefaultConflictResolverWithTombstoneLocal(t *testing.T) {\n\tbase.RequireNumTestBuckets(t, 2)\n\tif !base.TestUseXattrs() {\n\t\tt.Skip(\"This test only works with XATTRS enabled\")\n\t}\n\tbase.SetUpTestLogging(t, logger.LevelDebug, logger.KeyAll)\n\n\tdefaultConflictResolverWithTombstoneTests := []struct {\n\t\tname string // A unique name to identify the unit test.\n\t\tremoteBodyValues []string // Controls the remote revision generation.\n\t\texpectedRevID string // Expected document revision ID.\n\t}{\n\t\t{\n\t\t\t// Revision tie with local digest is lower than the remote digest.\n\t\t\t// local generation = remote generation:\n\t\t\t//\t- e.g. local is 3-a(T), remote is 3-b\n\t\t\tname: \"revGenTieLocalDigestLower\",\n\t\t\tremoteBodyValues: []string{\"baz\", \"EADGBE\"},\n\t\t\texpectedRevID: \"4-c6fe7cde8f7187705f9e048322a9c350\",\n\t\t},\n\t\t{\n\t\t\t// Revision tie with local digest is higher than the remote digest.\n\t\t\t// local generation = remote generation:\n\t\t\t//\t- e.g. local is 3-c(T), remote is 3-b\n\t\t\tname: \"revGenTieLocalDigestHigher\",\n\t\t\tremoteBodyValues: []string{\"baz\", \"qux\"},\n\t\t\texpectedRevID: \"4-a210e8a790415d7e842e78e1d051cb3d\",\n\t\t},\n\t\t{\n\t\t\t// Local revision generation is lower than remote revision generation.\n\t\t\t// local generation < remote generation:\n\t\t\t// - e.g. local is 3-a(T), remote is 4-b\n\t\t\tname: \"revGenLocalLower\",\n\t\t\tremoteBodyValues: []string{\"baz\", \"qux\", \"grunt\"},\n\t\t\texpectedRevID: \"5-fe3ac95144be01e9b455bfa163687f0e\",\n\t\t},\n\t\t{\n\t\t\t// Local revision generation is higher than remote revision generation.\n\t\t\t// local generation > remote generation:\n\t\t\t//\t- e.g. 
local is 3-a(T), remote is 2-b\n\t\t\tname: \"revGenLocalHigher\",\n\t\t\tremoteBodyValues: []string{\"baz\"},\n\t\t\texpectedRevID: \"4-232b1f34f6b9341c54435eaf5447d85d\",\n\t\t},\n\t}\n\n\tfor _, test := range defaultConflictResolverWithTombstoneTests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\t// Passive\n\t\t\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\tTestBucket: base.GetTestBucket(t),\n\t\t\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\t\t\"alice\": {\n\t\t\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\t\t\tExplicitChannels: utils.SetOf(\"alice\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t})\n\t\t\tdefer rt2.Close()\n\n\t\t\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1\n\t\t\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\t\t\tdefer srv.Close()\n\n\t\t\t// Build passiveDBURL with basic auth creds\n\t\t\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\t\t\trequire.NoError(t, err)\n\t\t\tpassiveDBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\t\t\t// Active\n\t\t\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\tTestBucket: base.GetTestBucket(t),\n\t\t\t})\n\t\t\tdefer rt1.Close()\n\n\t\t\tdefaultConflictResolver, err := db.NewCustomConflictResolver(\n\t\t\t\t`function(conflict) { return defaultPolicy(conflict); }`)\n\t\t\trequire.NoError(t, err, \"Error creating custom conflict resolver\")\n\n\t\t\tconfig := db.ActiveReplicatorConfig{\n\t\t\t\tID: t.Name(),\n\t\t\t\tDirection: db.ActiveReplicatorTypePushAndPull,\n\t\t\t\tRemoteDBURL: passiveDBURL,\n\t\t\t\tActiveDB: &db.Database{\n\t\t\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t\t\t},\n\t\t\t\tContinuous: true,\n\t\t\t\tConflictResolverFunc: defaultConflictResolver,\n\t\t\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t\t\t}\n\n\t\t\t// Create the first revision of the document on rt1.\n\t\t\tdocID := t.Name() + \"foo\"\n\t\t\trt1RevIDCreated := createOrUpdateDoc(t, rt1, docID, \"\", \"foo\")\n\n\t\t\t// Create active replicator and start replication.\n\t\t\tar := db.NewActiveReplicator(&config)\n\t\t\trequire.NoError(t, ar.Start(), \"Error starting replication\")\n\t\t\tdefer func() { require.NoError(t, ar.Stop(), \"Error stopping replication\") }()\n\n\t\t\t// Wait for the original document revision written to rt1 to arrive at rt2.\n\t\t\trt2RevIDCreated := rt1RevIDCreated\n\t\t\trequire.NoError(t, rt2.WaitForCondition(func() bool {\n\t\t\t\tdoc, _ := rt2.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\t\t\t\treturn doc != nil && len(doc.Body()) > 0\n\t\t\t}))\n\t\t\trequireRevID(t, rt2, docID, rt2RevIDCreated)\n\n\t\t\t// Stop replication.\n\t\t\trequire.NoError(t, ar.Stop(), \"Error stopping replication\")\n\n\t\t\t// Update the document on rt1 to build a revision history.\n\t\t\trt1RevIDUpdated := createOrUpdateDoc(t, rt1, docID, rt1RevIDCreated, \"bar\")\n\n\t\t\t// Tombstone the document on rt1 to mark the tip of the revision history for deletion.\n\t\t\tresp := rt1.SendAdminRequest(http.MethodDelete, \"/db/\"+docID+\"?rev=\"+rt1RevIDUpdated, ``)\n\t\t\tassertStatus(t, resp, http.StatusOK)\n\n\t\t\t// Ensure that the tombstone revision is written to rt1 bucket with an empty body.\n\t\t\twaitForTombstone(t, rt1, docID)\n\n\t\t\t// Update the document on rt2 with the specified body values.\n\t\t\trt2RevID := rt2RevIDCreated\n\t\t\tfor _, bodyValue := range 
test.remoteBodyValues {\n\t\t\t\trt2RevID = createOrUpdateDoc(t, rt2, docID, rt2RevID, bodyValue)\n\t\t\t}\n\n\t\t\t// Start replication.\n\t\t\trequire.NoError(t, ar.Start(), \"Error starting replication\")\n\n\t\t\t// Wait for default conflict resolution policy to be applied through replication and\n\t\t\t// the winning revision to be written to both rt1 and rt2 buckets. Check whether the\n\t\t\t// winning revision is a tombstone; tombstone revision wins over non-tombstone revision.\n\t\t\twaitForTombstone(t, rt2, docID)\n\t\t\twaitForTombstone(t, rt1, docID)\n\n\t\t\trequireRevID(t, rt2, docID, test.expectedRevID)\n\t\t\trequireRevID(t, rt1, docID, test.expectedRevID)\n\n\t\t\t// Ensure that the document body of the winning tombstone revision written to both\n\t\t\t// rt1 and rt2 is empty, i.e., An attempt to read the document body of a tombstone\n\t\t\t// revision via SDK should return a \"key not found\" error.\n\t\t\trequireErrorKeyNotFound(t, rt2, docID)\n\t\t\trequireErrorKeyNotFound(t, rt1, docID)\n\t\t})\n\t}\n}", "func TestPoolInternalConflict(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\", \"10.0.10.64/28\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, false, nil)\n\n\tawait := fixture.AwaitPool(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != poolResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tpool := fixture.PatchedPool(action)\n\n\t\treturn isPoolConflicting(pool)\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected pool to be marked conflicting\")\n\t}\n\n\tawait = fixture.AwaitPool(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != poolResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tpool := fixture.PatchedPool(action)\n\n\t\treturn !isPoolConflicting(pool)\n\t}, 2*time.Second)\n\n\tpool, err := fixture.poolClient.Get(context.Background(), \"pool-a\", meta_v1.GetOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tpool.Spec.Cidrs = []cilium_api_v2alpha1.CiliumLoadBalancerIPPoolCIDRBlock{\n\t\t{\n\t\t\tCidr: \"10.0.10.0/24\",\n\t\t},\n\t}\n\n\t_, err = fixture.poolClient.Update(context.Background(), pool, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected pool to be un-marked conflicting\")\n\t}\n}", "func TestActiveReplicatorPullConflict(t *testing.T) {\n\n\t// scenarios\n\tconflictResolutionTests := []struct {\n\t\tname string\n\t\tlocalRevisionBody db.Body\n\t\tlocalRevID string\n\t\tremoteRevisionBody db.Body\n\t\tremoteRevID string\n\t\tconflictResolver string\n\t\texpectedLocalBody db.Body\n\t\texpectedLocalRevID string\n\t\texpectedTombstonedRevID string\n\t\texpectedResolutionType db.ConflictResolutionType\n\t\tskipActiveLeafAssertion bool\n\t\tskipBodyAssertion bool\n\t}{\n\t\t{\n\t\t\tname: \"remoteWins\",\n\t\t\tlocalRevisionBody: db.Body{\"source\": \"local\"},\n\t\t\tlocalRevID: \"1-a\",\n\t\t\tremoteRevisionBody: db.Body{\"source\": \"remote\"},\n\t\t\tremoteRevID: \"1-b\",\n\t\t\tconflictResolver: `function(conflict) {return conflict.RemoteDocument;}`,\n\t\t\texpectedLocalBody: db.Body{\"source\": \"remote\"},\n\t\t\texpectedLocalRevID: \"1-b\",\n\t\t\texpectedResolutionType: db.ConflictResolutionRemote,\n\t\t},\n\t\t{\n\t\t\tname: \"merge\",\n\t\t\tlocalRevisionBody: db.Body{\"source\": 
\"local\"},\n\t\t\tlocalRevID: \"1-a\",\n\t\t\tremoteRevisionBody: db.Body{\"source\": \"remote\"},\n\t\t\tremoteRevID: \"1-b\",\n\t\t\tconflictResolver: `function(conflict) {\n\t\t\t\t\tvar mergedDoc = new Object();\n\t\t\t\t\tmergedDoc.source = \"merged\";\n\t\t\t\t\treturn mergedDoc;\n\t\t\t\t}`,\n\t\t\texpectedLocalBody: db.Body{\"source\": \"merged\"},\n\t\t\texpectedLocalRevID: db.CreateRevIDWithBytes(2, \"1-b\", []byte(`{\"source\":\"merged\"}`)), // rev for merged body, with parent 1-b\n\t\t\texpectedResolutionType: db.ConflictResolutionMerge,\n\t\t},\n\t\t{\n\t\t\tname: \"localWins\",\n\t\t\tlocalRevisionBody: db.Body{\"source\": \"local\"},\n\t\t\tlocalRevID: \"1-a\",\n\t\t\tremoteRevisionBody: db.Body{\"source\": \"remote\"},\n\t\t\tremoteRevID: \"1-b\",\n\t\t\tconflictResolver: `function(conflict) {return conflict.LocalDocument;}`,\n\t\t\texpectedLocalBody: db.Body{\"source\": \"local\"},\n\t\t\texpectedLocalRevID: db.CreateRevIDWithBytes(2, \"1-b\", []byte(`{\"source\":\"local\"}`)), // rev for local body, transposed under parent 1-b\n\t\t\texpectedResolutionType: db.ConflictResolutionLocal,\n\t\t},\n\t\t{\n\t\t\tname: \"twoTombstonesRemoteWin\",\n\t\t\tlocalRevisionBody: db.Body{\"_deleted\": true, \"source\": \"local\"},\n\t\t\tlocalRevID: \"1-a\",\n\t\t\tremoteRevisionBody: db.Body{\"_deleted\": true, \"source\": \"remote\"},\n\t\t\tremoteRevID: \"1-b\",\n\t\t\tconflictResolver: `function(conflict){}`,\n\t\t\texpectedLocalBody: db.Body{\"source\": \"remote\"},\n\t\t\texpectedLocalRevID: \"1-b\",\n\t\t\tskipActiveLeafAssertion: true,\n\t\t\tskipBodyAssertion: base.TestUseXattrs(),\n\t\t},\n\t\t{\n\t\t\tname: \"twoTombstonesLocalWin\",\n\t\t\tlocalRevisionBody: db.Body{\"_deleted\": true, \"source\": \"local\"},\n\t\t\tlocalRevID: \"1-b\",\n\t\t\tremoteRevisionBody: db.Body{\"_deleted\": true, \"source\": \"remote\"},\n\t\t\tremoteRevID: \"1-a\",\n\t\t\tconflictResolver: `function(conflict){}`,\n\t\t\texpectedLocalBody: db.Body{\"source\": \"local\"},\n\t\t\texpectedLocalRevID: \"1-b\",\n\t\t\tskipActiveLeafAssertion: true,\n\t\t\tskipBodyAssertion: base.TestUseXattrs(),\n\t\t},\n\t}\n\n\tfor _, test := range conflictResolutionTests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tbase.RequireNumTestBuckets(t, 2)\n\t\t\tbase.SetUpTestLogging(t, logger.LevelInfo, logger.KeyHTTP, logger.KeySync, logger.KeyChanges, logger.KeyCRUD)\n\n\t\t\t// Passive\n\t\t\ttb2 := base.GetTestBucket(t)\n\n\t\t\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\tTestBucket: tb2,\n\t\t\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\t\t\"alice\": {\n\t\t\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\t\t\tExplicitChannels: utils.SetOf(\"*\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t})\n\t\t\tdefer rt2.Close()\n\n\t\t\t// Create revision on rt2 (remote)\n\t\t\tdocID := test.name\n\t\t\tresp, err := rt2.PutDocumentWithRevID(docID, test.remoteRevID, \"\", test.remoteRevisionBody)\n\t\t\tassert.NoError(t, err)\n\t\t\tassertStatus(t, resp, http.StatusCreated)\n\t\t\trt2revID := respRevID(t, resp)\n\t\t\tassert.Equal(t, test.remoteRevID, rt2revID)\n\n\t\t\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1.\n\t\t\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\t\t\tdefer srv.Close()\n\n\t\t\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Add basic auth creds to target db URL\n\t\t\tpassiveDBURL.User = 
url.UserPassword(\"alice\", \"pass\")\n\n\t\t\t// Active\n\t\t\ttb1 := base.GetTestBucket(t)\n\n\t\t\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\tTestBucket: tb1,\n\t\t\t})\n\t\t\tdefer rt1.Close()\n\n\t\t\t// Create revision on rt1 (local)\n\t\t\tresp, err = rt1.PutDocumentWithRevID(docID, test.localRevID, \"\", test.localRevisionBody)\n\t\t\tassert.NoError(t, err)\n\t\t\tassertStatus(t, resp, http.StatusCreated)\n\t\t\trt1revID := respRevID(t, resp)\n\t\t\tassert.Equal(t, test.localRevID, rt1revID)\n\n\t\t\tcustomConflictResolver, err := db.NewCustomConflictResolver(test.conflictResolver)\n\t\t\trequire.NoError(t, err)\n\t\t\treplicationStats := base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name())\n\t\t\tar := db.NewActiveReplicator(&db.ActiveReplicatorConfig{\n\t\t\t\tID: t.Name(),\n\t\t\t\tDirection: db.ActiveReplicatorTypePull,\n\t\t\t\tRemoteDBURL: passiveDBURL,\n\t\t\t\tActiveDB: &db.Database{\n\t\t\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t\t\t},\n\t\t\t\tChangesBatchSize: 200,\n\t\t\t\tConflictResolverFunc: customConflictResolver,\n\t\t\t\tContinuous: true,\n\t\t\t\tReplicationStatsMap: replicationStats,\n\t\t\t})\n\t\t\tdefer func() { assert.NoError(t, ar.Stop()) }()\n\n\t\t\t// Start the replicator (implicit connect)\n\t\t\tassert.NoError(t, ar.Start())\n\n\t\t\twaitAndRequireCondition(t, func() bool { return ar.GetStatus().DocsRead == 1 }, \"Expecting DocsRead == 1\")\n\t\t\tswitch test.expectedResolutionType {\n\t\t\tcase db.ConflictResolutionLocal:\n\t\t\t\tassert.Equal(t, 1, int(replicationStats.ConflictResolvedLocalCount.Value()))\n\t\t\t\tassert.Equal(t, 0, int(replicationStats.ConflictResolvedMergedCount.Value()))\n\t\t\t\tassert.Equal(t, 0, int(replicationStats.ConflictResolvedRemoteCount.Value()))\n\t\t\tcase db.ConflictResolutionMerge:\n\t\t\t\tassert.Equal(t, 0, int(replicationStats.ConflictResolvedLocalCount.Value()))\n\t\t\t\tassert.Equal(t, 1, int(replicationStats.ConflictResolvedMergedCount.Value()))\n\t\t\t\tassert.Equal(t, 0, int(replicationStats.ConflictResolvedRemoteCount.Value()))\n\t\t\tcase db.ConflictResolutionRemote:\n\t\t\t\tassert.Equal(t, 0, int(replicationStats.ConflictResolvedLocalCount.Value()))\n\t\t\t\tassert.Equal(t, 0, int(replicationStats.ConflictResolvedMergedCount.Value()))\n\t\t\t\tassert.Equal(t, 1, int(replicationStats.ConflictResolvedRemoteCount.Value()))\n\t\t\tdefault:\n\t\t\t\tassert.Equal(t, 0, int(replicationStats.ConflictResolvedLocalCount.Value()))\n\t\t\t\tassert.Equal(t, 0, int(replicationStats.ConflictResolvedMergedCount.Value()))\n\t\t\t\tassert.Equal(t, 0, int(replicationStats.ConflictResolvedRemoteCount.Value()))\n\t\t\t}\n\t\t\t// wait for the document originally written to rt2 to arrive at rt1. 
Should end up as winner under default conflict resolution\n\n\t\t\tchangesResults, err := rt1.WaitForChanges(1, \"/db/_changes?since=0\", \"\", true)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Len(t, changesResults.Results, 1)\n\t\t\tassert.Equal(t, docID, changesResults.Results[0].ID)\n\t\t\tassert.Equal(t, test.expectedLocalRevID, changesResults.Results[0].Changes[0][\"rev\"])\n\t\t\tlog.Printf(\"Changes response is %+v\", changesResults)\n\n\t\t\tdoc, err := rt1.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, test.expectedLocalRevID, doc.SyncData.CurrentRev)\n\n\t\t\t// This is skipped for tombstone tests running with xattr as xattr tombstones don't have a body to assert\n\t\t\t// against\n\t\t\tif !test.skipBodyAssertion {\n\t\t\t\tassert.Equal(t, test.expectedLocalBody, doc.Body())\n\t\t\t}\n\n\t\t\tlog.Printf(\"Doc %s is %+v\", docID, doc)\n\t\t\tfor revID, revInfo := range doc.SyncData.History {\n\t\t\t\tlog.Printf(\"doc revision [%s]: %+v\", revID, revInfo)\n\t\t\t}\n\n\t\t\tif !test.skipActiveLeafAssertion {\n\t\t\t\t// Validate only one active leaf node remains after conflict resolution, and that all parents\n\t\t\t\t// of leaves have empty bodies\n\t\t\t\tactiveCount := 0\n\t\t\t\tfor _, revID := range doc.SyncData.History.GetLeaves() {\n\t\t\t\t\trevInfo, ok := doc.SyncData.History[revID]\n\t\t\t\t\trequire.True(t, ok)\n\t\t\t\t\tif !revInfo.Deleted {\n\t\t\t\t\t\tactiveCount++\n\t\t\t\t\t}\n\t\t\t\t\tif revInfo.Parent != \"\" {\n\t\t\t\t\t\tparentRevInfo, ok := doc.SyncData.History[revInfo.Parent]\n\t\t\t\t\t\trequire.True(t, ok)\n\t\t\t\t\t\tassert.True(t, parentRevInfo.Body == nil)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tassert.Equal(t, 1, activeCount)\n\t\t\t}\n\t\t})\n\t}\n}", "func TestRetryNotRequired(t *testing.T) {\n\tcheck := assert.New(t)\n\tretryRequired := checkRetryRequired(http.StatusConflict)\n\tcheck.Equal(retryRequired, false)\n}", "func TestRecreateRunningWorkflowFails(t *testing.T) {\n\tr := task.NewTaskRegistry()\n\tr.AddOrchestratorN(\"SleepyWorkflow\", func(ctx *task.OrchestrationContext) (any, error) {\n\t\terr := ctx.CreateTimer(24 * time.Hour).Await(nil)\n\t\treturn nil, err\n\t})\n\n\tctx := context.Background()\n\tclient, engine := startEngine(ctx, t, r)\n\n\tfor _, opt := range GetTestOptions() {\n\t\tt.Run(opt(engine), func(t *testing.T) {\n\t\t\t// Start the first workflow, which will not complete\n\t\t\tvar metadata *api.OrchestrationMetadata\n\t\t\tid, err := client.ScheduleNewOrchestration(ctx, \"SleepyWorkflow\")\n\t\t\tif assert.NoError(t, err) {\n\t\t\t\tif metadata, err = client.WaitForOrchestrationStart(ctx, id); assert.NoError(t, err) {\n\t\t\t\t\tassert.False(t, metadata.IsComplete())\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Attempting to start a second workflow with the same ID should fail\n\t\t\t_, err = client.ScheduleNewOrchestration(ctx, \"SleepyWorkflow\", api.WithInstanceID(id))\n\t\t\trequire.Error(t, err)\n\t\t\t// We expect that the workflow instance ID is included in the error message\n\t\t\tassert.Contains(t, err.Error(), id)\n\t\t})\n\t}\n}", "func TestCommitConflictRace4A(t *testing.T) {\n}", "func TestParallelCreateConflictingTables(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\n\tconst numberOfTables = 30\n\tconst numberOfNodes = 3\n\n\ttc := testcluster.StartTestCluster(t, numberOfNodes, base.TestClusterArgs{})\n\tdefer tc.Stopper().Stop()\n\n\tif _, err := tc.ServerConn(0).Exec(`CREATE DATABASE \"test\"`); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// 
Get the id descriptor generator count.\n\tkvDB := tc.Servers[0].KVClient().(*client.DB)\n\tvar descIDStart int64\n\tif descID, err := kvDB.Get(context.Background(), keys.DescIDGenerator); err != nil {\n\t\tt.Fatal(err)\n\t} else {\n\t\tdescIDStart = descID.ValueInt()\n\t}\n\n\tvar wgStart sync.WaitGroup\n\tvar wgEnd sync.WaitGroup\n\twgStart.Add(numberOfTables)\n\twgEnd.Add(numberOfTables)\n\tsignal := make(chan struct{})\n\tcompleted := make(chan int, numberOfTables)\n\tfor i := 0; i < numberOfTables; i++ {\n\t\tdb := tc.ServerConn(i % numberOfNodes)\n\t\tgo createTestTable(t, tc, 0, db, &wgStart, &wgEnd, signal, completed)\n\t}\n\n\t// Wait until all goroutines are ready.\n\twgStart.Wait()\n\t// Signal the create table goroutines to start.\n\tclose(signal)\n\t// Wait until all create tables are finished.\n\twgEnd.Wait()\n\tclose(completed)\n\n\tverifyTables(\n\t\tt,\n\t\ttc,\n\t\tcompleted,\n\t\t1,\n\t\tdescIDStart,\n\t)\n}", "func TestSendChangesToNoConflictPreHydrogenTarget(t *testing.T) {\n\tt.Skip(\"Test is only for development purposes\")\n\n\tbase.RequireNumTestBuckets(t, 2)\n\n\terrorCountBefore := base.SyncGatewayStats.GlobalStats.ResourceUtilizationStats().ErrorCount.Value()\n\n\t// Passive\n\ttb2 := base.GetTestBucket(t)\n\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: tb2,\n\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\tAllowConflicts: false,\n\t\t}},\n\t})\n\tdefer rt2.Close()\n\n\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: base.GetTestBucket(t),\n\t})\n\tdefer rt1.Close()\n\n\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1.\n\tsrv := httptest.NewTLSServer(rt2.TestAdminHandler())\n\tdefer srv.Close()\n\n\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\trequire.NoError(t, err)\n\n\tar := db.NewActiveReplicator(&db.ActiveReplicatorConfig{\n\t\tID: \"test\",\n\t\tDirection: db.ActiveReplicatorTypePush,\n\t\tRemoteDBURL: passiveDBURL,\n\t\tActiveDB: &db.Database{\n\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t},\n\t\tContinuous: true,\n\t\tInsecureSkipVerify: true,\n\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t})\n\n\tdefer func() {\n\t\trequire.NoError(t, ar.Stop())\n\t}()\n\trequire.NoError(t, ar.Start())\n\n\tassert.Equal(t, errorCountBefore, base.SyncGatewayStats.GlobalStats.ResourceUtilizationStats().ErrorCount.Value())\n\n\tresponse := rt1.SendAdminRequest(\"PUT\", \"/db/doc1\", \"{}\")\n\tassertStatus(t, response, http.StatusCreated)\n\n\terr = rt2.WaitForCondition(func() bool {\n\t\tif base.SyncGatewayStats.GlobalStats.ResourceUtilizationStats().ErrorCount.Value() == errorCountBefore+1 {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t})\n\tassert.NoError(t, err)\n\n\tassert.Equal(t, db.ReplicationStateStopped, ar.GetStatus().Status)\n\tassert.Equal(t, db.PreHydrogenTargetAllowConflictsError.Error(), ar.GetStatus().ErrorMessage)\n}", "func TestImportProjectCreatedFailWhenAPIIsExisted(t *testing.T) {\n\tapim := apimClients[0]\n\tprojectName := \"OpenAPI3Project\"\n\tusername := superAdminUser\n\tpassword := superAdminPassword\n\n\targs := &testutils.InitTestArgs{\n\t\tCtlUser: testutils.Credentials{Username: username, Password: password},\n\t\tSrcAPIM: apim,\n\t\tInitFlag: projectName,\n\t\tOasFlag: utils.TestOpenAPI3DefinitionPath,\n\t\tForceFlag: false,\n\t}\n\n\t//Import API for the First time\n\ttestutils.ValidateImportInitializedProject(t, args)\n\n\t//Import API for the second 
time\n\ttestutils.ValidateImportFailedWithInitializedProject(t, args)\n\n}", "func TestCommitConflictRollback4A(t *testing.T) {\n}", "func patchOrCreate(mapping *meta.RESTMapping, config *rest.Config, group string,\n\tversion string, namespace string, name string, data []byte) error {\n\tlog.Infof(\"Applying resource configuration for %v\", name)\n\terr := getResource(mapping, config, group, version, namespace, name)\n\tif err != nil {\n\t\tlog.Infof(\"getResource error, treating as not found: %v\", err)\n\t\terr = createResource(mapping, config, group, version, namespace, data)\n\t} else {\n\t\tlog.Infof(\"getResource succeeds, treating as found.\")\n\t\terr = patchResource(mapping, config, group, version, namespace, data)\n\t}\n\n\tfor i := 1; i < maxRetries && k8serrors.IsConflict(err); i++ {\n\t\ttime.Sleep(backoffInterval)\n\n\t\tlog.Infof(\"Retrying patchOrCreate at %v attempt ...\", i)\n\t\terr = getResource(mapping, config, group, version, namespace, name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = patchResource(mapping, config, group, version, namespace, data)\n\t}\n\n\tif err != nil && (k8serrors.IsConflict(err) || k8serrors.IsInvalid(err) ||\n\t\tk8serrors.IsMethodNotSupported(err)) {\n\t\tlog.Infof(\"Trying delete and create as last resort ...\")\n\t\tif err = deleteResource(mapping, config, group, version, namespace, name); err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = createResource(mapping, config, group, version, namespace, data)\n\t}\n\treturn err\n}", "func CreateContainerConflict(t goatest.TInterface, ctx context.Context, service *goa.Service, ctrl app.ContainerController, command []string, entrypoint []string, env []string, image string, name string, sslRedirect bool, volumes []string, workingDir *string) (http.ResponseWriter, error) {\n\t// Setup service\n\tvar (\n\t\tlogBuf bytes.Buffer\n\t\tresp interface{}\n\n\t\trespSetter goatest.ResponseSetterFunc = func(r interface{}) { resp = r }\n\t)\n\tif service == nil {\n\t\tservice = goatest.Service(&logBuf, respSetter)\n\t} else {\n\t\tlogger := log.New(&logBuf, \"\", log.Ltime)\n\t\tservice.WithLogger(goa.NewLogger(logger))\n\t\tnewEncoder := func(io.Writer) goa.Encoder { return respSetter }\n\t\tservice.Encoder = goa.NewHTTPEncoder() // Make sure the code ends up using this decoder\n\t\tservice.Encoder.Register(newEncoder, \"*/*\")\n\t}\n\n\t// Setup request context\n\trw := httptest.NewRecorder()\n\tquery := url.Values{}\n\t{\n\t\tsliceVal := command\n\t\tquery[\"command\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := entrypoint\n\t\tquery[\"entrypoint\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := env\n\t\tquery[\"env\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{image}\n\t\tquery[\"image\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{name}\n\t\tquery[\"name\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{fmt.Sprintf(\"%v\", sslRedirect)}\n\t\tquery[\"sslRedirect\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := volumes\n\t\tquery[\"volumes\"] = sliceVal\n\t}\n\tif workingDir != nil {\n\t\tsliceVal := []string{*workingDir}\n\t\tquery[\"workingDir\"] = sliceVal\n\t}\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"/api/v2/container/create\"),\n\t\tRawQuery: query.Encode(),\n\t}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\tpanic(\"invalid test \" + err.Error()) // bug\n\t}\n\tprms := url.Values{}\n\t{\n\t\tsliceVal := command\n\t\tprms[\"command\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := entrypoint\n\t\tprms[\"entrypoint\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := env\n\t\tprms[\"env\"] = 
sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{image}\n\t\tprms[\"image\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{name}\n\t\tprms[\"name\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{fmt.Sprintf(\"%v\", sslRedirect)}\n\t\tprms[\"sslRedirect\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := volumes\n\t\tprms[\"volumes\"] = sliceVal\n\t}\n\tif workingDir != nil {\n\t\tsliceVal := []string{*workingDir}\n\t\tprms[\"workingDir\"] = sliceVal\n\t}\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tgoaCtx := goa.NewContext(goa.WithAction(ctx, \"ContainerTest\"), rw, req, prms)\n\tcreateCtx, _err := app.NewCreateContainerContext(goaCtx, req, service)\n\tif _err != nil {\n\t\te, ok := _err.(goa.ServiceError)\n\t\tif !ok {\n\t\t\tpanic(\"invalid test data \" + _err.Error()) // bug\n\t\t}\n\t\treturn nil, e\n\t}\n\n\t// Perform action\n\t_err = ctrl.Create(createCtx)\n\n\t// Validate response\n\tif _err != nil {\n\t\tt.Fatalf(\"controller returned %+v, logs:\\n%s\", _err, logBuf.String())\n\t}\n\tif rw.Code != 409 {\n\t\tt.Errorf(\"invalid response status code: got %+v, expected 409\", rw.Code)\n\t}\n\tvar mt error\n\tif resp != nil {\n\t\tvar _ok bool\n\t\tmt, _ok = resp.(error)\n\t\tif !_ok {\n\t\t\tt.Fatalf(\"invalid response media: got variable of type %T, value %+v, expected instance of error\", resp, resp)\n\t\t}\n\t}\n\n\t// Return results\n\treturn rw, mt\n}", "func TestCheckRequiredTemplate_Create_DoesNotSwallowError(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tr := ResourceCheckRequiredTemplate()\n\tresourceData := schema.TestResourceDataRaw(t, r.Schema, nil)\n\tflattenErr := flattenCheckRequiredTemplate(resourceData, &requiredTemplateCheckTest, requiredTemplateCheckProjectID)\n\n\tpipelinesChecksClient := azdosdkmocks.NewMockPipelineschecksextrasClient(ctrl)\n\tclients := &client.AggregatedClient{PipelinesChecksClientExtras: pipelinesChecksClient, Ctx: context.Background()}\n\n\texpectedArgs := pipelineschecksextras.AddCheckConfigurationArgs{Configuration: &requiredTemplateCheckTest, Project: &requiredTemplateCheckProjectID}\n\tpipelinesChecksClient.\n\t\tEXPECT().\n\t\tAddCheckConfiguration(clients.Ctx, expectedArgs).\n\t\tReturn(nil, errors.New(\"AddCheckConfiguration() Failed\")).\n\t\tTimes(1)\n\n\terr := r.Create(resourceData, clients)\n\trequire.Contains(t, err.Error(), \"AddCheckConfiguration() Failed\")\n\trequire.Nil(t, flattenErr)\n}", "func TestK8gbRepeatedlyRecreatedFromIngress(t *testing.T) {\n\tt.Parallel()\n\t// name of ingress and gslb\n\tconst name = \"test-gslb-failover-simple\"\n\n\tassertStrategy := func(t *testing.T, options *k8s.KubectlOptions) {\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.splitBrainThresholdSeconds\", \"300\")\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.dnsTtlSeconds\", \"30\")\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.primaryGeoTag\", settings.PrimaryGeoTag)\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.type\", \"failover\")\n\t}\n\n\t// Path to the Kubernetes resource config we will test\n\tingressResourcePath, err := filepath.Abs(\"../examples/ingress-annotation-failover-simple.yaml\")\n\trequire.NoError(t, err)\n\n\t// To ensure we can reuse the resource config on the same cluster to test different scenarios, we setup a unique\n\t// namespace for the resources for this test.\n\t// Note that namespaces must be lowercase.\n\tnamespaceName := fmt.Sprintf(\"k8gb-test-repeatedly-recreated-from-ingress-%s\", 
strings.ToLower(random.UniqueId()))\n\n\t// Here we choose to use the defaults, which is:\n\t// - HOME/.kube/config for the kubectl config file\n\t// - Current context of the kubectl config file\n\t// - Random namespace\n\toptions := k8s.NewKubectlOptions(\"\", \"\", namespaceName)\n\n\tk8s.CreateNamespace(t, options, namespaceName)\n\n\tdefer k8s.DeleteNamespace(t, options, namespaceName)\n\n\tdefer k8s.KubectlDelete(t, options, ingressResourcePath)\n\n\tutils.CreateGslb(t, options, settings, ingressResourcePath)\n\n\tk8s.WaitUntilIngressAvailable(t, options, name, 60, 1*time.Second)\n\n\tingress := k8s.GetIngress(t, options, name)\n\n\trequire.Equal(t, ingress.Name, name)\n\n\t// assert Gslb strategy has expected values\n\tassertStrategy(t, options)\n\n\tk8s.KubectlDelete(t, options, ingressResourcePath)\n\n\tutils.AssertGslbDeleted(t, options, ingress.Name)\n\n\t// recreate ingress\n\tutils.CreateGslb(t, options, settings, ingressResourcePath)\n\n\tk8s.WaitUntilIngressAvailable(t, options, name, 60, 1*time.Second)\n\n\tingress = k8s.GetIngress(t, options, name)\n\n\trequire.Equal(t, ingress.Name, name)\n\t// assert Gslb strategy has expected values\n\tassertStrategy(t, options)\n}", "func TestCreateOrUpdateResource(t *testing.T) {\n\tt.Run(\"ready status unknown\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\t\tg.Expect(err.Error()).To(ContainSubstring(\"ready status unknown\"))\n\t})\n\n\tt.Run(\"create resource that doesn't already exist\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.EXPECT().Parameters(gomockinternal.AContext(), gomock.Nil()).Return(&asoresourcesv1.ResourceGroup{\n\t\t\tSpec: asoresourcesv1.ResourceGroup_Spec{\n\t\t\t\tLocation: ptr.To(\"location\"),\n\t\t\t},\n\t\t}, nil)\n\n\t\tctx := context.Background()\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\t\tg.Expect(azure.IsOperationNotDoneError(err)).To(BeTrue())\n\t\tvar recerr 
azure.ReconcileError\n\t\tg.Expect(errors.As(err, &recerr)).To(BeTrue())\n\t\tg.Expect(recerr.IsTransient()).To(BeTrue())\n\n\t\tcreated := &asoresourcesv1.ResourceGroup{}\n\t\tg.Expect(c.Get(ctx, types.NamespacedName{Name: \"name\", Namespace: \"namespace\"}, created)).To(Succeed())\n\t\tg.Expect(created.Name).To(Equal(\"name\"))\n\t\tg.Expect(created.Namespace).To(Equal(\"namespace\"))\n\t\tg.Expect(created.Labels).To(Equal(map[string]string{\n\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t}))\n\t\tg.Expect(created.Annotations).To(Equal(map[string]string{\n\t\t\tReconcilePolicyAnnotation: ReconcilePolicySkip,\n\t\t\tSecretNameAnnotation: \"cluster-aso-secret\",\n\t\t}))\n\t\tg.Expect(created.Spec).To(Equal(asoresourcesv1.ResourceGroup_Spec{\n\t\t\tLocation: ptr.To(\"location\"),\n\t\t}))\n\t})\n\n\tt.Run(\"resource is not ready in non-terminal state\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionFalse,\n\t\t\t\t\t\tSeverity: conditions.ConditionSeverityInfo,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\t\tg.Expect(err.Error()).To(ContainSubstring(\"resource is not Ready\"))\n\t\tvar recerr azure.ReconcileError\n\t\tg.Expect(errors.As(err, &recerr)).To(BeTrue())\n\t\tg.Expect(recerr.IsTransient()).To(BeTrue())\n\t})\n\n\tt.Run(\"resource is not ready in reconciling state\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: 
metav1.ConditionFalse,\n\t\t\t\t\t\tSeverity: conditions.ConditionSeverityInfo,\n\t\t\t\t\t\tReason: conditions.ReasonReconciling.Name,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(azure.IsOperationNotDoneError(err)).To(BeTrue())\n\t})\n\n\tt.Run(\"resource is not ready in terminal state\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionFalse,\n\t\t\t\t\t\tSeverity: conditions.ConditionSeverityError,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\t\tg.Expect(err.Error()).To(ContainSubstring(\"resource is not Ready\"))\n\t\tvar recerr azure.ReconcileError\n\t\tg.Expect(errors.As(err, &recerr)).To(BeTrue())\n\t\tg.Expect(recerr.IsTerminal()).To(BeTrue())\n\t})\n\n\tt.Run(\"error getting existing resource\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(ErroringGetClient{Client: c, err: errors.New(\"an error\")}, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\n\t\tctx := context.Background()\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\t\tg.Expect(err.Error()).To(ContainSubstring(\"failed to get existing resource\"))\n\t})\n\n\tt.Run(\"begin an update\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.EXPECT().Parameters(gomockinternal.AContext(), 
gomock.Not(gomock.Nil())).DoAndReturn(func(_ context.Context, object genruntime.MetaObject) (genruntime.MetaObject, error) {\n\t\t\tgroup := object.DeepCopyObject().(*asoresourcesv1.ResourceGroup)\n\t\t\tgroup.Spec.Location = ptr.To(\"location\")\n\t\t\treturn group, nil\n\t\t})\n\t\tspecMock.EXPECT().WasManaged(gomock.Any()).Return(false)\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\t})\n\n\tt.Run(\"adopt managed resource in not found state\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\tclusterName := \"cluster\"\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.EXPECT().Parameters(gomockinternal.AContext(), gomock.Not(gomock.Nil())).DoAndReturn(func(_ context.Context, object genruntime.MetaObject) (genruntime.MetaObject, error) {\n\t\t\treturn object, nil\n\t\t})\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\tReconcilePolicyAnnotation: ReconcilePolicySkip,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionFalse,\n\t\t\t\t\t\tReason: conditions.ReasonAzureResourceNotFound.Name,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\n\t\tupdated := &asoresourcesv1.ResourceGroup{}\n\t\tg.Expect(c.Get(ctx, types.NamespacedName{Name: \"name\", Namespace: \"namespace\"}, updated)).To(Succeed())\n\t\tg.Expect(updated.Annotations).To(Equal(map[string]string{\n\t\t\tReconcilePolicyAnnotation: ReconcilePolicyManage,\n\t\t\tSecretNameAnnotation: \"cluster-aso-secret\",\n\t\t}))\n\t})\n\n\tt.Run(\"adopt previously managed resource\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\tclusterName := \"cluster\"\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := 
gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.EXPECT().Parameters(gomockinternal.AContext(), gomock.Not(gomock.Nil())).DoAndReturn(func(_ context.Context, object genruntime.MetaObject) (genruntime.MetaObject, error) {\n\t\t\treturn nil, nil\n\t\t})\n\t\tspecMock.EXPECT().WasManaged(gomock.Any()).Return(true)\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\tReconcilePolicyAnnotation: ReconcilePolicySkip,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\n\t\tupdated := &asoresourcesv1.ResourceGroup{}\n\t\tg.Expect(c.Get(ctx, types.NamespacedName{Name: \"name\", Namespace: \"namespace\"}, updated)).To(Succeed())\n\t\tg.Expect(updated.Annotations).To(Equal(map[string]string{\n\t\t\tReconcilePolicyAnnotation: ReconcilePolicyManage,\n\t\t\tSecretNameAnnotation: \"cluster-aso-secret\",\n\t\t}))\n\t})\n\n\tt.Run(\"Parameters error\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.EXPECT().Parameters(gomockinternal.AContext(), gomock.Not(gomock.Nil())).Return(nil, errors.New(\"parameters error\"))\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\t\tg.Expect(err.Error()).To(ContainSubstring(\"parameters error\"))\n\t})\n\n\tt.Run(\"skip update for unmanaged resource\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, 
clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).NotTo(BeNil())\n\t\tg.Expect(err).To(BeNil())\n\t})\n\n\tt.Run(\"resource up to date\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.EXPECT().Parameters(gomockinternal.AContext(), gomock.Any()).DoAndReturn(func(_ context.Context, object genruntime.MetaObject) (genruntime.MetaObject, error) {\n\t\t\treturn nil, nil\n\t\t})\n\t\tspecMock.EXPECT().WasManaged(gomock.Any()).Return(false)\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\tReconcilePolicyAnnotation: ReconcilePolicyManage,\n\t\t\t\t\tSecretNameAnnotation: \"cluster-aso-secret\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: asoresourcesv1.ResourceGroup_Spec{\n\t\t\t\tLocation: ptr.To(\"location\"),\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).NotTo(BeNil())\n\t\tg.Expect(err).To(BeNil())\n\n\t\tg.Expect(result.GetName()).To(Equal(\"name\"))\n\t\tg.Expect(result.GetNamespace()).To(Equal(\"namespace\"))\n\t\tg.Expect(result.(*asoresourcesv1.ResourceGroup).Spec.Location).To(Equal(ptr.To(\"location\")))\n\t})\n\n\tt.Run(\"error updating\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(ErroringPatchClient{Client: c, err: errors.New(\"an error\")}, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: 
metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.EXPECT().Parameters(gomockinternal.AContext(), gomock.Any()).DoAndReturn(func(_ context.Context, object genruntime.MetaObject) (genruntime.MetaObject, error) {\n\t\t\tgroup := object.DeepCopyObject().(*asoresourcesv1.ResourceGroup)\n\t\t\tgroup.Spec.Location = ptr.To(\"location\")\n\t\t\treturn group, nil\n\t\t})\n\t\tspecMock.EXPECT().WasManaged(gomock.Any()).Return(false)\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\t\tg.Expect(err.Error()).To(ContainSubstring(\"failed to update resource\"))\n\t})\n\n\tt.Run(\"with tags success\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := struct {\n\t\t\t*mock_azure.MockASOResourceSpecGetter\n\t\t\t*mock_aso.MockTagsGetterSetter\n\t\t}{\n\t\t\tMockASOResourceSpecGetter: mock_azure.NewMockASOResourceSpecGetter(mockCtrl),\n\t\t\tMockTagsGetterSetter: mock_aso.NewMockTagsGetterSetter(mockCtrl),\n\t\t}\n\t\tspecMock.MockASOResourceSpecGetter.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.MockASOResourceSpecGetter.EXPECT().Parameters(gomockinternal.AContext(), gomock.Any()).DoAndReturn(func(_ context.Context, object genruntime.MetaObject) (genruntime.MetaObject, error) {\n\t\t\treturn nil, nil\n\t\t})\n\t\tspecMock.MockASOResourceSpecGetter.EXPECT().WasManaged(gomock.Any()).Return(false)\n\n\t\tspecMock.MockTagsGetterSetter.EXPECT().GetActualTags(gomock.Any()).Return(nil)\n\t\tspecMock.MockTagsGetterSetter.EXPECT().GetAdditionalTags().Return(nil)\n\t\tspecMock.MockTagsGetterSetter.EXPECT().GetDesiredTags(gomock.Any()).Return(nil)\n\t\tspecMock.MockTagsGetterSetter.EXPECT().SetTags(gomock.Any(), gomock.Any())\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\tReconcilePolicyAnnotation: ReconcilePolicyManage,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, 
\"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(azure.IsOperationNotDoneError(err)).To(BeTrue())\n\n\t\tupdated := &asoresourcesv1.ResourceGroup{}\n\t\tg.Expect(c.Get(ctx, types.NamespacedName{Name: \"name\", Namespace: \"namespace\"}, updated)).To(Succeed())\n\t\tg.Expect(updated.Annotations).To(HaveKey(tagsLastAppliedAnnotation))\n\t})\n\n\tt.Run(\"with tags failure\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := struct {\n\t\t\t*mock_azure.MockASOResourceSpecGetter\n\t\t\t*mock_aso.MockTagsGetterSetter\n\t\t}{\n\t\t\tMockASOResourceSpecGetter: mock_azure.NewMockASOResourceSpecGetter(mockCtrl),\n\t\t\tMockTagsGetterSetter: mock_aso.NewMockTagsGetterSetter(mockCtrl),\n\t\t}\n\t\tspecMock.MockASOResourceSpecGetter.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.MockASOResourceSpecGetter.EXPECT().Parameters(gomockinternal.AContext(), gomock.Any()).DoAndReturn(func(_ context.Context, object genruntime.MetaObject) (genruntime.MetaObject, error) {\n\t\t\treturn nil, nil\n\t\t})\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\tReconcilePolicyAnnotation: ReconcilePolicyManage,\n\t\t\t\t\ttagsLastAppliedAnnotation: \"{\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err.Error()).To(ContainSubstring(\"failed to reconcile tags\"))\n\t})\n\n\tt.Run(\"reconcile policy annotation resets after un-pause\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.EXPECT().Parameters(gomockinternal.AContext(), gomock.Any()).DoAndReturn(func(_ context.Context, object genruntime.MetaObject) (genruntime.MetaObject, error) {\n\t\t\treturn nil, nil\n\t\t})\n\t\tspecMock.EXPECT().WasManaged(gomock.Any()).Return(false)\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t\tAnnotations: 
map[string]string{\n\t\t\t\t\tPrePauseReconcilePolicyAnnotation: ReconcilePolicyManage,\n\t\t\t\t\tReconcilePolicyAnnotation: ReconcilePolicySkip,\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: asoresourcesv1.ResourceGroup_Spec{\n\t\t\t\tLocation: ptr.To(\"location\"),\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(azure.IsOperationNotDoneError(err)).To(BeTrue())\n\n\t\tupdated := &asoresourcesv1.ResourceGroup{}\n\t\tg.Expect(c.Get(ctx, types.NamespacedName{Name: \"name\", Namespace: \"namespace\"}, updated)).To(Succeed())\n\t\tg.Expect(updated.Annotations).NotTo(HaveKey(PrePauseReconcilePolicyAnnotation))\n\t\tg.Expect(updated.Annotations).To(HaveKeyWithValue(ReconcilePolicyAnnotation, ReconcilePolicyManage))\n\t})\n}", "func CreateTemplateFailStatusMocked(t *testing.T, templateIn *types.Template) *types.Template {\n\n\tassert := assert.New(t)\n\n\t// wire up\n\tcs := &utils.MockConcertoService{}\n\tds, err := NewTemplateService(cs)\n\tassert.Nil(err, \"Couldn't load template service\")\n\tassert.NotNil(ds, \"Template service not instanced\")\n\n\t// convertMap\n\tmapIn, err := utils.ItemConvertParams(*templateIn)\n\tassert.Nil(err, \"Template test data corrupted\")\n\n\t// to json\n\tdOut, err := json.Marshal(templateIn)\n\tassert.Nil(err, \"Template test data corrupted\")\n\n\t// call service\n\tcs.On(\"Post\", \"/blueprint/templates/\", mapIn).Return(dOut, 499, nil)\n\ttemplateOut, err := ds.CreateTemplate(mapIn)\n\tassert.NotNil(err, \"We are expecting an status code error\")\n\tassert.Nil(templateOut, \"Expecting nil output\")\n\tassert.Contains(err.Error(), \"499\", \"Error should contain http code 499\")\n\n\treturn templateOut\n}", "func TestActiveReplicatorPushAndPullConflict(t *testing.T) {\n\n\t// scenarios\n\tconflictResolutionTests := []struct {\n\t\tname string\n\t\tlocalRevisionBody []byte\n\t\tlocalRevID string\n\t\tremoteRevisionBody []byte\n\t\tremoteRevID string\n\t\tcommonAncestorRevID string\n\t\tconflictResolver string\n\t\texpectedBody []byte\n\t\texpectedRevID string\n\t\texpectedTombstonedRevID string\n\t}{\n\t\t{\n\t\t\tname: \"remoteWins\",\n\t\t\tlocalRevisionBody: []byte(`{\"source\": \"local\"}`),\n\t\t\tlocalRevID: \"1-a\",\n\t\t\tremoteRevisionBody: []byte(`{\"source\": \"remote\"}`),\n\t\t\tremoteRevID: \"1-b\",\n\t\t\tconflictResolver: `function(conflict) {return conflict.RemoteDocument;}`,\n\t\t\texpectedBody: []byte(`{\"source\": \"remote\"}`),\n\t\t\texpectedRevID: \"1-b\",\n\t\t},\n\t\t{\n\t\t\tname: \"merge\",\n\t\t\tlocalRevisionBody: []byte(`{\"source\": \"local\"}`),\n\t\t\tlocalRevID: \"1-a\",\n\t\t\tremoteRevisionBody: []byte(`{\"source\": \"remote\"}`),\n\t\t\tremoteRevID: \"1-b\",\n\t\t\tconflictResolver: `function(conflict) {\n\t\t\t\t\t\t\tvar mergedDoc = new Object();\n\t\t\t\t\t\t\tmergedDoc.source = \"merged\";\n\t\t\t\t\t\t\treturn mergedDoc;\n\t\t\t\t\t\t}`,\n\t\t\texpectedBody: []byte(`{\"source\": \"merged\"}`),\n\t\t\texpectedRevID: db.CreateRevIDWithBytes(2, \"1-b\", []byte(`{\"source\":\"merged\"}`)), // rev for merged body, with parent 1-b\n\t\t},\n\t\t{\n\t\t\tname: \"localWins\",\n\t\t\tlocalRevisionBody: []byte(`{\"source\": \"local\"}`),\n\t\t\tlocalRevID: \"1-a\",\n\t\t\tremoteRevisionBody: 
[]byte(`{\"source\": \"remote\"}`),\n\t\t\tremoteRevID: \"1-b\",\n\t\t\tconflictResolver: `function(conflict) {return conflict.LocalDocument;}`,\n\t\t\texpectedBody: []byte(`{\"source\": \"local\"}`),\n\t\t\texpectedRevID: db.CreateRevIDWithBytes(2, \"1-b\", []byte(`{\"source\":\"local\"}`)), // rev for local body, transposed under parent 1-b\n\t\t},\n\t\t{\n\t\t\tname: \"localWinsRemoteTombstone\",\n\t\t\tlocalRevisionBody: []byte(`{\"source\": \"local\"}`),\n\t\t\tlocalRevID: \"2-a\",\n\t\t\tremoteRevisionBody: []byte(`{\"_deleted\": true}`),\n\t\t\tremoteRevID: \"2-b\",\n\t\t\tcommonAncestorRevID: \"1-a\",\n\t\t\tconflictResolver: `function(conflict) {return conflict.LocalDocument;}`,\n\t\t\texpectedBody: []byte(`{\"source\": \"local\"}`),\n\t\t\texpectedRevID: db.CreateRevIDWithBytes(3, \"2-b\", []byte(`{\"source\":\"local\"}`)), // rev for local body, transposed under parent 2-b\n\t\t},\n\t}\n\n\tfor _, test := range conflictResolutionTests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tbase.RequireNumTestBuckets(t, 2)\n\t\t\tbase.SetUpTestLogging(t, logger.LevelInfo, logger.KeyHTTP, logger.KeySync, logger.KeyChanges, logger.KeyCRUD)\n\n\t\t\t// Passive\n\t\t\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\tTestBucket: base.GetTestBucket(t),\n\t\t\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\t\t\"alice\": {\n\t\t\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\t\t\tExplicitChannels: utils.SetOf(\"*\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t})\n\t\t\tdefer rt2.Close()\n\n\t\t\tvar localRevisionBody db.Body\n\t\t\tassert.NoError(t, json.Unmarshal(test.localRevisionBody, &localRevisionBody))\n\n\t\t\tvar remoteRevisionBody db.Body\n\t\t\tassert.NoError(t, json.Unmarshal(test.remoteRevisionBody, &remoteRevisionBody))\n\n\t\t\tvar expectedLocalBody db.Body\n\t\t\tassert.NoError(t, json.Unmarshal(test.expectedBody, &expectedLocalBody))\n\n\t\t\t// Create revision on rt2 (remote)\n\t\t\tdocID := test.name\n\n\t\t\tif test.commonAncestorRevID != \"\" {\n\t\t\t\tresp, err := rt2.PutDocumentWithRevID(docID, test.commonAncestorRevID, \"\", remoteRevisionBody)\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassertStatus(t, resp, http.StatusCreated)\n\t\t\t\trt2revID := respRevID(t, resp)\n\t\t\t\tassert.Equal(t, test.commonAncestorRevID, rt2revID)\n\t\t\t}\n\n\t\t\tresp, err := rt2.PutDocumentWithRevID(docID, test.remoteRevID, test.commonAncestorRevID, remoteRevisionBody)\n\t\t\tassert.NoError(t, err)\n\t\t\tassertStatus(t, resp, http.StatusCreated)\n\t\t\trt2revID := respRevID(t, resp)\n\t\t\tassert.Equal(t, test.remoteRevID, rt2revID)\n\n\t\t\tremoteDoc, err := rt2.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalSync)\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1.\n\t\t\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\t\t\tdefer srv.Close()\n\n\t\t\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Add basic auth creds to target db URL\n\t\t\tpassiveDBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\t\t\t// Active\n\t\t\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\tTestBucket: base.GetTestBucket(t),\n\t\t\t})\n\t\t\tdefer rt1.Close()\n\n\t\t\t// Create revision on rt1 (local)\n\t\t\tif test.commonAncestorRevID != \"\" {\n\t\t\t\tresp, err = rt1.PutDocumentWithRevID(docID, test.commonAncestorRevID, \"\", 
localRevisionBody)\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassertStatus(t, resp, http.StatusCreated)\n\t\t\t\trt1revID := respRevID(t, resp)\n\t\t\t\tassert.Equal(t, test.commonAncestorRevID, rt1revID)\n\t\t\t}\n\n\t\t\tresp, err = rt1.PutDocumentWithRevID(docID, test.localRevID, test.commonAncestorRevID, localRevisionBody)\n\t\t\tassert.NoError(t, err)\n\t\t\tassertStatus(t, resp, http.StatusCreated)\n\t\t\trt1revID := respRevID(t, resp)\n\t\t\tassert.Equal(t, test.localRevID, rt1revID)\n\n\t\t\tlocalDoc, err := rt1.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalSync)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tcustomConflictResolver, err := db.NewCustomConflictResolver(test.conflictResolver)\n\t\t\trequire.NoError(t, err)\n\t\t\tar := db.NewActiveReplicator(&db.ActiveReplicatorConfig{\n\t\t\t\tID: t.Name(),\n\t\t\t\tDirection: db.ActiveReplicatorTypePushAndPull,\n\t\t\t\tRemoteDBURL: passiveDBURL,\n\t\t\t\tActiveDB: &db.Database{\n\t\t\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t\t\t},\n\t\t\t\tChangesBatchSize: 200,\n\t\t\t\tConflictResolverFunc: customConflictResolver,\n\t\t\t\tContinuous: true,\n\t\t\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t\t\t})\n\t\t\tdefer func() { assert.NoError(t, ar.Stop()) }()\n\n\t\t\t// Start the replicator (implicit connect)\n\t\t\tassert.NoError(t, ar.Start())\n\t\t\t// wait for the document originally written to rt2 to arrive at rt1. Should end up as winner under default conflict resolution\n\t\t\tbase.WaitForStat(func() int64 {\n\t\t\t\treturn ar.GetStatus().DocsWritten\n\t\t\t}, 1)\n\t\t\tlog.Printf(\"========================Replication should be done, checking with changes\")\n\n\t\t\t// Validate results on the local (rt1)\n\t\t\tchangesResults, err := rt1.WaitForChanges(1, fmt.Sprintf(\"/db/_changes?since=%d\", localDoc.Sequence), \"\", true)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Len(t, changesResults.Results, 1)\n\t\t\tassert.Equal(t, docID, changesResults.Results[0].ID)\n\t\t\tassert.Equal(t, test.expectedRevID, changesResults.Results[0].Changes[0][\"rev\"])\n\t\t\tlog.Printf(\"Changes response is %+v\", changesResults)\n\n\t\t\trawDocResponse := rt1.SendAdminRequest(http.MethodGet, \"/db/_raw/\"+docID, \"\")\n\t\t\tlog.Printf(\"Raw response: %s\", rawDocResponse.Body.Bytes())\n\n\t\t\tdocResponse := rt1.SendAdminRequest(http.MethodGet, \"/db/\"+docID, \"\")\n\t\t\tlog.Printf(\"Non-raw response: %s\", docResponse.Body.Bytes())\n\n\t\t\tdoc, err := rt1.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, test.expectedRevID, doc.SyncData.CurrentRev)\n\t\t\tassert.Equal(t, expectedLocalBody, doc.Body())\n\t\t\tlog.Printf(\"Doc %s is %+v\", docID, doc)\n\t\t\tlog.Printf(\"Doc %s attachments are %+v\", docID, doc.Attachments)\n\t\t\tfor revID, revInfo := range doc.SyncData.History {\n\t\t\t\tlog.Printf(\"doc revision [%s]: %+v\", revID, revInfo)\n\t\t\t}\n\n\t\t\t// Validate only one active leaf node remains after conflict resolution, and that all parents\n\t\t\t// of leaves have empty bodies\n\t\t\tactiveCount := 0\n\t\t\tfor _, revID := range doc.SyncData.History.GetLeaves() {\n\t\t\t\trevInfo, ok := doc.SyncData.History[revID]\n\t\t\t\trequire.True(t, ok)\n\t\t\t\tif !revInfo.Deleted {\n\t\t\t\t\tactiveCount++\n\t\t\t\t}\n\t\t\t\tif revInfo.Parent != \"\" {\n\t\t\t\t\tparentRevInfo, ok := doc.SyncData.History[revInfo.Parent]\n\t\t\t\t\trequire.True(t, 
ok)\n\t\t\t\t\tassert.True(t, parentRevInfo.Body == nil)\n\t\t\t\t}\n\t\t\t}\n\t\t\tassert.Equal(t, 1, activeCount)\n\n\t\t\t// Validate results on the remote (rt2)\n\t\t\trt2Since := remoteDoc.Sequence\n\t\t\tif test.expectedRevID == test.remoteRevID {\n\t\t\t\t// no changes should have been pushed back up to rt2, because this rev won.\n\t\t\t\trt2Since = 0\n\t\t\t}\n\t\t\tchangesResults, err = rt2.WaitForChanges(1, fmt.Sprintf(\"/db/_changes?since=%d\", rt2Since), \"\", true)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Len(t, changesResults.Results, 1)\n\t\t\tassert.Equal(t, docID, changesResults.Results[0].ID)\n\t\t\tassert.Equal(t, test.expectedRevID, changesResults.Results[0].Changes[0][\"rev\"])\n\t\t\tlog.Printf(\"Changes response is %+v\", changesResults)\n\n\t\t\tdoc, err = rt2.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, test.expectedRevID, doc.SyncData.CurrentRev)\n\t\t\tassert.Equal(t, expectedLocalBody, doc.Body())\n\t\t\tlog.Printf(\"Remote Doc %s is %+v\", docID, doc)\n\t\t\tlog.Printf(\"Remote Doc %s attachments are %+v\", docID, doc.Attachments)\n\t\t\tfor revID, revInfo := range doc.SyncData.History {\n\t\t\t\tlog.Printf(\"doc revision [%s]: %+v\", revID, revInfo)\n\t\t\t}\n\n\t\t\t// Validate only one active leaf node remains after conflict resolution, and that all parents\n\t\t\t// of leaves have empty bodies\n\t\t\tactiveCount = 0\n\t\t\tfor _, revID := range doc.SyncData.History.GetLeaves() {\n\t\t\t\trevInfo, ok := doc.SyncData.History[revID]\n\t\t\t\trequire.True(t, ok)\n\t\t\t\tif !revInfo.Deleted {\n\t\t\t\t\tactiveCount++\n\t\t\t\t}\n\t\t\t\tif revInfo.Parent != \"\" {\n\t\t\t\t\tparentRevInfo, ok := doc.SyncData.History[revInfo.Parent]\n\t\t\t\t\trequire.True(t, ok)\n\t\t\t\t\tassert.True(t, parentRevInfo.Body == nil)\n\t\t\t\t}\n\t\t\t}\n\t\t\tassert.Equal(t, 1, activeCount)\n\t\t})\n\t}\n}", "func TestManifestSyncJob(t *testing.T) {\n\tforAllReplicaTypes(t, func(strategy string) {\n\t\ttest.WithRoundTripper(func(_ *test.RoundTripper) {\n\t\t\tj1, s1 := setup(t)\n\t\t\tj2, s2 := setupReplica(t, s1, strategy)\n\t\t\ts1.Clock.StepBy(1 * time.Hour)\n\t\t\treplicaToken := s2.GetToken(t, \"repository:test1/foo:pull\")\n\t\t\tsyncManifestsJob1 := j1.ManifestSyncJob(s1.Registry)\n\t\t\tsyncManifestsJob2 := j2.ManifestSyncJob(s2.Registry)\n\n\t\t\t//upload some manifests...\n\t\t\timages := make([]test.Image, 4)\n\t\t\tfor idx := range images {\n\t\t\t\timage := test.GenerateImage(\n\t\t\t\t\ttest.GenerateExampleLayer(int64(10*idx+1)),\n\t\t\t\t\ttest.GenerateExampleLayer(int64(10*idx+2)),\n\t\t\t\t)\n\t\t\t\timages[idx] = image\n\n\t\t\t\t//...to the primary account...\n\t\t\t\timage.MustUpload(t, s1, fooRepoRef, \"\")\n\n\t\t\t\t//...and most of them also to the replica account (to simulate replication having taken place)\n\t\t\t\tif idx != 0 {\n\t\t\t\t\tassert.HTTPRequest{\n\t\t\t\t\t\tMethod: \"GET\",\n\t\t\t\t\t\tPath: fmt.Sprintf(\"/v2/test1/foo/manifests/%s\", image.Manifest.Digest),\n\t\t\t\t\t\tHeader: map[string]string{\"Authorization\": \"Bearer \" + replicaToken},\n\t\t\t\t\t\tExpectStatus: http.StatusOK,\n\t\t\t\t\t\tExpectBody: assert.ByteData(image.Manifest.Contents),\n\t\t\t\t\t}.Check(t, s2.Handler)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t//some of the replicated images are also tagged\n\t\t\tfor _, db := range []*keppel.DB{s1.DB, s2.DB} {\n\t\t\t\tfor _, tagName := range []string{\"latest\", \"other\"} {\n\t\t\t\t\tmustExec(t, db,\n\t\t\t\t\t\t`INSERT INTO tags (repo_id, name, 
digest, pushed_at) VALUES (1, $1, $2, $3)`,\n\t\t\t\t\t\ttagName,\n\t\t\t\t\t\timages[1].Manifest.Digest,\n\t\t\t\t\t\ts1.Clock.Now(),\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t//also setup an image list manifest containing some of those images (so that we have\n\t\t\t//some manifest-manifest refs to play with)\n\t\t\timageList := test.GenerateImageList(images[1], images[2])\n\t\t\timageList.MustUpload(t, s1, fooRepoRef, \"\")\n\t\t\t//this one is replicated as well\n\t\t\tassert.HTTPRequest{\n\t\t\t\tMethod: \"GET\",\n\t\t\t\tPath: fmt.Sprintf(\"/v2/test1/foo/manifests/%s\", imageList.Manifest.Digest),\n\t\t\t\tHeader: map[string]string{\"Authorization\": \"Bearer \" + replicaToken},\n\t\t\t\tExpectStatus: http.StatusOK,\n\t\t\t\tExpectBody: assert.ByteData(imageList.Manifest.Contents),\n\t\t\t}.Check(t, s2.Handler)\n\n\t\t\t//set a well-known last_pulled_at timestamp on all manifests in the primary\n\t\t\t//DB (we will later verify that this was not touched by the manifest sync)\n\t\t\tinitialLastPulledAt := time.Unix(42, 0)\n\t\t\tmustExec(t, s1.DB, `UPDATE manifests SET last_pulled_at = $1`, initialLastPulledAt)\n\t\t\tmustExec(t, s1.DB, `UPDATE tags SET last_pulled_at = $1`, initialLastPulledAt)\n\t\t\t//we set last_pulled_at to NULL on images[3] to verify that we can merge\n\t\t\t//NULL with a non-NULL last_pulled_at from the replica side\n\t\t\tmustExec(t, s1.DB, `UPDATE manifests SET last_pulled_at = NULL WHERE digest = $1`, images[3].Manifest.Digest)\n\n\t\t\t//as an exception, in the on_first_use method, we can and want to merge\n\t\t\t//last_pulled_at timestamps from the replica into those of the primary, so\n\t\t\t//set some of those to verify the merging behavior\n\t\t\tearlierLastPulledAt := initialLastPulledAt.Add(-10 * time.Second)\n\t\t\tlaterLastPulledAt := initialLastPulledAt.Add(+10 * time.Second)\n\t\t\tmustExec(t, s2.DB, `UPDATE manifests SET last_pulled_at = NULL`)\n\t\t\tmustExec(t, s2.DB, `UPDATE tags SET last_pulled_at = NULL`)\n\t\t\tmustExec(t, s2.DB, `UPDATE manifests SET last_pulled_at = $1 WHERE digest = $2`, earlierLastPulledAt, images[1].Manifest.Digest)\n\t\t\tmustExec(t, s2.DB, `UPDATE manifests SET last_pulled_at = $1 WHERE digest = $2`, laterLastPulledAt, images[2].Manifest.Digest)\n\t\t\tmustExec(t, s2.DB, `UPDATE manifests SET last_pulled_at = $1 WHERE digest = $2`, initialLastPulledAt, images[3].Manifest.Digest)\n\t\t\tmustExec(t, s2.DB, `UPDATE tags SET last_pulled_at = $1 WHERE name = $2`, earlierLastPulledAt, \"latest\")\n\t\t\tmustExec(t, s2.DB, `UPDATE tags SET last_pulled_at = $1 WHERE name = $2`, laterLastPulledAt, \"other\")\n\n\t\t\ttr, tr0 := easypg.NewTracker(t, s2.DB.DbMap.Db)\n\t\t\ttr0.AssertEqualToFile(fmt.Sprintf(\"fixtures/manifest-sync-setup-%s.sql\", strategy))\n\t\t\ttrForPrimary, _ := easypg.NewTracker(t, s1.DB.DbMap.Db)\n\n\t\t\t//ManifestSyncJob on the primary registry should have nothing to do\n\t\t\t//since there are no replica accounts\n\t\t\texpectError(t, sql.ErrNoRows.Error(), syncManifestsJob1.ProcessOne(s1.Ctx))\n\t\t\ttrForPrimary.DBChanges().AssertEmpty()\n\t\t\t//ManifestSyncJob on the secondary registry should set the\n\t\t\t//ManifestsSyncedAt timestamp on the repo, but otherwise not do anything\n\t\t\texpectSuccess(t, syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\ttr.DBChanges().AssertEqualf(`\n\t\t\t\t\tUPDATE repos SET next_manifest_sync_at = %d WHERE id = 1 AND account_name = 'test1' AND name = 'foo';\n\t\t\t\t`,\n\t\t\t\ts1.Clock.Now().Add(1*time.Hour).Unix(),\n\t\t\t)\n\t\t\t//second run should not have 
anything else to do\n\t\t\texpectError(t, sql.ErrNoRows.Error(), syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\ttr.DBChanges().AssertEmpty()\n\n\t\t\t//in on_first_use, the sync should have merged the replica's last_pulled_at\n\t\t\t//timestamps into the primary, i.e. primary.last_pulled_at =\n\t\t\t//max(primary.last_pulled_at, replica.last_pulled_at); this only touches\n\t\t\t//the DB when the replica's last_pulled_at is after the primary's\n\t\t\tif strategy == \"on_first_use\" {\n\t\t\t\ttrForPrimary.DBChanges().AssertEqualf(`\n\t\t\t\t\t\tUPDATE manifests SET last_pulled_at = %[1]d WHERE repo_id = 1 AND digest = '%[2]s';\n\t\t\t\t\t\tUPDATE manifests SET last_pulled_at = %[3]d WHERE repo_id = 1 AND digest = '%[4]s';\n\t\t\t\t\t\tUPDATE tags SET last_pulled_at = %[3]d WHERE repo_id = 1 AND name = 'other';\n\t\t\t\t\t`,\n\t\t\t\t\tinitialLastPulledAt.Unix(),\n\t\t\t\t\timages[3].Manifest.Digest,\n\t\t\t\t\tlaterLastPulledAt.Unix(),\n\t\t\t\t\timages[2].Manifest.Digest,\n\t\t\t\t)\n\t\t\t\t//reset all timestamps to prevent divergences in the rest of the test\n\t\t\t\tmustExec(t, s1.DB, `UPDATE manifests SET last_pulled_at = $1`, initialLastPulledAt)\n\t\t\t\tmustExec(t, s1.DB, `UPDATE tags SET last_pulled_at = $1`, initialLastPulledAt)\n\t\t\t\tmustExec(t, s2.DB, `UPDATE manifests SET last_pulled_at = $1`, initialLastPulledAt)\n\t\t\t\tmustExec(t, s2.DB, `UPDATE tags SET last_pulled_at = $1`, initialLastPulledAt)\n\t\t\t\ttr.DBChanges() // skip these changes\n\t\t\t} else {\n\t\t\t\ttrForPrimary.DBChanges().AssertEmpty()\n\t\t\t}\n\n\t\t\t//delete a manifest on the primary side (this one is a simple image not referenced by anyone else)\n\t\t\ts1.Clock.StepBy(2 * time.Hour)\n\t\t\tmustExec(t, s1.DB,\n\t\t\t\t`DELETE FROM manifests WHERE digest = $1`,\n\t\t\t\timages[3].Manifest.Digest,\n\t\t\t)\n\t\t\t//move a tag on the primary side\n\t\t\tmustExec(t, s1.DB,\n\t\t\t\t`UPDATE tags SET digest = $1 WHERE name = 'latest'`,\n\t\t\t\timages[2].Manifest.Digest,\n\t\t\t)\n\n\t\t\t//again, nothing to do on the primary side\n\t\t\texpectError(t, sql.ErrNoRows.Error(), syncManifestsJob1.ProcessOne(s1.Ctx))\n\t\t\t//ManifestSyncJob on the replica side should not do anything while\n\t\t\t//the account is in maintenance; only the timestamp is updated to make sure\n\t\t\t//that the job loop progresses to the next repo\n\t\t\tmustExec(t, s2.DB, `UPDATE accounts SET in_maintenance = TRUE`)\n\t\t\texpectSuccess(t, syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\ttr.DBChanges().AssertEqualf(`\n\t\t\t\t\tUPDATE accounts SET in_maintenance = TRUE WHERE name = 'test1';\n\t\t\t\t\tUPDATE repos SET next_manifest_sync_at = %d WHERE id = 1 AND account_name = 'test1' AND name = 'foo';\n\t\t\t\t`,\n\t\t\t\ts1.Clock.Now().Add(1*time.Hour).Unix(),\n\t\t\t)\n\t\t\texpectError(t, sql.ErrNoRows.Error(), syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\ttr.DBChanges().AssertEmpty()\n\n\t\t\t//end maintenance\n\t\t\tmustExec(t, s2.DB, `UPDATE accounts SET in_maintenance = FALSE`)\n\t\t\ttr.DBChanges().AssertEqual(`UPDATE accounts SET in_maintenance = FALSE WHERE name = 'test1';`)\n\n\t\t\t//test that replication from external uses the inbound cache\n\t\t\tif strategy == \"from_external_on_first_use\" {\n\t\t\t\t//after the end of the maintenance, we would naively expect\n\t\t\t\t//ManifestSyncJob to actually replicate the deletion, BUT we have an\n\t\t\t\t//inbound cache with a lifetime of 6 hours, so actually nothing should\n\t\t\t\t//happen (only the tag gets synced, which includes a validation of the\n\t\t\t\t//referenced 
manifest)\n\t\t\t\ts1.Clock.StepBy(2 * time.Hour)\n\t\t\t\texpectSuccess(t, syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\t\ttr.DBChanges().AssertEqualf(`\n\t\t\t\t\t\tUPDATE manifests SET validated_at = %d WHERE repo_id = 1 AND digest = '%s';\n\t\t\t\t\t\tUPDATE repos SET next_manifest_sync_at = %d WHERE id = 1 AND account_name = 'test1' AND name = 'foo';\n\t\t\t\t\t`,\n\t\t\t\t\ts1.Clock.Now().Unix(),\n\t\t\t\t\timages[1].Manifest.Digest,\n\t\t\t\t\ts1.Clock.Now().Add(1*time.Hour).Unix(),\n\t\t\t\t)\n\t\t\t\texpectError(t, sql.ErrNoRows.Error(), syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\t\ttr.DBChanges().AssertEmpty()\n\t\t\t}\n\n\t\t\t//From now on, we will go in clock increments of 7 hours to force the\n\t\t\t//inbound cache to never hit.\n\n\t\t\t//after the end of the maintenance, ManifestSyncJob on the replica\n\t\t\t//side should delete the same manifest that we deleted in the primary\n\t\t\t//account, and also replicate the tag change (which includes a validation\n\t\t\t//of the tagged manifests)\n\t\t\ts1.Clock.StepBy(7 * time.Hour)\n\t\t\texpectSuccess(t, syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\tmanifestValidationBecauseOfExistingTag := fmt.Sprintf(\n\t\t\t\t//this validation is skipped in \"on_first_use\" because the respective tag is unchanged\n\t\t\t\t`UPDATE manifests SET validated_at = %d WHERE repo_id = 1 AND digest = '%s';`+\"\\n\",\n\t\t\t\ts1.Clock.Now().Unix(), images[1].Manifest.Digest,\n\t\t\t)\n\t\t\tif strategy == \"on_first_use\" {\n\t\t\t\tmanifestValidationBecauseOfExistingTag = \"\"\n\t\t\t}\n\t\t\ttr.DBChanges().AssertEqualf(`\n\t\t\t\t\tDELETE FROM manifest_blob_refs WHERE repo_id = 1 AND digest = '%[1]s' AND blob_id = 7;\n\t\t\t\t\tDELETE FROM manifest_blob_refs WHERE repo_id = 1 AND digest = '%[1]s' AND blob_id = 8;\n\t\t\t\t\tDELETE FROM manifest_blob_refs WHERE repo_id = 1 AND digest = '%[1]s' AND blob_id = 9;\n\t\t\t\t\tDELETE FROM manifest_contents WHERE repo_id = 1 AND digest = '%[1]s';\n\t\t\t\t\tDELETE FROM manifests WHERE repo_id = 1 AND digest = '%[1]s';\n\t\t\t\t\t%[5]sUPDATE manifests SET validated_at = %[2]d WHERE repo_id = 1 AND digest = '%[3]s';\n\t\t\t\t\tUPDATE repos SET next_manifest_sync_at = %[4]d WHERE id = 1 AND account_name = 'test1' AND name = 'foo';\n\t\t\t\t\tUPDATE tags SET digest = '%[3]s', pushed_at = %[2]d, last_pulled_at = NULL WHERE repo_id = 1 AND name = 'latest';\n\t\t\t\t\tDELETE FROM trivy_security_info WHERE repo_id = 1 AND digest = '%[1]s';\n\t\t\t\t`,\n\t\t\t\timages[3].Manifest.Digest, //the deleted manifest\n\t\t\t\ts1.Clock.Now().Unix(),\n\t\t\t\timages[2].Manifest.Digest, //the manifest now tagged as \"latest\"\n\t\t\t\ts1.Clock.Now().Add(1*time.Hour).Unix(),\n\t\t\t\tmanifestValidationBecauseOfExistingTag,\n\t\t\t)\n\t\t\texpectError(t, sql.ErrNoRows.Error(), syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\ttr.DBChanges().AssertEmpty()\n\n\t\t\t//cause a deliberate inconsistency on the primary side: delete a manifest that\n\t\t\t//*is* referenced by another manifest (this requires deleting the\n\t\t\t//manifest-manifest ref first, otherwise the DB will complain)\n\t\t\ts1.Clock.StepBy(7 * time.Hour)\n\t\t\tmustExec(t, s1.DB,\n\t\t\t\t`DELETE FROM manifest_manifest_refs WHERE child_digest = $1`,\n\t\t\t\timages[2].Manifest.Digest,\n\t\t\t)\n\t\t\tmustExec(t, s1.DB,\n\t\t\t\t`DELETE FROM manifests WHERE digest = $1`,\n\t\t\t\timages[2].Manifest.Digest,\n\t\t\t)\n\n\t\t\t//ManifestSyncJob should now complain since it wants to delete\n\t\t\t//images[2].Manifest, but it can't because of the manifest-manifest ref 
to\n\t\t\t//the image list\n\t\t\texpectedError := fmt.Sprintf(\"cannot remove deleted manifests [%s] in repo test1/foo because they are still being referenced by other manifests (this smells like an inconsistency on the primary account)\",\n\t\t\t\timages[2].Manifest.Digest,\n\t\t\t)\n\t\t\texpectError(t, expectedError, syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\t//the tag sync went through though, so the tag should be gone (the manifest\n\t\t\t//validation is because of the \"other\" tag that still exists)\n\t\t\tmanifestValidationBecauseOfExistingTag = fmt.Sprintf(\n\t\t\t\t//this validation is skipped in \"on_first_use\" because the respective tag is unchanged\n\t\t\t\t`UPDATE manifests SET validated_at = %d WHERE repo_id = 1 AND digest = '%s';`+\"\\n\",\n\t\t\t\ts1.Clock.Now().Unix(), images[1].Manifest.Digest,\n\t\t\t)\n\t\t\tif strategy == \"on_first_use\" {\n\t\t\t\tmanifestValidationBecauseOfExistingTag = \"\"\n\t\t\t}\n\t\t\ttr.DBChanges().AssertEqualf(`%sDELETE FROM tags WHERE repo_id = 1 AND name = 'latest';`,\n\t\t\t\tmanifestValidationBecauseOfExistingTag,\n\t\t\t)\n\n\t\t\t//also remove the image list manifest on the primary side\n\t\t\ts1.Clock.StepBy(7 * time.Hour)\n\t\t\tmustExec(t, s1.DB,\n\t\t\t\t`DELETE FROM manifests WHERE digest = $1`,\n\t\t\t\timageList.Manifest.Digest,\n\t\t\t)\n\t\t\t//and remove the other tag (this is required for the 404 error message in the next step but one to be deterministic)\n\t\t\tmustExec(t, s1.DB, `DELETE FROM tags`)\n\n\t\t\t//this makes the primary side consistent again, so ManifestSyncJob\n\t\t\t//should succeed now and remove both deleted manifests from the DB\n\t\t\texpectSuccess(t, syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\ttr.DBChanges().AssertEqualf(`\n\t\t\t\t\tDELETE FROM manifest_blob_refs WHERE repo_id = 1 AND digest = '%[1]s' AND blob_id = 4;\n\t\t\t\t\tDELETE FROM manifest_blob_refs WHERE repo_id = 1 AND digest = '%[1]s' AND blob_id = 5;\n\t\t\t\t\tDELETE FROM manifest_blob_refs WHERE repo_id = 1 AND digest = '%[1]s' AND blob_id = 6;\n\t\t\t\t\tDELETE FROM manifest_contents WHERE repo_id = 1 AND digest = '%[1]s';\n\t\t\t\t\tDELETE FROM manifest_contents WHERE repo_id = 1 AND digest = '%[2]s';\n\t\t\t\t\tDELETE FROM manifest_manifest_refs WHERE repo_id = 1 AND parent_digest = '%[2]s' AND child_digest = '%[3]s';\n\t\t\t\t\tDELETE FROM manifest_manifest_refs WHERE repo_id = 1 AND parent_digest = '%[2]s' AND child_digest = '%[1]s';\n\t\t\t\t\tDELETE FROM manifests WHERE repo_id = 1 AND digest = '%[1]s';\n\t\t\t\t\tDELETE FROM manifests WHERE repo_id = 1 AND digest = '%[2]s';\n\t\t\t\t\tUPDATE repos SET next_manifest_sync_at = %[4]d WHERE id = 1 AND account_name = 'test1' AND name = 'foo';\n\t\t\t\t\tDELETE FROM tags WHERE repo_id = 1 AND name = 'other';\n\t\t\t\t\tDELETE FROM trivy_security_info WHERE repo_id = 1 AND digest = '%[1]s';\n\t\t\t\t\tDELETE FROM trivy_security_info WHERE repo_id = 1 AND digest = '%[2]s';\n\t\t\t\t`,\n\t\t\t\timages[2].Manifest.Digest,\n\t\t\t\timageList.Manifest.Digest,\n\t\t\t\timages[1].Manifest.Digest,\n\t\t\t\ts1.Clock.Now().Add(1*time.Hour).Unix(),\n\t\t\t)\n\t\t\texpectError(t, sql.ErrNoRows.Error(), syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\ttr.DBChanges().AssertEmpty()\n\n\t\t\t//replace the primary registry's API with something that just answers 404 most of the time\n\t\t\t//\n\t\t\t//(We do allow the /keppel/v1/auth endpoint to work properly because\n\t\t\t//otherwise the error messages are not reproducible between passes.)\n\t\t\ts1.Clock.StepBy(7 * 
time.Hour)\n\t\t\thttp.DefaultTransport.(*test.RoundTripper).Handlers[\"registry.example.org\"] = answerMostWith404(s1.Handler)\n\t\t\t//This is particularly devious since 404 is returned by the GET endpoint for\n\t\t\t//a manifest when the manifest was deleted. We want to check that the next\n\t\t\t//ManifestSyncJob understands that this is a network issue and not\n\t\t\t//caused by the manifest getting deleted, since the 404-generating endpoint\n\t\t\t//does not render a proper MANIFEST_UNKNOWN error.\n\t\t\texpectedError = fmt.Sprintf(\"cannot check existence of manifest test1/foo/%s on primary account: during GET https://registry.example.org/v2/test1/foo/manifests/%[1]s: expected status 200, but got 404 Not Found\",\n\t\t\t\timages[1].Manifest.Digest, //the only manifest that is left\n\t\t\t)\n\t\t\texpectError(t, expectedError, syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\ttr.DBChanges().AssertEmpty()\n\n\t\t\t//check that the manifest sync did not update the last_pulled_at timestamps\n\t\t\t//in the primary DB (even though there were GET requests for the manifests\n\t\t\t//there)\n\t\t\tvar lastPulledAt time.Time\n\t\t\texpectSuccess(t, s1.DB.DbMap.QueryRow(`SELECT MAX(last_pulled_at) FROM manifests`).Scan(&lastPulledAt))\n\t\t\tif !lastPulledAt.Equal(initialLastPulledAt) {\n\t\t\t\tt.Error(\"last_pulled_at timestamps on the primary side were touched\")\n\t\t\t\tt.Logf(\" expected = %#v\", initialLastPulledAt)\n\t\t\t\tt.Logf(\" actual = %#v\", lastPulledAt)\n\t\t\t}\n\n\t\t\t//flip back to the actual primary registry's API\n\t\t\thttp.DefaultTransport.(*test.RoundTripper).Handlers[\"registry.example.org\"] = s1.Handler\n\t\t\t//delete the entire repository on the primary\n\t\t\ts1.Clock.StepBy(7 * time.Hour)\n\t\t\tmustExec(t, s1.DB, `DELETE FROM manifests`)\n\t\t\tmustExec(t, s1.DB, `DELETE FROM repos`)\n\t\t\t//the manifest sync should reflect the repository deletion on the replica\n\t\t\texpectSuccess(t, syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\ttr.DBChanges().AssertEqualf(`\n\t\t\t\t\tDELETE FROM blob_mounts WHERE blob_id = 1 AND repo_id = 1;\n\t\t\t\t\tDELETE FROM blob_mounts WHERE blob_id = 2 AND repo_id = 1;\n\t\t\t\t\tDELETE FROM blob_mounts WHERE blob_id = 3 AND repo_id = 1;\n\t\t\t\t\tDELETE FROM blob_mounts WHERE blob_id = 4 AND repo_id = 1;\n\t\t\t\t\tDELETE FROM blob_mounts WHERE blob_id = 5 AND repo_id = 1;\n\t\t\t\t\tDELETE FROM blob_mounts WHERE blob_id = 6 AND repo_id = 1;\n\t\t\t\t\tDELETE FROM blob_mounts WHERE blob_id = 7 AND repo_id = 1;\n\t\t\t\t\tDELETE FROM blob_mounts WHERE blob_id = 8 AND repo_id = 1;\n\t\t\t\t\tDELETE FROM blob_mounts WHERE blob_id = 9 AND repo_id = 1;\n\t\t\t\t\tDELETE FROM manifest_blob_refs WHERE repo_id = 1 AND digest = '%[1]s' AND blob_id = 1;\n\t\t\t\t\tDELETE FROM manifest_blob_refs WHERE repo_id = 1 AND digest = '%[1]s' AND blob_id = 2;\n\t\t\t\t\tDELETE FROM manifest_blob_refs WHERE repo_id = 1 AND digest = '%[1]s' AND blob_id = 3;\n\t\t\t\t\tDELETE FROM manifest_contents WHERE repo_id = 1 AND digest = '%[1]s';\n\t\t\t\t\tDELETE FROM manifests WHERE repo_id = 1 AND digest = '%[1]s';\n\t\t\t\t\tDELETE FROM repos WHERE id = 1 AND account_name = 'test1' AND name = 'foo';\n\t\t\t\t\tDELETE FROM trivy_security_info WHERE repo_id = 1 AND digest = '%[1]s';\n\t\t\t\t`,\n\t\t\t\timages[1].Manifest.Digest,\n\t\t\t)\n\t\t\texpectError(t, sql.ErrNoRows.Error(), syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\ttr.DBChanges().AssertEmpty()\n\t\t})\n\t})\n}", "func TestCreateIfNotExists(t *testing.T) {\n\toriginCtrl := 
gomock.NewController(t)\n\tdefer originCtrl.Finish()\n\tmockOrigin := mocks.NewMockConnector(originCtrl)\n\n\tvalues := map[string]dosa.FieldValue{}\n\tmockOrigin.EXPECT().CreateIfNotExists(context.TODO(), testEi, values).Return(nil)\n\n\tconnector := NewConnector(mockOrigin, nil, NewJSONEncoder(), nil, cacheableEntities...)\n\tconnector.setSynchronousMode(true)\n\terr := connector.CreateIfNotExists(context.TODO(), testEi, values)\n\tassert.NoError(t, err)\n}", "func TestPrewriteWrittenNoConflict4A(t *testing.T) {\n}", "func TestResolveIdentifyImplicitTeamWithDuplicates(t *testing.T) {\n\ttt := newTeamTester(t)\n\tdefer tt.cleanup()\n\n\talice := tt.addUser(\"abc\")\n\tg := alice.tc.G\n\n\tbob := tt.addUser(\"bob\")\n\n\tiTeamNameCreate := strings.Join([]string{alice.username, bob.username}, \",\")\n\t// simple duplicate\n\tiTeamNameLookup1 := strings.Join([]string{alice.username, bob.username, bob.username}, \",\")\n\t// duplicate after resolution\n\tiTeamNameLookup2 := strings.Join([]string{alice.username, bob.username, bob.username + \"@rooter\"}, \",\")\n\t// duplicate across reader boundary\n\tiTeamNameLookup3 := strings.Join([]string{alice.username, bob.username + \"@rooter\"}, \",\") + \"#\" + bob.username\n\n\tt.Logf(\"make an implicit team\")\n\tiTeam, _, _, err := teams.LookupOrCreateImplicitTeam(context.TODO(), g, iTeamNameCreate, false /*isPublic*/)\n\trequire.NoError(t, err)\n\n\tbob.proveRooter()\n\n\tcli, err := client.GetIdentifyClient(g)\n\trequire.NoError(t, err, \"failed to get new identifyclient\")\n\n\tfor i, lookup := range []string{iTeamNameLookup1, iTeamNameLookup2, iTeamNameLookup3} {\n\t\tt.Logf(\"checking %v: %v\", i, lookup)\n\t\tres, err := cli.ResolveIdentifyImplicitTeam(context.Background(), keybase1.ResolveIdentifyImplicitTeamArg{\n\t\t\tAssertions: lookup,\n\t\t\tSuffix: \"\",\n\t\t\tIsPublic: false,\n\t\t\tDoIdentifies: false,\n\t\t\tCreate: false,\n\t\t\tIdentifyBehavior: keybase1.TLFIdentifyBehavior_DEFAULT_KBFS,\n\t\t})\n\t\trequire.NoError(t, err, \"%v %v\", err, spew.Sdump(res))\n\t\trequire.Equal(t, res.TeamID, iTeam.ID)\n\t\trequire.Equal(t, res.DisplayName, iTeamNameCreate)\n\t\trequire.True(t, compareUserVersionSets([]keybase1.UserVersion{alice.userVersion(), bob.userVersion()}, res.Writers))\n\t\trequire.Nil(t, res.TrackBreaks, \"track breaks\")\n\t}\n}", "func TestCreatesAllowedDuringNamespaceDeletion(t *testing.T) {\n\tconfig := &origin.MasterConfig{\n\t\tKubeletClientConfig: &kclient.KubeletConfig{},\n\t\tEtcdHelper: etcdstorage.NewEtcdStorage(nil, nil, \"\"),\n\t}\n\tstorageMap := config.GetRestStorage()\n\tresources := sets.String{}\n\n\tfor resource := range storageMap {\n\t\tresources.Insert(strings.ToLower(resource))\n\t}\n\n\tfor resource := range recommendedCreatableResources {\n\t\tif !resources.Has(resource) {\n\t\t\tt.Errorf(\"recommendedCreatableResources has resource %v, but that resource isn't registered.\", resource)\n\t\t}\n\t}\n}", "func (pas *PodAutoscalerStatus) MarkResourceFailedCreation(kind, name string) {\n\tpas.MarkInactive(\"FailedCreate\",\n\t\tfmt.Sprintf(\"Failed to create %s %q.\", kind, name))\n}", "func TestCreateFails(t *testing.T) {\n\toptions := &sharetypes.CreateOpts{\n\t\tName: \"my_new_share_type\",\n\t}\n\n\t_, err := sharetypes.Create(client.ServiceClient(), options).Extract()\n\tif _, ok := err.(gophercloud.ErrMissingInput); !ok {\n\t\tt.Fatal(\"ErrMissingInput was expected to occur\")\n\t}\n\n\textraSpecs := sharetypes.ExtraSpecsOpts{\n\t\tDriverHandlesShareServers: true,\n\t}\n\n\toptions = 
&sharetypes.CreateOpts{\n\t\tExtraSpecs: extraSpecs,\n\t}\n\n\t_, err = sharetypes.Create(client.ServiceClient(), options).Extract()\n\tif _, ok := err.(gophercloud.ErrMissingInput); !ok {\n\t\tt.Fatal(\"ErrMissingInput was expected to occur\")\n\t}\n}", "func (o InstanceGroupManagerActionsSummaryResponseOutput) CreatingWithoutRetries() pulumi.IntOutput {\n\treturn o.ApplyT(func(v InstanceGroupManagerActionsSummaryResponse) int { return v.CreatingWithoutRetries }).(pulumi.IntOutput)\n}", "func TestActiveReplicatorPullConflictReadWriteIntlProps(t *testing.T) {\n\n\tcreateRevID := func(generation int, parentRevID string, body db.Body) string {\n\t\trev, err := db.CreateRevID(generation, parentRevID, body)\n\t\trequire.NoError(t, err, \"Error creating revision\")\n\t\treturn rev\n\t}\n\tdocExpiry := time.Now().Local().Add(time.Hour * time.Duration(4)).Format(time.RFC3339)\n\n\t// scenarios\n\tconflictResolutionTests := []struct {\n\t\tname string\n\t\tcommonAncestorRevID string\n\t\tlocalRevisionBody db.Body\n\t\tlocalRevID string\n\t\tremoteRevisionBody db.Body\n\t\tremoteRevID string\n\t\tconflictResolver string\n\t\texpectedLocalBody db.Body\n\t\texpectedLocalRevID string\n\t}{\n\t\t{\n\t\t\tname: \"mergeReadWriteIntlProps\",\n\t\t\tlocalRevisionBody: db.Body{\n\t\t\t\t\"source\": \"local\",\n\t\t\t},\n\t\t\tlocalRevID: \"1-a\",\n\t\t\tremoteRevisionBody: db.Body{\n\t\t\t\t\"source\": \"remote\",\n\t\t\t},\n\t\t\tremoteRevID: \"1-b\",\n\t\t\tconflictResolver: `function(conflict) {\n\t\t\t\tvar mergedDoc = new Object();\n\t\t\t\tmergedDoc.source = \"merged\";\n\t\t\t\tmergedDoc.remoteDocId = conflict.RemoteDocument._id;\n\t\t\t\tmergedDoc.remoteRevId = conflict.RemoteDocument._rev;\n\t\t\t\tmergedDoc.localDocId = conflict.LocalDocument._id;\n\t\t\t\tmergedDoc.localRevId = conflict.LocalDocument._rev;\n\t\t\t\tmergedDoc._id = \"foo\";\n\t\t\t\tmergedDoc._rev = \"2-c\";\n\t\t\t\tmergedDoc._exp = 100;\n\t\t\t\treturn mergedDoc;\n\t\t\t}`,\n\t\t\texpectedLocalBody: db.Body{\n\t\t\t\tdb.BodyId: \"foo\",\n\t\t\t\tdb.BodyRev: \"2-c\",\n\t\t\t\tdb.BodyExpiry: json.Number(\"100\"),\n\t\t\t\t\"localDocId\": \"mergeReadWriteIntlProps\",\n\t\t\t\t\"localRevId\": \"1-a\",\n\t\t\t\t\"remoteDocId\": \"mergeReadWriteIntlProps\",\n\t\t\t\t\"remoteRevId\": \"1-b\",\n\t\t\t\t\"source\": \"merged\",\n\t\t\t},\n\t\t\texpectedLocalRevID: createRevID(2, \"1-b\", db.Body{\n\t\t\t\tdb.BodyId: \"foo\",\n\t\t\t\tdb.BodyRev: \"2-c\",\n\t\t\t\tdb.BodyExpiry: json.Number(\"100\"),\n\t\t\t\t\"localDocId\": \"mergeReadWriteIntlProps\",\n\t\t\t\t\"localRevId\": \"1-a\",\n\t\t\t\t\"remoteDocId\": \"mergeReadWriteIntlProps\",\n\t\t\t\t\"remoteRevId\": \"1-b\",\n\t\t\t\t\"source\": \"merged\",\n\t\t\t}),\n\t\t},\n\t\t{\n\t\t\tname: \"mergeReadWriteAttachments\",\n\t\t\tlocalRevisionBody: map[string]interface{}{\n\t\t\t\tdb.BodyAttachments: map[string]interface{}{\n\t\t\t\t\t\"A\": map[string]interface{}{\n\t\t\t\t\t\t\"data\": \"QQo=\",\n\t\t\t\t\t}},\n\t\t\t\t\"source\": \"local\",\n\t\t\t},\n\t\t\tlocalRevID: \"1-a\",\n\t\t\tremoteRevisionBody: map[string]interface{}{\n\t\t\t\tdb.BodyAttachments: map[string]interface{}{\n\t\t\t\t\t\"B\": map[string]interface{}{\n\t\t\t\t\t\t\"data\": \"Qgo=\",\n\t\t\t\t\t}},\n\t\t\t\t\"source\": \"remote\",\n\t\t\t},\n\t\t\tremoteRevID: \"1-b\",\n\t\t\tconflictResolver: `function(conflict) {\n\t\t\t\tvar mergedDoc = new Object();\n\t\t\t\tmergedDoc.source = \"merged\";\n\t\t\t\tvar mergedAttachments = new Object();\n\n\t\t\t\tdst = conflict.RemoteDocument._attachments;\n\t\t\t\tfor (var key 
in dst) {\n\t\t\t\t\tmergedAttachments[key] = dst[key];\n\t\t\t\t}\n\t\t\t\tsrc = conflict.LocalDocument._attachments;\n\t\t\t\tfor (var key in src) {\n\t\t\t\t\tmergedAttachments[key] = src[key];\n\t\t\t\t}\n\t\t\t\tmergedDoc._attachments = mergedAttachments;\n\t\t\t\treturn mergedDoc;\n\t\t\t}`,\n\t\t\texpectedLocalBody: map[string]interface{}{\n\t\t\t\t\"source\": \"merged\",\n\t\t\t},\n\t\t\texpectedLocalRevID: createRevID(2, \"1-b\", db.Body{\n\t\t\t\t\"source\": \"merged\",\n\t\t\t}),\n\t\t},\n\t\t{\n\t\t\tname: \"mergeReadIntlPropsLocalExpiry\",\n\t\t\tlocalRevisionBody: db.Body{\n\t\t\t\t\"source\": \"local\",\n\t\t\t\tdb.BodyExpiry: docExpiry,\n\t\t\t},\n\t\t\tlocalRevID: \"1-a\",\n\t\t\tremoteRevisionBody: db.Body{\"source\": \"remote\"},\n\t\t\tremoteRevID: \"1-b\",\n\t\t\tconflictResolver: `function(conflict) {\n\t\t\t\tvar mergedDoc = new Object();\n\t\t\t\tmergedDoc.source = \"merged\";\n\t\t\t\tmergedDoc.localDocExp = conflict.LocalDocument._exp;\n\t\t\t\treturn mergedDoc;\n\t\t\t}`,\n\t\t\texpectedLocalBody: db.Body{\n\t\t\t\t\"localDocExp\": docExpiry,\n\t\t\t\t\"source\": \"merged\",\n\t\t\t},\n\t\t\texpectedLocalRevID: createRevID(2, \"1-b\", db.Body{\n\t\t\t\t\"localDocExp\": docExpiry,\n\t\t\t\t\"source\": \"merged\",\n\t\t\t}),\n\t\t},\n\t\t{\n\t\t\tname: \"mergeWriteIntlPropsExpiry\",\n\t\t\tlocalRevisionBody: db.Body{\n\t\t\t\t\"source\": \"local\",\n\t\t\t\tdb.BodyExpiry: docExpiry,\n\t\t\t},\n\t\t\tlocalRevID: \"1-a\",\n\t\t\tremoteRevisionBody: db.Body{\n\t\t\t\t\"source\": \"remote\",\n\t\t\t},\n\t\t\tremoteRevID: \"1-b\",\n\t\t\tconflictResolver: fmt.Sprintf(`function(conflict) {\n\t\t\t\tvar mergedDoc = new Object();\n\t\t\t\tmergedDoc.source = \"merged\";\n\t\t\t\tmergedDoc._exp = %q;\n\t\t\t\treturn mergedDoc;\n\t\t\t}`, docExpiry),\n\t\t\texpectedLocalBody: db.Body{\n\t\t\t\tdb.BodyExpiry: docExpiry,\n\t\t\t\t\"source\": \"merged\",\n\t\t\t},\n\t\t\texpectedLocalRevID: createRevID(2, \"1-b\", db.Body{\n\t\t\t\tdb.BodyExpiry: docExpiry,\n\t\t\t\t\"source\": \"merged\",\n\t\t\t}),\n\t\t},\n\t\t{\n\t\t\tname: \"mergeReadIntlPropsDeletedWithLocalTombstone\",\n\t\t\tlocalRevisionBody: db.Body{\n\t\t\t\t\"source\": \"local\",\n\t\t\t\tdb.BodyDeleted: true,\n\t\t\t},\n\t\t\tcommonAncestorRevID: \"1-a\",\n\t\t\tlocalRevID: \"2-a\",\n\t\t\tremoteRevisionBody: db.Body{\n\t\t\t\t\"source\": \"remote\",\n\t\t\t},\n\t\t\tremoteRevID: \"2-b\",\n\t\t\tconflictResolver: `function(conflict) {\n\t\t\t\tvar mergedDoc = new Object();\n\t\t\t\tmergedDoc.source = \"merged\";\n\t\t\t\tmergedDoc.localDeleted = conflict.LocalDocument._deleted;\n\t\t\t\treturn mergedDoc;\n\t\t\t}`,\n\t\t\texpectedLocalBody: db.Body{\n\t\t\t\t\"localDeleted\": true,\n\t\t\t\t\"source\": \"merged\",\n\t\t\t},\n\t\t\texpectedLocalRevID: createRevID(3, \"2-b\", db.Body{\n\t\t\t\t\"localDeleted\": true,\n\t\t\t\t\"source\": \"merged\",\n\t\t\t}),\n\t\t},\n\t}\n\n\tfor _, test := range conflictResolutionTests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tbase.RequireNumTestBuckets(t, 2)\n\t\t\tbase.SetUpTestLogging(t, logger.LevelDebug, logger.KeyAll)\n\n\t\t\t// Passive\n\t\t\ttb2 := base.GetTestBucket(t)\n\n\t\t\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\tTestBucket: tb2,\n\t\t\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\t\t\"alice\": {\n\t\t\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\t\t\tExplicitChannels: utils.SetOf(\"*\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t})\n\t\t\tdefer 
rt2.Close()\n\n\t\t\t// Create revision on rt2 (remote)\n\t\t\tdocID := test.name\n\t\t\tif test.commonAncestorRevID != \"\" {\n\t\t\t\t_, err := rt2.PutDocumentWithRevID(docID, test.commonAncestorRevID, \"\", test.remoteRevisionBody)\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\t\t\tresp, err := rt2.PutDocumentWithRevID(docID, test.remoteRevID, test.commonAncestorRevID, test.remoteRevisionBody)\n\t\t\tassert.NoError(t, err)\n\t\t\tassertStatus(t, resp, http.StatusCreated)\n\t\t\trt2revID := respRevID(t, resp)\n\t\t\tassert.Equal(t, test.remoteRevID, rt2revID)\n\n\t\t\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1.\n\t\t\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\t\t\tdefer srv.Close()\n\n\t\t\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Add basic auth creds to target db URL\n\t\t\tpassiveDBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\t\t\t// Active\n\t\t\ttb1 := base.GetTestBucket(t)\n\n\t\t\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\tTestBucket: tb1,\n\t\t\t})\n\t\t\tdefer rt1.Close()\n\n\t\t\t// Create revision on rt1 (local)\n\t\t\tif test.commonAncestorRevID != \"\" {\n\t\t\t\t_, err := rt1.PutDocumentWithRevID(docID, test.commonAncestorRevID, \"\", test.remoteRevisionBody)\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\t\t\tresp, err = rt1.PutDocumentWithRevID(docID, test.localRevID, test.commonAncestorRevID, test.localRevisionBody)\n\t\t\tassert.NoError(t, err)\n\t\t\tassertStatus(t, resp, http.StatusCreated)\n\t\t\trt1revID := respRevID(t, resp)\n\t\t\tassert.Equal(t, test.localRevID, rt1revID)\n\n\t\t\tcustomConflictResolver, err := db.NewCustomConflictResolver(test.conflictResolver)\n\t\t\trequire.NoError(t, err)\n\t\t\treplicationStats := base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name())\n\t\t\tar := db.NewActiveReplicator(&db.ActiveReplicatorConfig{\n\t\t\t\tID: t.Name(),\n\t\t\t\tDirection: db.ActiveReplicatorTypePull,\n\t\t\t\tRemoteDBURL: passiveDBURL,\n\t\t\t\tActiveDB: &db.Database{\n\t\t\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t\t\t},\n\t\t\t\tChangesBatchSize: 200,\n\t\t\t\tConflictResolverFunc: customConflictResolver,\n\t\t\t\tContinuous: true,\n\t\t\t\tReplicationStatsMap: replicationStats,\n\t\t\t})\n\t\t\tdefer func() { assert.NoError(t, ar.Stop()) }()\n\n\t\t\t// Start the replicator (implicit connect)\n\t\t\tassert.NoError(t, ar.Start())\n\t\t\twaitAndRequireCondition(t, func() bool { return ar.GetStatus().DocsRead == 1 })\n\t\t\tassert.Equal(t, 1, int(replicationStats.ConflictResolvedMergedCount.Value()))\n\n\t\t\t// Wait for the document originally written to rt2 to arrive at rt1.\n\t\t\t// Should end up as winner under default conflict resolution.\n\t\t\tchangesResults, err := rt1.WaitForChanges(1, \"/db/_changes?&since=0\", \"\", true)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Len(t, changesResults.Results, 1)\n\t\t\tassert.Equal(t, docID, changesResults.Results[0].ID)\n\t\t\tassert.Equal(t, test.expectedLocalRevID, changesResults.Results[0].Changes[0][\"rev\"])\n\t\t\tlog.Printf(\"Changes response is %+v\", changesResults)\n\n\t\t\tdoc, err := rt1.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, test.expectedLocalRevID, doc.SyncData.CurrentRev)\n\t\t\tlog.Printf(\"doc.Body(): %v\", doc.Body())\n\t\t\tassert.Equal(t, test.expectedLocalBody, doc.Body())\n\t\t\tlog.Printf(\"Doc %s is %+v\", docID, doc)\n\t\t\tfor revID, 
revInfo := range doc.SyncData.History {\n\t\t\t\tlog.Printf(\"doc revision [%s]: %+v\", revID, revInfo)\n\t\t\t}\n\n\t\t\t// Validate only one active leaf node remains after conflict resolution, and that all parents\n\t\t\t// of leaves have empty bodies\n\t\t\tactiveCount := 0\n\t\t\tfor _, revID := range doc.SyncData.History.GetLeaves() {\n\t\t\t\trevInfo, ok := doc.SyncData.History[revID]\n\t\t\t\trequire.True(t, ok)\n\t\t\t\tif !revInfo.Deleted {\n\t\t\t\t\tactiveCount++\n\t\t\t\t}\n\t\t\t\tif revInfo.Parent != \"\" {\n\t\t\t\t\tparentRevInfo, ok := doc.SyncData.History[revInfo.Parent]\n\t\t\t\t\trequire.True(t, ok)\n\t\t\t\t\tassert.True(t, parentRevInfo.Body == nil)\n\t\t\t\t}\n\t\t\t}\n\t\t\tassert.Equal(t, 1, activeCount)\n\t\t})\n\t}\n}", "func (t *simpleTest) replaceExistingDocumentWrongRevision(collectionName string, key, rev string) error {\n\n\toperationTimeout := t.OperationTimeout\n\ttestTimeout := time.Now().Add(operationTimeout * 4)\n\n\tq := url.Values{}\n\tq.Set(\"waitForSync\", \"true\")\n\tnewName := fmt.Sprintf(\"Updated name %s\", time.Now())\n\thdr := ifMatchHeader(nil, rev)\n\tnewDoc := UserDocument{\n\t\tKey: key,\n\t\tName: fmt.Sprintf(\"Replaced named %s\", key),\n\t\tValue: rand.Int(),\n\t\tOdd: rand.Int()%2 == 0,\n\t}\n\turl := fmt.Sprintf(\"/_api/document/%s/%s\", collectionName, key)\n\tbackoff := time.Millisecond * 250\n\ti := 0\n\n\tfor {\n\n\t\ti++\n\t\tif time.Now().After(testTimeout) {\n\t\t\tbreak\n\t\t}\n\n\t\tt.log.Infof(\n\t\t\t\"Replacing (%d) existing document '%s' wrong revision in '%s' (name -> '%s')...\",\n\t\t\ti, key, collectionName, newName)\n\t\tresp, err := t.client.Put(\n\t\t\turl, q, hdr, newDoc, \"\", nil, []int{0, 1, 412, 503},\n\t\t\t[]int{200, 201, 202, 400, 404, 307}, operationTimeout, 1)\n\t\tt.log.Infof(\"... 
got http %d - arangodb %d via %s\",\n\t\t\tresp[0].StatusCode, resp[0].Error_.ErrorNum, resp[0].CoordinatorURL)\n\n\t\tif err[0] == nil {\n\t\t\tif resp[0].StatusCode == 412 {\n\t\t\t\tt.replaceExistingWrongRevisionCounter.succeeded++\n\t\t\t\tt.log.Infof(\"Replacing existing document '%s' wrong revision in '%s' (name -> '%s') succeeded\", key, collectionName, newName)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t// In cases 0 and 1 and 503, we fall through here and try again\n\t\t} else {\n\t\t\t// This is a failure\n\t\t\tt.replaceExistingWrongRevisionCounter.failed++\n\t\t\tt.reportFailure(\n\t\t\t\ttest.NewFailure(\n\t\t\t\t\t\"Failed to replace existing document '%s' wrong revision in collection '%s': %v\",\n\t\t\t\t\tkey, collectionName, err[0]))\n\t\t\treturn maskAny(err[0])\n\t\t}\n\n\t\ttime.Sleep(backoff)\n\t\tif backoff < time.Second*5 {\n\t\t\tbackoff += backoff\n\t\t}\n\t}\n\n\tt.replaceExistingWrongRevisionCounter.failed++\n\tt.reportFailure(\n\t\ttest.NewFailure(\n\t\t\t\"Timed out while replacing (%d) existing document '%s' wrong revision in collection '%s'\",\n\t\t\ti, key, collectionName))\n\treturn maskAny(\n\t\tfmt.Errorf(\n\t\t\t\"Timed out while replacing (%d) existing document '%s' wrong revision in collection '%s'\",\n\t\t\ti, key, collectionName))\n\n}", "func TestActiveReplicatorIgnoreNoConflicts(t *testing.T) {\n\n\tbase.RequireNumTestBuckets(t, 2)\n\n\tbase.SetUpTestLogging(t, logger.LevelDebug, logger.KeyHTTP, logger.KeySync, logger.KeyChanges, logger.KeyCRUD, logger.KeyBucket)\n\n\t// Passive\n\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: base.GetTestBucket(t),\n\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\tAllowConflicts: false,\n\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\"alice\": {\n\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\tExplicitChannels: utils.SetOf(\"alice\"),\n\t\t\t\t},\n\t\t\t},\n\t\t}},\n\t})\n\tdefer rt2.Close()\n\n\t// Active\n\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: base.GetTestBucket(t),\n\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\tAllowConflicts: false,\n\t\t}},\n\t})\n\tdefer rt1.Close()\n\n\trt1docID := t.Name() + \"rt1doc1\"\n\tresp := rt1.SendAdminRequest(http.MethodPut, \"/db/\"+rt1docID, `{\"source\":\"rt1\",\"channels\":[\"alice\"]}`)\n\tassertStatus(t, resp, http.StatusCreated)\n\trt1revID := respRevID(t, resp)\n\n\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1.\n\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\tdefer srv.Close()\n\n\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\trequire.NoError(t, err)\n\n\t// Add basic auth creds to target db URL\n\tpassiveDBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\tar := db.NewActiveReplicator(&db.ActiveReplicatorConfig{\n\t\tID: t.Name(),\n\t\tDirection: db.ActiveReplicatorTypePushAndPull,\n\t\tRemoteDBURL: passiveDBURL,\n\t\tActiveDB: &db.Database{\n\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t},\n\t\tContinuous: true,\n\t\tChangesBatchSize: 200,\n\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t})\n\tdefer func() { assert.NoError(t, ar.Stop()) }()\n\n\tassert.Equal(t, \"\", ar.GetStatus().LastSeqPush)\n\n\t// Start the replicator (implicit connect)\n\tassert.NoError(t, ar.Start())\n\n\t// wait for the document originally written to rt1 to arrive at rt2\n\tchangesResults, err := rt2.WaitForChanges(1, \"/db/_changes?since=0\", \"\", true)\n\trequire.NoError(t, 
err)\n\trequire.Len(t, changesResults.Results, 1)\n\tassert.Equal(t, rt1docID, changesResults.Results[0].ID)\n\n\tdoc, err := rt2.GetDatabase().GetDocument(logger.TestCtx(t), rt1docID, db.DocUnmarshalAll)\n\tassert.NoError(t, err)\n\n\tassert.Equal(t, rt1revID, doc.SyncData.CurrentRev)\n\n\tbody, err := doc.GetDeepMutableBody()\n\trequire.NoError(t, err)\n\tassert.Equal(t, \"rt1\", body[\"source\"])\n\n\t// write a doc on rt2 ...\n\trt2docID := t.Name() + \"rt2doc1\"\n\tresp = rt2.SendAdminRequest(http.MethodPut, \"/db/\"+rt2docID, `{\"source\":\"rt2\",\"channels\":[\"alice\"]}`)\n\tassertStatus(t, resp, http.StatusCreated)\n\trt2revID := respRevID(t, resp)\n\n\t// ... and wait to arrive at rt1\n\tchangesResults, err = rt1.WaitForChanges(2, \"/db/_changes?since=0\", \"\", true)\n\trequire.NoError(t, err)\n\trequire.Len(t, changesResults.Results, 2)\n\tassert.Equal(t, rt1docID, changesResults.Results[0].ID)\n\tassert.Equal(t, rt2docID, changesResults.Results[1].ID)\n\n\tdoc, err = rt1.GetDatabase().GetDocument(logger.TestCtx(t), rt2docID, db.DocUnmarshalAll)\n\tassert.NoError(t, err)\n\n\tassert.Equal(t, rt2revID, doc.SyncData.CurrentRev)\n\n\tbody, err = doc.GetDeepMutableBody()\n\trequire.NoError(t, err)\n\tassert.Equal(t, \"rt2\", body[\"source\"])\n}", "func TestPutNewPresentationInvalidtemplatePath(t *testing.T) {\n request := createPutNewPresentationRequest()\n request.templatePath = invalidizeTestParamValue(request.templatePath, \"templatePath\", \"string\").(string)\n e := initializeTest(\"PutNewPresentation\", \"templatePath\", request.templatePath)\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n r, _, e := getTestApiClient().DocumentApi.PutNewPresentation(request)\n assertError(t, \"PutNewPresentation\", \"templatePath\", r.Code, e)\n}", "func TestAzureDevOpsServiceEndpointDockerRegistry_Create_DoesNotSwallowError(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tr := resourceServiceEndpointDockerRegistry()\n\tresourceData := schema.TestResourceDataRaw(t, r.Schema, nil)\n\tflattenServiceEndpointDockerRegistry(resourceData, &dockerRegistryTestServiceEndpoint, dockerRegistryTestServiceEndpointProjectID)\n\n\tbuildClient := azdosdkmocks.NewMockServiceendpointClient(ctrl)\n\tclients := &config.AggregatedClient{ServiceEndpointClient: buildClient, Ctx: context.Background()}\n\n\texpectedArgs := serviceendpoint.CreateServiceEndpointArgs{Endpoint: &dockerRegistryTestServiceEndpoint, Project: dockerRegistryTestServiceEndpointProjectID}\n\tbuildClient.\n\t\tEXPECT().\n\t\tCreateServiceEndpoint(clients.Ctx, expectedArgs).\n\t\tReturn(nil, errors.New(\"CreateServiceEndpoint() Failed\")).\n\t\tTimes(1)\n\n\terr := r.Create(resourceData, clients)\n\trequire.Contains(t, err.Error(), \"CreateServiceEndpoint() Failed\")\n}", "func TestActiveReplicatorEdgeCheckpointNameCollisions(t *testing.T) {\n\n\tbase.RequireNumTestBuckets(t, 3)\n\n\tbase.SetUpTestLogging(t, logger.LevelDebug, logger.KeyReplicate, logger.KeyHTTP, logger.KeyHTTPResp, logger.KeySync, logger.KeySyncMsg)\n\n\tconst (\n\t\tchangesBatchSize = 10\n\t\tnumRT1DocsInitial = 13 // 2 batches of changes\n\t)\n\n\t// Central cluster\n\ttb1 := base.GetTestBucket(t)\n\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: tb1,\n\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\"alice\": {\n\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\tExplicitChannels: utils.SetOf(\"alice\"),\n\t\t\t\t},\n\t\t\t},\n\t\t}},\n\t})\n\tdefer 
rt1.Close()\n\n\t// Create first batch of docs\n\tdocIDPrefix := t.Name() + \"rt1doc\"\n\tfor i := 0; i < numRT1DocsInitial; i++ {\n\t\tresp := rt1.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"source\":\"rt1\",\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, resp, http.StatusCreated)\n\t}\n\n\t// Make rt1 listen on an actual HTTP port, so it can receive the blipsync request from edges\n\tsrv := httptest.NewServer(rt1.TestPublicHandler())\n\tdefer srv.Close()\n\n\t// Build rt1DBURL with basic auth creds\n\trt1DBURL, err := url.Parse(srv.URL + \"/db\")\n\trequire.NoError(t, err)\n\trt1DBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\t// Edge 1\n\tedge1Bucket := base.GetTestBucket(t)\n\tedge1 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: edge1Bucket,\n\t})\n\tdefer edge1.Close()\n\n\tarConfig := db.ActiveReplicatorConfig{\n\t\tID: \"edge-repl\",\n\t\tDirection: db.ActiveReplicatorTypePull,\n\t\tRemoteDBURL: rt1DBURL,\n\t\tActiveDB: &db.Database{\n\t\t\tDatabaseContext: edge1.GetDatabase(),\n\t\t},\n\t\tContinuous: true,\n\t\tChangesBatchSize: changesBatchSize,\n\t}\n\tarConfig.SetCheckpointPrefix(t, \"cluster1:\")\n\n\t// Create the first active replicator to pull from seq:0\n\tarConfig.ReplicationStatsMap = base.SyncGatewayStats.NewDBStats(t.Name()+\"edge1\", false, false, false).DBReplicatorStats(t.Name())\n\tedge1Replicator := db.NewActiveReplicator(&arConfig)\n\n\tstartNumChangesRequestedFromZeroTotal := rt1.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\tstartNumRevsHandledTotal := edge1Replicator.Pull.GetStats().HandleRevCount.Value()\n\n\tassert.NoError(t, edge1Replicator.Start())\n\n\t// wait for all of the documents originally written to rt1 to arrive at edge1\n\tchangesResults, err := edge1.WaitForChanges(numRT1DocsInitial, \"/db/_changes?since=0\", \"\", true)\n\trequire.NoError(t, err)\n\tedge1LastSeq := changesResults.Last_Seq\n\trequire.Len(t, changesResults.Results, numRT1DocsInitial)\n\tdocIDsSeen := make(map[string]bool, numRT1DocsInitial)\n\tfor _, result := range changesResults.Results {\n\t\tdocIDsSeen[result.ID] = true\n\t}\n\tfor i := 0; i < numRT1DocsInitial; i++ {\n\t\tdocID := fmt.Sprintf(\"%s%d\", docIDPrefix, i)\n\t\tassert.True(t, docIDsSeen[docID])\n\n\t\tdoc, err := edge1.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\t\tassert.NoError(t, err)\n\n\t\tbody, err := doc.GetDeepMutableBody()\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, \"rt1\", body[\"source\"])\n\t}\n\n\tedge1Replicator.Pull.Checkpointer.CheckpointNow()\n\n\t// one _changes from seq:0 with initial number of docs sent\n\tnumChangesRequestedFromZeroTotal := rt1.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\tassert.Equal(t, startNumChangesRequestedFromZeroTotal+1, numChangesRequestedFromZeroTotal)\n\n\t// rev assertions\n\tnumRevsHandledTotal := edge1Replicator.Pull.GetStats().HandleRevCount.Value()\n\tassert.Equal(t, startNumRevsHandledTotal+numRT1DocsInitial, numRevsHandledTotal)\n\tassert.Equal(t, int64(numRT1DocsInitial), edge1Replicator.Pull.Checkpointer.Stats().ProcessedSequenceCount)\n\tassert.Equal(t, int64(numRT1DocsInitial), edge1Replicator.Pull.Checkpointer.Stats().ExpectedSequenceCount)\n\n\t// checkpoint assertions\n\tassert.Equal(t, int64(0), edge1Replicator.Pull.Checkpointer.Stats().GetCheckpointHitCount)\n\tassert.Equal(t, int64(1), edge1Replicator.Pull.Checkpointer.Stats().GetCheckpointMissCount)\n\tassert.Equal(t, int64(1), 
edge1Replicator.Pull.Checkpointer.Stats().SetCheckpointCount)\n\n\tassert.NoError(t, edge1Replicator.Stop())\n\n\t// Edge 2\n\tedge2Bucket := base.GetTestBucket(t)\n\tedge2 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: edge2Bucket,\n\t})\n\tdefer edge2.Close()\n\n\t// Create a new replicator using the same ID, which should NOT use the checkpoint set by the first edge.\n\tarConfig.ReplicationStatsMap = base.SyncGatewayStats.NewDBStats(t.Name()+\"edge2\", false, false, false).DBReplicatorStats(t.Name())\n\tarConfig.ActiveDB = &db.Database{\n\t\tDatabaseContext: edge2.GetDatabase(),\n\t}\n\tarConfig.SetCheckpointPrefix(t, \"cluster2:\")\n\tedge2Replicator := db.NewActiveReplicator(&arConfig)\n\tassert.NoError(t, edge2Replicator.Start())\n\n\tchangesResults, err = edge2.WaitForChanges(numRT1DocsInitial, \"/db/_changes?since=0\", \"\", true)\n\trequire.NoError(t, err)\n\n\tedge2Replicator.Pull.Checkpointer.CheckpointNow()\n\n\t// make sure that edge 2 didn't use a checkpoint\n\tassert.Equal(t, int64(0), edge2Replicator.Pull.Checkpointer.Stats().GetCheckpointHitCount)\n\tassert.Equal(t, int64(1), edge2Replicator.Pull.Checkpointer.Stats().GetCheckpointMissCount)\n\tassert.Equal(t, int64(1), edge2Replicator.Pull.Checkpointer.Stats().SetCheckpointCount)\n\n\tassert.NoError(t, edge2Replicator.Stop())\n\n\tresp := rt1.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, numRT1DocsInitial), `{\"source\":\"rt1\",\"channels\":[\"alice\"]}`)\n\tassertStatus(t, resp, http.StatusCreated)\n\trequire.NoError(t, rt1.WaitForPendingChanges())\n\n\t// run a replicator on edge1 again to make sure that edge2 didn't blow away its checkpoint\n\tarConfig.ReplicationStatsMap = base.SyncGatewayStats.NewDBStats(t.Name()+\"edge1\", false, false, false).DBReplicatorStats(t.Name())\n\tarConfig.ActiveDB = &db.Database{\n\t\tDatabaseContext: edge1.GetDatabase(),\n\t}\n\tarConfig.SetCheckpointPrefix(t, \"cluster1:\")\n\n\tedge1Replicator2 := db.NewActiveReplicator(&arConfig)\n\trequire.NoError(t, edge1Replicator2.Start())\n\n\tchangesResults, err = edge1.WaitForChanges(1, fmt.Sprintf(\"/db/_changes?since=%v\", edge1LastSeq), \"\", true)\n\trequire.NoErrorf(t, err, \"changesResults: %v\", changesResults)\n\tchangesResults.requireDocIDs(t, []string{fmt.Sprintf(\"%s%d\", docIDPrefix, numRT1DocsInitial)})\n\n\tedge1Replicator2.Pull.Checkpointer.CheckpointNow()\n\n\tassert.Equal(t, int64(1), edge1Replicator2.Pull.Checkpointer.Stats().GetCheckpointHitCount)\n\tassert.Equal(t, int64(0), edge1Replicator2.Pull.Checkpointer.Stats().GetCheckpointMissCount)\n\tassert.Equal(t, int64(1), edge1Replicator2.Pull.Checkpointer.Stats().SetCheckpointCount)\n\n\trequire.NoError(t, edge1Replicator2.Stop())\n}", "func targetMapVersionMismatch(getNum func(int) int, t *testing.T, proxyURL string) {\n\tsmap := tutils.GetClusterMap(t, proxyURL)\n\ttlog.Logf(\"targets: %d, proxies: %d\\n\", smap.CountActiveTargets(), smap.CountActiveProxies())\n\n\tsmap.Version++\n\tjsonMap, err := jsoniter.Marshal(smap)\n\ttassert.CheckFatal(t, err)\n\n\tn := getNum(smap.CountActiveTargets() + smap.CountActiveProxies() - 1)\n\tfor _, v := range smap.Tmap {\n\t\tif n == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tbaseParams := tutils.BaseAPIParams(v.URL(cmn.NetworkPublic))\n\t\tbaseParams.Method = http.MethodPut\n\t\terr = api.DoHTTPRequest(api.ReqParams{\n\t\t\tBaseParams: baseParams,\n\t\t\tPath: cmn.URLPathDaemon.Join(cmn.SyncSmap),\n\t\t\tBody: jsonMap,\n\t\t})\n\t\ttassert.CheckFatal(t, err)\n\t\tn--\n\t}\n\tkillRestorePrimary(t, proxyURL, false, 
nil)\n}", "func recordRetry(ctx context.Context, apiName, apiStatus string) {\n\n\tctx, err := tag.New(ctx,\n\t\ttag.Upsert(apiNameKey, apiName),\n\t\ttag.Upsert(apiStatusKey, apiStatus),\n\t)\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatalf(\"cannot add tags %v=%v %v=%v\", apiNameKey, apiName, apiStatusKey, apiStatus)\n\t}\n\n\tstats.Record(ctx, dockerRetriesMeasure.M(0))\n}", "func TestInsertNewUserServiceAlreadyExists (t *testing.T){\n\terr := PostNewUserService(user_01)\n\tassert.Equal(t, 409, err.HTTPStatus)\n}", "func CreateTemplateFailJSONMocked(t *testing.T, templateIn *types.Template) *types.Template {\n\n\tassert := assert.New(t)\n\n\t// wire up\n\tcs := &utils.MockConcertoService{}\n\tds, err := NewTemplateService(cs)\n\tassert.Nil(err, \"Couldn't load template service\")\n\tassert.NotNil(ds, \"Template service not instanced\")\n\n\t// convertMap\n\tmapIn, err := utils.ItemConvertParams(*templateIn)\n\tassert.Nil(err, \"Template test data corrupted\")\n\n\t// wrong json\n\tdOut := []byte{10, 20, 30}\n\n\t// call service\n\tcs.On(\"Post\", \"/blueprint/templates/\", mapIn).Return(dOut, 200, nil)\n\ttemplateOut, err := ds.CreateTemplate(mapIn)\n\tassert.NotNil(err, \"We are expecting a marshalling error\")\n\tassert.Nil(templateOut, \"Expecting nil output\")\n\tassert.Contains(err.Error(), \"invalid character\", \"Error message should include the string 'invalid character'\")\n\n\treturn templateOut\n}", "func TestPostPresentationMergeInvalidname(t *testing.T) {\n request := createPostPresentationMergeRequest()\n request.name = invalidizeTestParamValue(request.name, \"name\", \"string\").(string)\n e := initializeTest(\"PostPresentationMerge\", \"name\", request.name)\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n r, _, e := getTestApiClient().MergeDocumentApi.PostPresentationMerge(request)\n assertError(t, \"PostPresentationMerge\", \"name\", r.Code, e)\n}", "func TestDo_Retry(t *testing.T) {\n\ttype testServerResponse struct {\n\t\tAPIResource\n\t\tMessage string `json:\"message\"`\n\t}\n\n\tmessage := \"Hello, client.\"\n\trequestNum := 0\n\n\ttestServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\terr := r.ParseForm()\n\t\tassert.NoError(t, err)\n\n\t\t// The body should always be the same with every retry. 
We've\n\t\t// previously had regressions in this behavior as we switched to HTTP/2\n\t\t// and `Request` became non-reusable, so we want to check it with every\n\t\t// request.\n\t\tassert.Equal(t, \"bar\", r.Form.Get(\"foo\"))\n\n\t\tswitch requestNum {\n\t\tcase 0:\n\t\t\tw.WriteHeader(http.StatusConflict)\n\t\t\tw.Write([]byte(`{\"error\":\"Conflict (this should be retried).\"}`))\n\n\t\tcase 1:\n\t\t\tresponse := testServerResponse{Message: message}\n\n\t\t\tdata, err := json.Marshal(response)\n\t\t\tassert.NoError(t, err)\n\n\t\t\t_, err = w.Write(data)\n\t\t\tassert.NoError(t, err)\n\n\t\tdefault:\n\t\t\tassert.Fail(t, \"Should not have reached request %v\", requestNum)\n\t\t}\n\n\t\trequestNum++\n\t}))\n\tdefer testServer.Close()\n\n\tbackend := GetBackendWithConfig(\n\t\tAPIBackend,\n\t\t&BackendConfig{\n\t\t\tLeveledLogger: nullLeveledLogger,\n\t\t\tMaxNetworkRetries: Int64(5),\n\t\t\tURL: String(testServer.URL),\n\t\t},\n\t).(*BackendImplementation)\n\n\t// Disable sleeping during our tests.\n\tbackend.SetNetworkRetriesSleep(false)\n\n\trequest, err := backend.NewRequest(\n\t\thttp.MethodPost,\n\t\t\"/hello\",\n\t\t\"sk_test_123\",\n\t\t\"application/x-www-form-urlencoded\",\n\t\tnil,\n\t)\n\tassert.NoError(t, err)\n\n\tbodyBuffer := bytes.NewBufferString(\"foo=bar\")\n\tvar response testServerResponse\n\terr = backend.Do(request, bodyBuffer, &response)\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, message, response.Message)\n\n\t// We should have seen exactly two requests.\n\tassert.Equal(t, 2, requestNum)\n}", "func TestCmdDeploy_retryOk(t *testing.T) {\n\tdeletedPods := []string{}\n\tconfig := deploytest.OkDeploymentConfig(1)\n\n\texistingDeployment := deploymentFor(config, deployapi.DeploymentStatusFailed)\n\texistingDeployment.Annotations[deployapi.DeploymentCancelledAnnotation] = deployapi.DeploymentCancelledAnnotationValue\n\texistingDeployment.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentCancelledByUser\n\n\texistingDeployerPods := []kapi.Pod{\n\t\t{ObjectMeta: kapi.ObjectMeta{Name: \"prehook\"}},\n\t\t{ObjectMeta: kapi.ObjectMeta{Name: \"posthook\"}},\n\t\t{ObjectMeta: kapi.ObjectMeta{Name: \"deployerpod\"}},\n\t}\n\n\tvar updatedDeployment *kapi.ReplicationController\n\tcommandClient := &deployCommandClientImpl{\n\t\tGetDeploymentFn: func(namespace, name string) (*kapi.ReplicationController, error) {\n\t\t\treturn existingDeployment, nil\n\t\t},\n\t\tUpdateDeploymentConfigFn: func(config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) {\n\t\t\tt.Fatalf(\"unexpected call to UpdateDeploymentConfig\")\n\t\t\treturn nil, nil\n\t\t},\n\t\tUpdateDeploymentFn: func(deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {\n\t\t\tupdatedDeployment = deployment\n\t\t\treturn deployment, nil\n\t\t},\n\t\tListDeployerPodsForFn: func(namespace, name string) (*kapi.PodList, error) {\n\t\t\treturn &kapi.PodList{Items: existingDeployerPods}, nil\n\t\t},\n\t\tDeletePodFn: func(pod *kapi.Pod) error {\n\t\t\tdeletedPods = append(deletedPods, pod.Name)\n\t\t\treturn nil\n\t\t},\n\t}\n\n\tc := &retryDeploymentCommand{client: commandClient}\n\terr := c.retry(config, ioutil.Discard)\n\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif updatedDeployment == nil {\n\t\tt.Fatalf(\"expected updated config\")\n\t}\n\n\tif deployutil.IsDeploymentCancelled(updatedDeployment) {\n\t\tt.Fatalf(\"deployment should not have the cancelled flag set anymore\")\n\t}\n\n\tif 
deployutil.DeploymentStatusReasonFor(updatedDeployment) != \"\" {\n\t\tt.Fatalf(\"deployment status reason should be empty\")\n\t}\n\n\tsort.Strings(deletedPods)\n\tif !reflect.DeepEqual(deletedPods, []string{\"deployerpod\", \"posthook\", \"prehook\"}) {\n\t\tt.Fatalf(\"Not all deployer pods for the failed deployment were deleted\")\n\t}\n\n\tif e, a := deployapi.DeploymentStatusNew, deployutil.DeploymentStatusFor(updatedDeployment); e != a {\n\t\tt.Fatalf(\"expected deployment status %s, got %s\", e, a)\n\t}\n}", "func buildFailOnDuplicateOps(c client.Client, m model.Model) ([]ovsdb.Operation, error) {\n\t// Right now we mostly consider models with a \"Name\" field that is not an\n\t// index for which we don't expect duplicate names.\n\t// A duplicate Name field that is an index will fail without the\n\t// need of this wait operation.\n\t// Some models that require a complex condition to detect duplicates are not\n\t// considered for the time being due to the performance hit (e.g ACLs).\n\ttimeout := types.OVSDBWaitTimeout\n\tvar field interface{}\n\tvar value string\n\tswitch t := m.(type) {\n\tcase *nbdb.LogicalRouter:\n\t\tfield = &t.Name\n\t\tvalue = t.Name\n\tcase *nbdb.LogicalSwitch:\n\t\tfield = &t.Name\n\t\tvalue = t.Name\n\tcase *nbdb.LogicalRouterPolicy:\n\t\tcondPriority := model.Condition{\n\t\t\tField: &t.Priority,\n\t\t\tFunction: ovsdb.ConditionEqual,\n\t\t\tValue: t.Priority,\n\t\t}\n\t\tcondMatch := model.Condition{\n\t\t\tField: &t.Match,\n\t\t\tFunction: ovsdb.ConditionEqual,\n\t\t\tValue: t.Match,\n\t\t}\n\t\treturn c.WhereAll(t, condPriority, condMatch).Wait(\n\t\t\tovsdb.WaitConditionNotEqual,\n\t\t\t&timeout,\n\t\t\tt,\n\t\t\t&t.Priority,\n\t\t\t&t.Match,\n\t\t)\n\tdefault:\n\t\treturn []ovsdb.Operation{}, nil\n\t}\n\n\tcond := model.Condition{\n\t\tField: field,\n\t\tFunction: ovsdb.ConditionEqual,\n\t\tValue: value,\n\t}\n\treturn c.WhereAny(m, cond).Wait(ovsdb.WaitConditionNotEqual, &timeout, m, field)\n}", "func TestTransactionCreateExistingBucket(t *testing.T) {\n\tt.Skip(\"pending\")\n}", "func TestNamespacePreExisting(t *testing.T) {\n\ttestName := \"TestNamespacePreExisting\"\n\tbeforeTest()\n\t// kinds to check for status\n\tvar kindsToCheckStatus = map[string]bool{\n\t\tAPPLICATION: true,\n\t\t\"Deployment\": true,\n\t\t\"Service\": true,\n\t}\n\n\t// starting resources to pre-populate\n\tvar files = []string{\n\t\t/* 0 */ KappnavConfigFile,\n\t\t/* 1 */ CrdApplication,\n\t\t/* 2 */ ns1Service,\n\t\t/* 3 */ ns1Deployment,\n\t\t/* 4 */ ns2Service,\n\t\t/* 5 */ ns2Deployment,\n\t}\n\titeration0IDs, err := readResourceIDs(files)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t/* Iteration 0: no applications. No resources should have status */\n\ttestActions := newTestActions(testName, kindsToCheckStatus)\n\tvar emptyIDs = []resourceID{}\n\n\t// status should not be checked when there are no applications\n\titeration0IDs[2].expectedStatus = NoStatus\n\titeration0IDs[3].expectedStatus = NoStatus\n\titeration0IDs[4].expectedStatus = NoStatus\n\titeration0IDs[5].expectedStatus = NoStatus\n\ttestActions.addIteration(iteration0IDs, emptyIDs)\n\n\t// iteration 1: add application to NS_1. 
All in NS_1 is normal.\n\t// All in NS_2 remains NoStatus\n\tres, err := readOneResourceID(ns1App)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tarrayLength := len(iteration0IDs)\n\tvar iteration1IDs = make([]resourceID, arrayLength, arrayLength)\n\tcopy(iteration1IDs, iteration0IDs)\n\titeration1IDs = append(iteration1IDs, res)\n\tarrayLength++\n\titeration1IDs[2].expectedStatus = Normal\n\titeration1IDs[3].expectedStatus = Normal\n\titeration1IDs[6].expectedStatus = Normal\n\ttestActions.addIteration(iteration1IDs, emptyIDs)\n\n\t/* iteration 2: clean up */\n\ttestActions.addIteration(emptyIDs, emptyIDs)\n\n\tclusterWatcher, err := createClusterWatcher(iteration0IDs, testActions, StatusFailureRate)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer clusterWatcher.shutDown()\n\n\t// make all transitions of testActions\n\terr = testActions.transitionAll()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func TestPostSlidesDocumentInvalidtemplatePath(t *testing.T) {\n    request := createPostSlidesDocumentRequest()\n    request.templatePath = invalidizeTestParamValue(request.templatePath, \"templatePath\", \"string\").(string)\n    e := initializeTest(\"PostSlidesDocument\", \"templatePath\", request.templatePath)\n    if e != nil {\n        t.Errorf(\"Error: %v.\", e)\n        return\n    }\n    r, _, e := getTestApiClient().DocumentApi.PostSlidesDocument(request)\n    assertError(t, \"PostSlidesDocument\", \"templatePath\", r.Code, e)\n}", "func (f *FailingKubeClient) Create(resources kube.ResourceList) (*kube.Result, error) {\n\tf.CreateCallsCnt++\n\treturn nil, f.CreateError\n}", "func TestServiceEndpointAzureRM_Create_DoesNotSwallowError(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tr := ResourceServiceEndpointAzureRM()\n\tfor _, resource := range azurermTestServiceEndpointsAzureRM {\n\t\tresourceData := getResourceData(t, resource)\n\t\tflattenServiceEndpointAzureRM(resourceData, &resource, azurermTestServiceEndpointAzureRMProjectID)\n\n\t\tbuildClient := azdosdkmocks.NewMockServiceendpointClient(ctrl)\n\t\tclients := &client.AggregatedClient{ServiceEndpointClient: buildClient, Ctx: context.Background()}\n\n\t\texpectedArgs := serviceendpoint.CreateServiceEndpointArgs{Endpoint: &resource}\n\n\t\tbuildClient.\n\t\t\tEXPECT().\n\t\t\tCreateServiceEndpoint(clients.Ctx, expectedArgs).\n\t\t\tReturn(nil, errors.New(\"CreateServiceEndpoint() Failed\")).\n\t\t\tTimes(1)\n\n\t\terr := r.Create(resourceData, clients)\n\t\trequire.Contains(t, err.Error(), \"CreateServiceEndpoint() Failed\")\n\n\t}\n}", "func (s *DockerRegistrySuite) TestPullConflict(c *check.C) {\n\trepoName := privateRegistryURL + \"/dockercli/conflict\"\n\n\t_, err := buildImage(repoName, `\n\t  FROM scratch\n\t  ENV IMAGE conflict\n\t  CMD echo conflict\n\t`, true)\n\tif err != nil {\n\t\tc.Fatal(err)\n\t}\n\n\tdockerCmd(c, \"push\", repoName)\n\n\t// Pull to make it content-addressable\n\tdockerCmd(c, \"rmi\", repoName)\n\tdockerCmd(c, \"pull\", repoName)\n\n\tIDBeforeLoad := imageID(c, repoName)\n\n\t// Load/save to turn this into an unverified image with the same ID\n\ttmpDir, err := ioutil.TempDir(\"\", \"conflict-save-output\")\n\tif err != nil {\n\t\tc.Errorf(\"failed to create temporary directory: %s\", err)\n\t}\n\tdefer os.RemoveAll(tmpDir)\n\n\ttarFile := filepath.Join(tmpDir, \"repo.tar\")\n\n\tdockerCmd(c, \"save\", \"-o\", tarFile, repoName)\n\tdockerCmd(c, \"rmi\", repoName)\n\tdockerCmd(c, \"load\", \"-i\", tarFile)\n\n\t// Check that the ID is the same after save/load.\n\tIDAfterLoad := imageID(c, repoName)\n\n\tif 
IDAfterLoad != IDBeforeLoad {\n\t\tc.Fatal(\"image's ID should be the same after save/load\")\n\t}\n\n\t// Repull\n\tdockerCmd(c, \"pull\", repoName)\n\n\t// Check that the ID is now different because of the conflict.\n\tIDAfterPull1 := imageID(c, repoName)\n\n\t// Expect the new ID to be SHA256(oldID)\n\texpectedIDDigest, err := digest.FromBytes([]byte(IDBeforeLoad))\n\tif err != nil {\n\t\tc.Fatalf(\"digest error: %v\", err)\n\t}\n\texpectedID := expectedIDDigest.Hex()\n\tif IDAfterPull1 != expectedID {\n\t\tc.Fatalf(\"image's ID should have changed on pull to %s (got %s)\", expectedID, IDAfterPull1)\n\t}\n\n\t// A second pull should use the new ID again.\n\tdockerCmd(c, \"pull\", repoName)\n\n\tIDAfterPull2 := imageID(c, repoName)\n\n\tif IDAfterPull2 != IDAfterPull1 {\n\t\tc.Fatal(\"image's ID unexpectedly changed after a repull\")\n\t}\n}", "func (c *Controller) shouldContinueOnCreateFailed() error {\n\t// Check the configuration option regarding whether we should continue when errors are met on the way\n\t// c.chopConfig.OnStatefulSetUpdateFailureAction\n\tvar continueUpdate = false\n\tif continueUpdate {\n\t\t// Continue update\n\t\treturn nil\n\t}\n\n\t// Do not continue update\n\treturn errors.New(\"Create stopped due to previous errors\")\n}", "func Disabled_TestLoadMismatchedFilesAndUneven(t *testing.T) {\n\tfmt.Println(\"============== Test case start: TestLoadMismatchedFilesAndUneven =================\")\n\tassert := assert.New(t)\n\n\tfile1 := \"/tmp/test1.bin\"\n\tfile2 := \"/tmp/test2.bin\"\n\tdefer os.Remove(file1)\n\tdefer os.Remove(file2)\n\n\tentries := 1000\n\tnumMismatch := 5\n\textraEntries := 2\n\n\tkeys, err := genMismatchedFiles(entries, numMismatch, file1, file2)\n\tassert.Nil(err)\n\n\t// Add more records to one file\n\textraSliceOfPizza := genMultipleRecords(extraEntries)\n\tf, err := os.OpenFile(file1, os.O_APPEND|os.O_WRONLY, 0644)\n\tassert.Nil(err)\n\t_, err = f.Write(extraSliceOfPizza)\n\tassert.Nil(err)\n\tf.Close()\n\n\tdiffer := NewFilesDiffer(file1, file2, nil, nil, nil)\n\tassert.NotNil(differ)\n\n\tsrcDiffMap, tgtDiffMap, _, _, _ := differ.Diff()\n\n\tassert.False(len(srcDiffMap) == 0)\n\tassert.False(len(tgtDiffMap) == 0)\n\n\tassert.Equal(numMismatch, len(differ.BothExistButMismatch))\n\tassert.True(verifyMisMatch(keys, differ))\n\n\tassert.Equal(0, len(differ.MissingFromFile1))\n\tassert.Equal(extraEntries, len(differ.MissingFromFile2))\n\tdiffer.PrettyPrintResult()\n\tfmt.Println(\"============== Test case end: TestLoadMismatchedFilesAndUneven =================\")\n}", "func TestClient_CreateReplica_Err(t *testing.T) {\n\tc := OpenClient(0)\n\tdefer c.Close()\n\tc.Server.Handler.Broker().CreateReplica(123, &url.URL{Host: \"localhost\"})\n\tif err := c.CreateReplica(123, &url.URL{Host: \"localhost\"}); err == nil || err.Error() != `replica already exists` {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n}", "func (t *simpleTest) replaceNonExistingDocument(collectionName string, key string) error {\n\toperationTimeout := t.OperationTimeout\n\ttestTimeout := time.Now().Add(operationTimeout * 4)\n\n\tq := url.Values{}\n\tq.Set(\"waitForSync\", \"true\")\n\tnewName := fmt.Sprintf(\"Updated non-existing name %s\", time.Now())\n\tnewDoc := UserDocument{\n\t\tKey: key,\n\t\tName: fmt.Sprintf(\"Replaced named %s\", key),\n\t\tValue: rand.Int(),\n\t\tOdd: rand.Int()%2 == 0,\n\t}\n\n\tbackoff := time.Millisecond * 250\n\ti := 0\n\n\tfor {\n\n\t\ti++\n\t\tif time.Now().After(testTimeout) {\n\t\t\tbreak\n\t\t}\n\n\t\tt.log.Infof(\"Replacing (%d) 
non-existing document '%s' in '%s' (name -> '%s')...\",\n\t\t\ti, key, collectionName, newName)\n\t\tresp, err := t.client.Put(\n\t\t\tfmt.Sprintf(\"/_api/document/%s/%s\", collectionName, key), q, nil, newDoc, \"\", nil,\n\t\t\t[]int{0, 1, 404, 503}, []int{200, 201, 202, 400, 412, 307}, operationTimeout, 1)\n\t\tt.log.Infof(\"... got http %d - arangodb %d via %s\",\n\t\t\tresp[0].StatusCode, resp[0].Error_.ErrorNum, resp[0].CoordinatorURL)\n\n\t\tif err[0] == nil {\n\t\t\tif resp[0].StatusCode == 404 {\n\t\t\t\tt.replaceNonExistingCounter.succeeded++\n\t\t\t\tt.log.Infof(\n\t\t\t\t\t\"Replacing non-existing document '%s' in '%s' (name -> '%s') succeeded\", key, collectionName, newName)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t// In cases 0, 1 and 503 we fall through here and try again.\n\t\t} else {\n\t\t\t// This is a failure\n\t\t\tt.replaceNonExistingCounter.failed++\n\t\t\tt.reportFailure(\n\t\t\t\ttest.NewFailure(\n\t\t\t\t\t\"Failed to replace non-existing document '%s' in collection '%s': %v\", key, collectionName, err[0]))\n\t\t\treturn maskAny(err[0])\n\t\t}\n\n\t\ttime.Sleep(backoff)\n\t\tif backoff < time.Second*5 {\n\t\t\tbackoff += backoff\n\t\t}\n\t}\n\n\tt.replaceNonExistingCounter.failed++\n\tt.reportFailure(\n\t\ttest.NewFailure(\n\t\t\t\"Timeout while replacing (%d) non-existing document '%s' in collection '%s'\", i, key, collectionName))\n\treturn maskAny(\n\t\tfmt.Errorf(\n\t\t\t\"Timeout while replacing (%d) non-existing document '%s' in collection '%s'\", i, key, collectionName))\n\n}", "func TestActiveReplicatorPushFromCheckpointIgnored(t *testing.T) {\n\n\tbase.RequireNumTestBuckets(t, 2)\n\n\tbase.SetUpTestLogging(t, logger.LevelInfo, logger.KeyReplicate, logger.KeyHTTP, logger.KeyHTTPResp, logger.KeySync, logger.KeySyncMsg)\n\n\tconst (\n\t\tchangesBatchSize = 10\n\t\tnumRT1DocsInitial = 13 // 2 batches of changes\n\t\tnumRT1DocsTotal = 24 // 2 more batches\n\t)\n\n\t// Active\n\ttb1 := base.GetTestBucket(t)\n\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: tb1,\n\t})\n\tdefer rt1.Close()\n\n\t// Passive\n\ttb2 := base.GetTestBucket(t)\n\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: tb2,\n\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\"alice\": {\n\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\tExplicitChannels: utils.SetOf(\"alice\"),\n\t\t\t\t},\n\t\t\t},\n\t\t}},\n\t})\n\tdefer rt2.Close()\n\n\t// Create first batch of docs\n\tdocIDPrefix := t.Name() + \"doc\"\n\tfor i := 0; i < numRT1DocsInitial; i++ {\n\t\tresp := rt1.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, resp, http.StatusCreated)\n\t\trt1RevID := respRevID(t, resp)\n\t\tresp = rt2.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, resp, http.StatusCreated)\n\t\trt2RevID := respRevID(t, resp)\n\t\trequire.Equal(t, rt1RevID, rt2RevID)\n\t}\n\n\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1\n\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\tdefer srv.Close()\n\n\t// Build passiveDBURL with basic auth creds\n\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\trequire.NoError(t, err)\n\tpassiveDBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\tarConfig := db.ActiveReplicatorConfig{\n\t\tID: t.Name(),\n\t\tDirection: db.ActiveReplicatorTypePush,\n\t\tRemoteDBURL: passiveDBURL,\n\t\tActiveDB: 
&db.Database{\n\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t},\n\t\tContinuous: true,\n\t\tChangesBatchSize: changesBatchSize,\n\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t}\n\n\t// Create the first active replicator to pull from seq:0\n\tar := db.NewActiveReplicator(&arConfig)\n\n\tstartNumChangesRequestedFromZeroTotal := rt1.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\n\tassert.NoError(t, ar.Start())\n\n\t_, ok := base.WaitForStat(func() int64 {\n\t\treturn ar.Push.Checkpointer.Stats().AlreadyKnownSequenceCount\n\t}, numRT1DocsInitial)\n\tassert.True(t, ok)\n\n\t// one _changes from seq:0 with initial number of docs sent\n\tnumChangesRequestedFromZeroTotal := rt1.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\tassert.Equal(t, startNumChangesRequestedFromZeroTotal+1, numChangesRequestedFromZeroTotal)\n\n\t// rev assertions\n\tnumRevsSentTotal := ar.Push.GetStats().SendRevCount.Value()\n\tassert.Equal(t, int64(0), numRevsSentTotal)\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().ProcessedSequenceCount)\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().ExpectedSequenceCount)\n\n\t// checkpoint assertions\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().GetCheckpointHitCount)\n\tassert.Equal(t, int64(1), ar.Push.Checkpointer.Stats().GetCheckpointMissCount)\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().SetCheckpointCount)\n\n\tassert.NoError(t, ar.Stop())\n\n\t// Second batch of docs\n\tfor i := numRT1DocsInitial; i < numRT1DocsTotal; i++ {\n\t\tresp := rt1.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, resp, http.StatusCreated)\n\t\trt1RevID := respRevID(t, resp)\n\t\tresp = rt2.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, resp, http.StatusCreated)\n\t\trt2RevID := respRevID(t, resp)\n\t\trequire.Equal(t, rt1RevID, rt2RevID)\n\t}\n\n\t// Create a new replicator using the same config, which should use the checkpoint set from the first.\n\tar = db.NewActiveReplicator(&arConfig)\n\tdefer func() { assert.NoError(t, ar.Stop()) }()\n\tassert.NoError(t, ar.Start())\n\n\t_, ok = base.WaitForStat(func() int64 {\n\t\treturn ar.Push.Checkpointer.Stats().AlreadyKnownSequenceCount\n\t}, numRT1DocsTotal-numRT1DocsInitial)\n\tassert.True(t, ok)\n\n\t// Make sure we've not started any more since:0 replications on rt1 since the first one\n\tendNumChangesRequestedFromZeroTotal := rt1.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\tassert.Equal(t, numChangesRequestedFromZeroTotal, endNumChangesRequestedFromZeroTotal)\n\n\t// make sure rt1 thinks it has sent all of the revs via a 2.x replicator\n\tnumRevsSentTotal = ar.Push.GetStats().SendRevCount.Value()\n\tassert.Equal(t, int64(0), numRevsSentTotal)\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().ProcessedSequenceCount)\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().ExpectedSequenceCount)\n\n\t// assert the second active replicator stats\n\tassert.Equal(t, int64(1), ar.Push.Checkpointer.Stats().GetCheckpointHitCount)\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().GetCheckpointMissCount)\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().SetCheckpointCount)\n\tar.Push.Checkpointer.CheckpointNow()\n\tassert.Equal(t, int64(1), 
ar.Push.Checkpointer.Stats().SetCheckpointCount)\n}", "func TestPostPresentationMergeInvalidrequest(t *testing.T) {\n request := createPostPresentationMergeRequest()\n request.request = invalidizeTestParamValue(request.request, \"request\", \"PresentationsMergeRequest\").(PresentationsMergeRequest)\n e := initializeTest(\"PostPresentationMerge\", \"request\", request.request)\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n r, _, e := getTestApiClient().MergeDocumentApi.PostPresentationMerge(request)\n assertError(t, \"PostPresentationMerge\", \"request\", r.Code, e)\n}", "func TestIntermediateNameAmbiguous(t *testing.T) {\n\tth := kusttest_test.MakeHarness(t)\n\tth.WriteK(\"gcp\", `\nnamePrefix: gcp-\nresources:\n- ../emea\npatchesStrategicMerge:\n- depPatch.yaml\n`)\n\tth.WriteF(\"gcp/depPatch.yaml\", `\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: foo\nspec:\n replicas: 999\n`)\n\tth.WriteK(\"emea\", `\nnamePrefix: emea-\nresources:\n- ../prod\n- deployment.yaml\n`)\n\tth.WriteF(\"emea/deployment.yaml\", `\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: foo\nspec:\n template:\n spec:\n containers:\n - image: whatever\n`)\n\tth.WriteK(\"prod\", `\nnamePrefix: prod-\nresources:\n- ../base\n`)\n\tth.WriteK(\"base\", `\nresources:\n- deployment.yaml\n`)\n\tth.WriteF(\"base/deployment.yaml\", `\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: foo\nspec:\n template:\n spec:\n containers:\n - image: whatever\n`)\n\terr := th.RunWithErr(\"gcp\", th.MakeDefaultOptions())\n\tassert.Error(t, err)\n}", "func (o InstanceGroupManagerActionsSummaryResponsePtrOutput) CreatingWithoutRetries() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *InstanceGroupManagerActionsSummaryResponse) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.CreatingWithoutRetries\n\t}).(pulumi.IntPtrOutput)\n}", "func TestClientRetryNonTxn(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\n\t// Set up a command filter which tracks which one of our test keys have\n\t// been attempted to be pushed.\n\tmu := struct {\n\t\tsyncutil.Mutex\n\t\tm map[string]struct{}\n\t}{\n\t\tm: make(map[string]struct{}),\n\t}\n\tfilter := func(args storagebase.FilterArgs) *roachpb.Error {\n\t\tmu.Lock()\n\t\tdefer mu.Unlock()\n\t\tpushArg, ok := args.Req.(*roachpb.PushTxnRequest)\n\t\tif !ok || !strings.HasPrefix(string(pushArg.PusheeTxn.Key), \"key-\") {\n\t\t\treturn nil\n\t\t}\n\t\tmu.m[string(pushArg.PusheeTxn.Key)] = struct{}{}\n\t\treturn nil\n\t}\n\targs := base.TestServerArgs{\n\t\tKnobs: base.TestingKnobs{\n\t\t\tStore: &storage.StoreTestingKnobs{\n\t\t\t\tTestingCommandFilter: filter,\n\t\t\t},\n\t\t},\n\t}\n\ts, _, _ := serverutils.StartServer(t, args)\n\tdefer s.Stopper().Stop()\n\n\ttestCases := []struct {\n\t\targs roachpb.Request\n\t\tisolation enginepb.IsolationType\n\t\tcanPush bool\n\t\texpAttempts int\n\t}{\n\t\t// Write/write conflicts.\n\t\t{&roachpb.PutRequest{}, enginepb.SNAPSHOT, true, 2},\n\t\t{&roachpb.PutRequest{}, enginepb.SERIALIZABLE, true, 2},\n\t\t{&roachpb.PutRequest{}, enginepb.SNAPSHOT, false, 1},\n\t\t{&roachpb.PutRequest{}, enginepb.SERIALIZABLE, false, 1},\n\t\t// Read/write conflicts.\n\t\t{&roachpb.GetRequest{}, enginepb.SNAPSHOT, true, 1},\n\t\t{&roachpb.GetRequest{}, enginepb.SERIALIZABLE, true, 2},\n\t\t{&roachpb.GetRequest{}, enginepb.SNAPSHOT, false, 1},\n\t\t{&roachpb.GetRequest{}, enginepb.SERIALIZABLE, false, 1},\n\t}\n\t// Lay down a write intent using a txn and attempt to access the same\n\t// key from our test client, with priorities set 
up so that the Push\n\t// succeeds iff the test dictates that it does.\n\tfor i, test := range testCases {\n\t\tkey := roachpb.Key(fmt.Sprintf(\"key-%d\", i))\n\t\tvar txnPri int32 = 1\n\t\tvar clientPri roachpb.UserPriority = 1\n\t\tif test.canPush {\n\t\t\tclientPri = 2\n\t\t} else {\n\t\t\ttxnPri = 2\n\t\t}\n\n\t\tdb, sender := createTestNotifyClient(t, s.Stopper(), s.ServingAddr(), -clientPri)\n\n\t\t// doneCall signals when the non-txn read or write has completed.\n\t\tdoneCall := make(chan error)\n\t\tcount := 0 // keeps track of retries\n\t\terr := db.Txn(context.TODO(), func(txn *client.Txn) error {\n\t\t\tif test.isolation == enginepb.SNAPSHOT {\n\t\t\t\tif err := txn.SetIsolation(enginepb.SNAPSHOT); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\ttxn.InternalSetPriority(txnPri)\n\n\t\t\tcount++\n\t\t\t// Lay down the intent.\n\t\t\tif err := txn.Put(key, \"txn-value\"); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// On the first try, send the non-txn put or get.\n\t\t\tif count == 1 {\n\t\t\t\t// We use a \"notifying\" sender here, which allows us to know exactly when the\n\t\t\t\t// call has been processed; otherwise, we'd be dependent on timing.\n\t\t\t\t// The channel lets us pause txn until after the non-txn method has run once.\n\t\t\t\t// Use a channel length of size 1 to guarantee a notification through a\n\t\t\t\t// non-blocking send.\n\t\t\t\tnotify := make(chan struct{}, 1)\n\t\t\t\tsender.reset(notify)\n\t\t\t\t// We must try the non-txn put or get in a goroutine because\n\t\t\t\t// it might have to retry and will only succeed immediately in\n\t\t\t\t// the event we can push.\n\t\t\t\tgo func() {\n\t\t\t\t\tvar err error\n\t\t\t\t\t//for {\n\t\t\t\t\tif _, ok := test.args.(*roachpb.GetRequest); ok {\n\t\t\t\t\t\t_, err = db.Get(key)\n\t\t\t\t\t} else {\n\t\t\t\t\t\terr = db.Put(key, \"value\")\n\t\t\t\t\t}\n\t\t\t\t\tdoneCall <- errors.Wrapf(\n\t\t\t\t\t\terr, \"%d: expected success on non-txn call to %s\",\n\t\t\t\t\t\ti, test.args.Method())\n\t\t\t\t}()\n\t\t\t\t// Block until the non-transactional client has pushed us at\n\t\t\t\t// least once.\n\t\t\t\tutil.SucceedsSoon(t, func() error {\n\t\t\t\t\tmu.Lock()\n\t\t\t\t\tdefer mu.Unlock()\n\t\t\t\t\tif _, ok := mu.m[string(key)]; ok {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t\treturn errors.New(\"non-transactional client has not pushed txn yet\")\n\t\t\t\t})\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%d: expected success writing transactionally; got %s\", i, err)\n\t\t}\n\n\t\t// Make sure non-txn put or get has finished.\n\t\tif err := <-doneCall; err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\t// Get the current value to verify whether the txn happened first.\n\t\tgr, err := db.Get(key)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%d: expected success getting %q: %s\", i, key, err)\n\t\t}\n\n\t\tif _, isGet := test.args.(*roachpb.GetRequest); isGet || test.canPush {\n\t\t\tif !bytes.Equal(gr.ValueBytes(), []byte(\"txn-value\")) {\n\t\t\t\tt.Errorf(\"%d: expected \\\"txn-value\\\"; got %q\", i, gr.ValueBytes())\n\t\t\t}\n\t\t} else {\n\t\t\tif !bytes.Equal(gr.ValueBytes(), []byte(\"value\")) {\n\t\t\t\tt.Errorf(\"%d: expected \\\"value\\\"; got %q\", i, gr.ValueBytes())\n\t\t\t}\n\t\t}\n\t\tif count != test.expAttempts {\n\t\t\tt.Errorf(\"%d: expected %d attempt(s); got %d\", i, test.expAttempts, count)\n\t\t}\n\t}\n}", "func TestProject_CreateProject_DoesNotSwallowErrorFromFailedAsyncStatusCheckCall(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer 
ctrl.Finish()\n\n\tcoreClient := azdosdkmocks.NewMockCoreClient(ctrl)\n\toperationsClient := azdosdkmocks.NewMockOperationsClient(ctrl)\n\tclients := &client.AggregatedClient{\n\t\tCoreClient: coreClient,\n\t\tOperationsClient: operationsClient,\n\t\tCtx: context.Background(),\n\t}\n\n\texpectedProjectCreateArgs := core.QueueCreateProjectArgs{ProjectToCreate: &testProject}\n\tmockedOperationReference := operations.OperationReference{Id: &testID}\n\texpectedOperationArgs := operations.GetOperationArgs{OperationId: &testID}\n\n\tcoreClient.\n\t\tEXPECT().\n\t\tQueueCreateProject(clients.Ctx, expectedProjectCreateArgs).\n\t\tReturn(&mockedOperationReference, nil).\n\t\tTimes(1)\n\n\toperationsClient.\n\t\tEXPECT().\n\t\tGetOperation(clients.Ctx, expectedOperationArgs).\n\t\tReturn(nil, errors.New(\"GetOperation() failed\")).\n\t\tTimes(1)\n\n\terr := createProject(clients, &testProject, 10*time.Minute)\n\trequire.Equal(t, \" waiting for project ready. GetOperation() failed \", err.Error())\n}", "func TestProject_CreateProject_ReportsErrorIfNoSuccessForLongTime(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tcoreClient := azdosdkmocks.NewMockCoreClient(ctrl)\n\toperationsClient := azdosdkmocks.NewMockOperationsClient(ctrl)\n\tclients := &client.AggregatedClient{\n\t\tCoreClient: coreClient,\n\t\tOperationsClient: operationsClient,\n\t\tCtx: context.Background(),\n\t}\n\n\texpectedProjectCreateArgs := core.QueueCreateProjectArgs{ProjectToCreate: &testProject}\n\tmockedOperationReference := operations.OperationReference{Id: &testID}\n\texpectedOperationArgs := operations.GetOperationArgs{OperationId: &testID}\n\n\tcoreClient.\n\t\tEXPECT().\n\t\tQueueCreateProject(clients.Ctx, expectedProjectCreateArgs).\n\t\tReturn(&mockedOperationReference, nil).\n\t\tTimes(1)\n\n\t// the operation will forever be \"in progress\"\n\tstatus := operationWithStatus(operations.OperationStatusValues.InProgress)\n\toperationsClient.\n\t\tEXPECT().\n\t\tGetOperation(clients.Ctx, expectedOperationArgs).\n\t\tReturn(&status, nil).\n\t\tMinTimes(1)\n\n\terr := createProject(clients, &testProject, 20*time.Second)\n\trequire.NotNil(t, err, \"Expected error indicating timeout\")\n}", "func TestCreateAndApplyTransactionPanic(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\n\t// Create invalid update that triggers a panic.\n\tupdate := writeaheadlog.Update{\n\t\tName: \"invalid name\",\n\t}\n\n\t// Declare a helper to check for a panic.\n\tassertRecover := func() {\n\t\tif r := recover(); r == nil {\n\t\t\tt.Fatalf(\"Expected a panic\")\n\t\t}\n\t}\n\n\t// Run the test for both the method and function\n\tsiadir, err := newTestDir(t.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfunc() {\n\t\tdefer assertRecover()\n\t\t_ = siadir.createAndApplyTransaction(update)\n\t}()\n\tfunc() {\n\t\tdefer assertRecover()\n\t\t_ = CreateAndApplyTransaction(siadir.wal, update)\n\t}()\n}", "func TestProject_CreateProject_DoesNotSwallowErrorFromFailedCreateCall(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tcoreClient := azdosdkmocks.NewMockCoreClient(ctrl)\n\tclients := &client.AggregatedClient{\n\t\tCoreClient: coreClient,\n\t\tCtx: context.Background(),\n\t}\n\n\texpectedProjectCreateArgs := core.QueueCreateProjectArgs{ProjectToCreate: &testProject}\n\n\tcoreClient.\n\t\tEXPECT().\n\t\tQueueCreateProject(clients.Ctx, expectedProjectCreateArgs).\n\t\tReturn(nil, errors.New(\"QueueCreateProject() Failed\")).\n\t\tTimes(1)\n\n\terr := 
createProject(clients, &testProject, 5)\n\trequire.Equal(t, \"QueueCreateProject() Failed\", err.Error())\n}", "func (p *MockDeployPluginFailComponents) Create(cluster *lang.Cluster, deployName string, params util.NestedParameterMap, eventLog *event.Log) error {\n\teventLog.WithFields(event.Fields{}).Infof(\"[+] %s\", deployName)\n\tfor _, s := range p.FailComponents {\n\t\tif strings.Contains(deployName, s) {\n\t\t\treturn p.fail(\"create\", deployName)\n\t\t}\n\t}\n\treturn nil\n}", "func TestConflictingRun(t *testing.T) {\n\tt.Parallel()\n\tConvey(\"Given a testing context with a created entity and two stores using it\", t, func() {\n\t\tctx := gaetesting.TestingContext()\n\n\t\tstoreA := nodestore.New(\"foo-pool\")\n\t\terr := storeA.Create(ctx, time.Now())\n\t\tSo(err, ShouldBeNil)\n\n\t\tstoreB := nodestore.New(\"foo-pool\")\n\n\t\tConvey(\"alternating null operations between both stores run without error.\", func() {\n\t\t\terr = storeA.Run(ctx, &createUniqueAccounts{nAccounts: 1})\n\t\t\tSo(err, ShouldBeNil)\n\n\t\t\terr = storeB.Run(ctx, &createUniqueAccounts{nAccounts: 1})\n\t\t\tSo(err, ShouldBeNil)\n\n\t\t\terr = storeA.Run(ctx, &createUniqueAccounts{nAccounts: 1})\n\t\t\tSo(err, ShouldBeNil)\n\n\t\t\terr = storeB.Run(ctx, &createUniqueAccounts{nAccounts: 1})\n\t\t\tSo(err, ShouldBeNil)\n\n\t\t\ts, err := storeA.Get(ctx)\n\t\t\tSo(err, ShouldBeNil)\n\t\t\tSo(len(s.Scheduler.Config().AccountConfigs), ShouldEqual, 4)\n\t\t})\n\t})\n}", "func TestResourceContentionIsPreventedForTwoNamespacesMappingToSameProjectInDifferentClusters(t *testing.T) {\n\tshouldRun := func(fixture resourcefixture.ResourceFixture, mgr manager.Manager) bool {\n\t\t// only need to test contention for a single resource since the logic will apply to all resources\n\t\treturn fixture.GVK.Kind == \"PubSubTopic\"\n\t}\n\ttestFunc := func(t *testing.T, testContext testrunner.TestContext, systemContext testrunner.SystemContext) {\n\t\tif err := systemContext.Manager.GetClient().Create(context.TODO(), testContext.CreateUnstruct); err != nil {\n\t\t\tt.Fatalf(\"error creating resource: %v\", err)\n\t\t}\n\t\tsystemContext.Reconciler.Reconcile(testContext.UpdateUnstruct, testreconciler.ExpectedSuccessfulReconcileResultFor(systemContext.Reconciler, testContext.UpdateUnstruct), nil)\n\t\tassertLeaseLabelsAreNotPresent(t, systemContext.Manager, testContext.CreateUnstruct)\n\t\tprojectId := testgcp.GetDefaultProjectID(t)\n\t\ttestcontroller.EnsureNamespaceExistsT(t, mgr2.GetClient(), testContext.UniqueId)\n\t\ttestcontroller.EnsureNamespaceHasProjectIDAnnotation(t, mgr2.GetClient(), testContext.UniqueId, projectId)\n\t\tassertNamespaceIdsAreNotEqual(t, systemContext.Manager, mgr2, testContext.UniqueId, testContext.UniqueId)\n\t\treconciler2 := testreconciler.New(t, mgr2, systemContext.TFProvider)\n\t\tif err := mgr2.GetClient().Create(context.TODO(), testContext.UpdateUnstruct); err != nil {\n\t\t\tt.Fatalf(\"error creating resource: %v\", err)\n\t\t}\n\t\treconciler2.Reconcile(testContext.UpdateUnstruct, testreconciler.ExpectedUnsuccessfulReconcileResult, regexp.MustCompile(\"error obtaining lease\"))\n\t\tevents := testcontroller.CollectEvents(t, mgr2.GetConfig(), testContext.UpdateUnstruct.GetNamespace(), 1, 10*time.Second)\n\t\tevent := events[0]\n\t\texpectedReason := k8s.ManagementConflict\n\t\tif event.Reason != expectedReason {\n\t\t\tt.Fatalf(\"event mismatch: got '%v', want '%v'\", event.Reason, expectedReason)\n\t\t}\n\t\t// Since the controller was unable to obtain the lease it does not write the default 
finalizer onto the object.\n\t\t// Add the finalizer manually so that we can test the deletion resource contention flow.\n\t\tensureFinalizer(t, mgr2, testContext.NamespacedName, testContext.CreateUnstruct)\n\t\tif err := mgr2.GetClient().Delete(context.TODO(), testContext.CreateUnstruct); err != nil {\n\t\t\tt.Fatalf(\"error deleting resource: %v\", err)\n\t\t}\n\t\treconciler2.Reconcile(testContext.CreateUnstruct, testreconciler.ExpectedUnsuccessfulReconcileResult, regexp.MustCompile(\"error obtaining lease\"))\n\t\tevents = testcontroller.CollectEvents(t, mgr2.GetConfig(), testContext.CreateUnstruct.GetNamespace(), 3, 10*time.Second)\n\t\tnextEvent := events[2]\n\t\tif nextEvent.Reason != expectedReason {\n\t\t\tt.Fatalf(\"event mismatch: got '%v', want '%v'\", nextEvent.Reason, expectedReason)\n\t\t}\n\t\tif !(event.LastTimestamp == nextEvent.LastTimestamp || event.LastTimestamp.Before(&nextEvent.LastTimestamp)) {\n\t\t\tt.Fatalf(\"expected the previous event's last timestamp to be before or equal to the next event's last timestamp\")\n\t\t}\n\t}\n\ttestrunner.RunAllWithDependenciesCreatedButNotObject(t, mgr1, shouldRun, testFunc)\n}", "func (client *MockClient) Create(context ctx.Context, object ctrlClient.Object, options ...ctrlClient.CreateOption) error {\n\tobject.SetCreationTimestamp(metav1.Time{Time: time.Now()})\n\n\tjsonData, err := json.Marshal(object)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkindKey, err := buildKindKey(object)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tobjectKey, err := buildJSONObjectKey(jsonData)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgenericObject := make(map[string]interface{})\n\terr = json.Unmarshal(jsonData, &genericObject)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = incrementGeneration(genericObject)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = setUID(genericObject)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif kindKey == \"/v1/Service\" {\n\t\tclusterIP, err := lookupJSONString(genericObject, \"spec\", \"clusterIP\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif clusterIP == \"\" {\n\t\t\terr = setJSONValue(genericObject, []string{\"spec\", \"clusterIP\"}, client.generateIP())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else if kindKey == \"/v1/Pod\" {\n\t\tv4Address := client.generatePodIPv4()\n\t\tv6Address := client.generatePodIPv6()\n\t\terr = setJSONValue(genericObject, []string{\"status\", \"podIP\"}, v4Address)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = setJSONValue(genericObject, []string{\"status\", \"podIPs\"}, []corev1.PodIP{{IP: v4Address}, {IP: v6Address}})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tclient.fillInMaps(kindKey)\n\tif client.data[kindKey][objectKey] != nil {\n\t\treturn &k8serrors.StatusError{ErrStatus: metav1.Status{\n\t\t\tStatus: \"Failure\",\n\t\t\tMessage: \"Conflict\",\n\t\t\tCode: 409,\n\t\t\tReason: metav1.StatusReasonAlreadyExists,\n\t\t}}\n\t}\n\tclient.data[kindKey][objectKey] = genericObject\n\n\tjsonData, err = json.Marshal(genericObject)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = json.Unmarshal(jsonData, object)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func testNonleadersElectionTimeoutNonconflict(t *testing.T, state StateType) {\n\tet := 10\n\tsize := 5\n\trs := make([]*raft, size)\n\tids := idsBySize(size)\n\tfor k := range rs {\n\t\trs[k] = newTestRaft(ids[k], ids, et, 1, NewMemoryStorage())\n\t}\n\tdefer func() {\n\t\tfor k := range rs 
{\n\t\t\tcloseAndFreeRaft(rs[k])\n\t\t}\n\t}()\n\tconflicts := 0\n\tfor round := 0; round < 1000; round++ {\n\t\tfor _, r := range rs {\n\t\t\tswitch state {\n\t\t\tcase StateFollower:\n\t\t\t\tr.becomeFollower(r.Term+1, None)\n\t\t\tcase StateCandidate:\n\t\t\t\tr.becomeCandidate()\n\t\t\t}\n\t\t}\n\n\t\ttimeoutNum := 0\n\t\tfor timeoutNum == 0 {\n\t\t\tfor _, r := range rs {\n\t\t\t\tr.tick()\n\t\t\t\tif len(r.readMessages()) > 0 {\n\t\t\t\t\ttimeoutNum++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// several rafts time out at the same tick\n\t\tif timeoutNum > 1 {\n\t\t\tconflicts++\n\t\t}\n\t}\n\n\tif g := float64(conflicts) / 1000; g > 0.3 {\n\t\tt.Errorf(\"probability of conflicts = %v, want <= 0.3\", g)\n\t}\n}", "func Disabled_TestLoadMismatchedFilesOnly(t *testing.T) {\n\tfmt.Println(\"============== Test case start: TestLoadMismatchedFilesOnly =================\")\n\tassert := assert.New(t)\n\n\tfile1 := \"/tmp/test1.bin\"\n\tfile2 := \"/tmp/test2.bin\"\n\tdefer os.Remove(file1)\n\tdefer os.Remove(file2)\n\n\tentries := 10000\n\tnumMismatch := 5\n\n\tkeys, err := genMismatchedFiles(entries, numMismatch, file1, file2)\n\tassert.Nil(err)\n\n\tdiffer := NewFilesDiffer(file1, file2, nil, nil, nil)\n\tassert.NotNil(differ)\n\n\tsrcDiffMap, tgtDiffMap, _, _, _ := differ.Diff()\n\n\tassert.False(len(srcDiffMap) == 0)\n\tassert.False(len(tgtDiffMap) == 0)\n\n\tassert.Equal(numMismatch, len(differ.BothExistButMismatch))\n\tassert.True(verifyMisMatch(keys, differ))\n\n\tassert.Equal(0, len(differ.MissingFromFile1))\n\tassert.Equal(0, len(differ.MissingFromFile2))\n\n\tdiffer.PrettyPrintResult()\n\tfmt.Println(\"============== Test case end: TestLoadMismatchedFilesOnly =================\")\n}", "func TestFileDirConflict(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\n\ttestFileDirConflict(t, false)\n\ttestFileDirConflict(t, true)\n}", "func CreateTemplateFailErrMocked(t *testing.T, templateIn *types.Template) *types.Template {\n\n\tassert := assert.New(t)\n\n\t// wire up\n\tcs := &utils.MockConcertoService{}\n\tds, err := NewTemplateService(cs)\n\tassert.Nil(err, \"Couldn't load template service\")\n\tassert.NotNil(ds, \"Template service not instanced\")\n\n\t// convertMap\n\tmapIn, err := utils.ItemConvertParams(*templateIn)\n\tassert.Nil(err, \"Template test data corrupted\")\n\n\t// to json\n\tdOut, err := json.Marshal(templateIn)\n\tassert.Nil(err, \"Template test data corrupted\")\n\n\t// call service\n\tcs.On(\"Post\", \"/blueprint/templates/\", mapIn).Return(dOut, 200, fmt.Errorf(\"mocked error\"))\n\ttemplateOut, err := ds.CreateTemplate(mapIn)\n\tassert.NotNil(err, \"We are expecting an error\")\n\tassert.Nil(templateOut, \"Expecting nil output\")\n\tassert.Equal(err.Error(), \"mocked error\", \"Error should be 'mocked error'\")\n\n\treturn templateOut\n}", "func TestConflictErrorInDeleteInRR(t *testing.T) {\n\trequire.NoError(t, failpoint.Enable(\"github.com/pingcap/tidb/executor/assertPessimisticLockErr\", \"return\"))\n\tstore := testkit.CreateMockStore(t)\n\n\ttk := testkit.NewTestKit(t, store)\n\tdefer tk.MustExec(\"rollback\")\n\tse := tk.Session()\n\ttk2 := testkit.NewTestKit(t, store)\n\tdefer tk2.MustExec(\"rollback\")\n\n\ttk.MustExec(\"use test\")\n\ttk2.MustExec(\"use test\")\n\ttk.MustExec(\"create table t (id int primary key, v int)\")\n\ttk.MustExec(\"insert into t values (1, 1), (2, 2)\")\n\n\ttk.MustExec(\"begin pessimistic\")\n\ttk2.MustExec(\"insert into t values (3, 1)\")\n\tse.SetValue(sessiontxn.AssertLockErr, nil)\n\ttk.MustExec(\"delete from t where v = 
1\")\n\t_, ok := se.Value(sessiontxn.AssertLockErr).(map[string]int)\n\trequire.False(t, ok)\n\ttk.MustQuery(\"select * from t\").Check(testkit.Rows(\"2 2\"))\n\ttk.MustExec(\"commit\")\n\n\ttk.MustExec(\"begin pessimistic\")\n\t// However, if sub select in delete is point get, we will incur one write conflict\n\ttk2.MustExec(\"update t set id = 1 where id = 2\")\n\tse.SetValue(sessiontxn.AssertLockErr, nil)\n\ttk.MustExec(\"delete from t where id = 1\")\n\n\trecords, ok := se.Value(sessiontxn.AssertLockErr).(map[string]int)\n\trequire.True(t, ok)\n\trequire.Equal(t, records[\"errWriteConflict\"], 1)\n\ttk.MustQuery(\"select * from t for update\").Check(testkit.Rows())\n\n\ttk.MustExec(\"rollback\")\n\trequire.NoError(t, failpoint.Disable(\"github.com/pingcap/tidb/executor/assertPessimisticLockErr\"))\n}", "func TestPutNewPresentationInvalidtemplatePassword(t *testing.T) {\n request := createPutNewPresentationRequest()\n request.templatePassword = invalidizeTestParamValue(request.templatePassword, \"templatePassword\", \"string\").(string)\n e := initializeTest(\"PutNewPresentation\", \"templatePassword\", request.templatePassword)\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n r, _, e := getTestApiClient().DocumentApi.PutNewPresentation(request)\n assertError(t, \"PutNewPresentation\", \"templatePassword\", r.Code, e)\n}", "func TestPutNewPresentationInvalidtemplateStorage(t *testing.T) {\n request := createPutNewPresentationRequest()\n request.templateStorage = invalidizeTestParamValue(request.templateStorage, \"templateStorage\", \"string\").(string)\n e := initializeTest(\"PutNewPresentation\", \"templateStorage\", request.templateStorage)\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n r, _, e := getTestApiClient().DocumentApi.PutNewPresentation(request)\n assertError(t, \"PutNewPresentation\", \"templateStorage\", r.Code, e)\n}", "func TestEmployeeManagerMapCreate_BadRequest(t *testing.T) {\n\tdb, _, err := sqlmock.New()\n\tif err != nil {\n\t\tt.Fatalf(\"an error '%s' was not expected when opening a stub database connection\", err)\n\t}\n\tdefer db.Close()\n\n\templyManagerMap := NewEmployeeManagerMapHandler(db)\n\n\tw := httptest.NewRecorder()\n\tvar jsonStr = []byte(`{\"invalidjson\":}`)\n\tr := httptest.NewRequest(\"POST\", \"http://localhost:9090/api/v1/emplymgrmap\", bytes.NewBuffer(jsonStr))\n\tr = r.WithContext(context.Background())\n\templyManagerMap.Create(w, r)\n\n\texpectedResponse := `{\"error_message\":\"Error:: Invalid Request\"}`\n\tassert.Equal(t, gohttp.StatusBadRequest, w.Code)\n\tassert.Equal(t, expectedResponse, w.Body.String())\n}", "func TestAfPacketModifyRecreateNotFound(t *testing.T) {\n\tctx, plugin, _ := afPacketTestSetup(t)\n\tdefer afPacketTestTeardown(ctx)\n\n\t// Data\n\toldData := getTestAfPacketData(\"if1\", []string{\"10.0.0.1/24\"}, \"host1\")\n\tnewData := getTestAfPacketData(\"if1\", []string{\"10.0.0.1/24\"}, \"host2\")\n\n\t// Test af packet modify\n\trecreate, err := plugin.ModifyAfPacketInterface(newData, oldData)\n\tExpect(err).To(BeNil())\n\tExpect(recreate).To(BeTrue())\n}", "func TestSkipOnError(t *testing.T) {\n\tkube, mock := setup(t)\n\tmock.deleteErr = errors.New(\"create failed\")\n\n\tdef := cluster.SyncDef{\n\t\tActions: []cluster.SyncAction{\n\t\t\tcluster.SyncAction{\n\t\t\t\tResourceID: \"fail in middle\",\n\t\t\t\tDelete: deploymentDef(\"should fail\"),\n\t\t\t\tApply: deploymentDef(\"skipped\"),\n\t\t\t},\n\t\t\tcluster.SyncAction{\n\t\t\t\tResourceID: \"proceed\",\n\t\t\t\tApply: 
deploymentDef(\"apply works\"),\n\t\t\t},\n\t\t},\n\t}\n\n\terr := kube.Sync(def)\n\tswitch err := err.(type) {\n\tcase cluster.SyncError:\n\t\tif _, ok := err[\"fail in middle\"]; !ok {\n\t\t\tt.Errorf(\"expected error for failing resource %q, but got %#v\", \"fail in middle\", err)\n\t\t}\n\tdefault:\n\t\tt.Errorf(\"expected sync error, got %#v\", err)\n\t}\n\n\texpected := []command{\n\t\tcommand{\"delete\", \"should fail\"},\n\t\t// skip to next resource after failure\n\t\tcommand{\"apply\", \"apply works\"},\n\t}\n\tif !reflect.DeepEqual(expected, mock.commands) {\n\t\tt.Errorf(\"expected commands:\\n%#v\\ngot:\\n%#v\", expected, mock.commands)\n\t}\n}", "func Test_jsonpatch_Replace_NonExistent_IsError(t *testing.T) {\n\tt.SkipNow() // Test is no working as expected\n\tg := NewWithT(t)\n\n\torigDoc := []byte(`{\"asd\":\"foof\"}`)\n\n\tpatch1, _ := jsonpatch.DecodePatch([]byte(`[{\"op\":\"replace\", \"path\":\"/test_key\", \"value\":\"qwe\"}]`))\n\n\texpectNewDoc := []byte(`{\"asd\":\"foof\"}`)\n\n\tnewDoc, err := patch1.Apply(origDoc)\n\tg.Expect(err).Should(HaveOccurred(), \"patch apply\")\n\tg.Expect(jsonpatch.Equal(newDoc, expectNewDoc)).Should(BeTrue(), \"%v is not equal to %v\", string(newDoc), string(expectNewDoc))\n}", "func (r *ReconcileAerospikeCluster) recoverFailedCreate(aeroCluster *aerospikev1alpha1.AerospikeCluster) error {\n\tlogger := pkglog.New(log.Ctx{\"AerospikeCluster\": utils.ClusterNamespacedName(aeroCluster)})\n\tlogger.Info(\"Forcing a cluster recreate as status is nil. The cluster could be unreachable due to bad configuration.\")\n\n\t// Delete all statefulsets and everything related so that it can be properly created and updated in next run.\n\tstatefulSetList, err := r.getClusterStatefulSets(aeroCluster)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error getting statefulsets while forcing recreate of the cluster as status is nil: %v\", err)\n\t}\n\n\tlogger.Debug(\"Found statefulset for cluster. Need to delete them\", log.Ctx{\"nSTS\": len(statefulSetList.Items)})\n\tfor _, statefulset := range statefulSetList.Items {\n\t\tif err := r.deleteStatefulSet(aeroCluster, &statefulset); err != nil {\n\t\t\treturn fmt.Errorf(\"Error deleting statefulset while forcing recreate of the cluster as status is nil: %v\", err)\n\t\t}\n\t}\n\n\t// Clear pod status as well in status since we want to be re-initializing or cascade deleting devices if any.\n\t// This is not necessary since scale-up would cleanup danglin pod status. 
However, it is done here for general\n\t// cleanliness.\n\trackStateList := getNewRackStateList(aeroCluster)\n\tfor _, state := range rackStateList {\n\t\tpods, err := r.getRackPodList(aeroCluster, state.Rack.ID)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to recover failed cluster: %v\", err)\n\t\t}\n\n\t\tnewPodNames := []string{}\n\t\tfor i := 0; i < len(pods.Items); i++ {\n\t\t\tnewPodNames = append(newPodNames, pods.Items[i].Name)\n\t\t}\n\n\t\terr = r.cleanupPods(aeroCluster, newPodNames, state)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to recover failed cluster: %v\", err)\n\t\t}\n\t}\n\n\treturn fmt.Errorf(\"Forcing recreate of the cluster as status is nil\")\n}", "func createVoteMapForConflicts(conflictIDs, timestampIDs []string) map[string]opinion.Opinions {\n\tvoteMap := map[string]opinion.Opinions{}\n\n\tfor _, id := range conflictIDs {\n\t\tvoteMap[id] = opinion.Opinions{}\n\t}\n\tfor _, id := range timestampIDs {\n\t\tvoteMap[id] = opinion.Opinions{}\n\t}\n\n\treturn voteMap\n}", "func Retries400Test() Test {\n\tvar (\n\t\tmtx sync.Mutex\n\t\taccept bool\n\t\tts int64\n\t)\n\n\treturn Test{\n\t\tName: \"Retries400\",\n\t\tMetrics: metricHandler(prometheus.NewGaugeFunc(prometheus.GaugeOpts{\n\t\t\tName: \"now\",\n\t\t}, func() float64 {\n\t\t\treturn float64(time.Now().Unix() * 1000)\n\t\t})),\n\t\tWrites: func(next http.Handler) http.Handler {\n\t\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tmtx.Lock()\n\t\t\t\tdefer mtx.Unlock()\n\n\t\t\t\tif accept {\n\t\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// We're going to pick a timestamp from this batch, and then make sure\n\t\t\t\t// it gets resent. First we need to decode this batch.\n\t\t\t\tts = getFirstTimestamp(w, r)\n\t\t\t\taccept = true\n\t\t\t\thttp.Error(w, \"bad request\", http.StatusBadRequest)\n\t\t\t})\n\n\t\t},\n\t\tExpected: func(t *testing.T, bs []Batch) {\n\t\t\tfound := false\n\t\t\tforAllSamples(bs, func(s sample) {\n\t\t\t\tif labelsContain(s.l, labels.FromStrings(\"__name__\", \"now\")) && s.t == ts {\n\t\t\t\t\tfound = true\n\t\t\t\t}\n\t\t\t})\n\t\t\trequire.False(t, found, `found sample that should not have been retried`)\n\t\t},\n\t}\n}", "func TestPutPresentationMergeInvalidname(t *testing.T) {\n    request := createPutPresentationMergeRequest()\n    request.name = invalidizeTestParamValue(request.name, \"name\", \"string\").(string)\n    e := initializeTest(\"PutPresentationMerge\", \"name\", request.name)\n    if e != nil {\n        t.Errorf(\"Error: %v.\", e)\n        return\n    }\n    r, _, e := getTestApiClient().MergeDocumentApi.PutPresentationMerge(request)\n    assertError(t, \"PutPresentationMerge\", \"name\", r.Code, e)\n}", "func TestIntermediateNameSameNameDifferentLayer(t *testing.T) {\n\tth := kusttest_test.MakeHarness(t)\n\tth.WriteK(\"gcp\", `\nnamePrefix: gcp-\nresources:\n- ../emea\npatchesStrategicMerge:\n- depPatch.yaml\n`)\n\tth.WriteF(\"gcp/depPatch.yaml\", `\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: prod-foo\nspec:\n replicas: 999\n`)\n\tth.WriteK(\"emea\", `\nnamePrefix: emea-\nresources:\n- ../prod\n- deployment.yaml\n`)\n\tth.WriteF(\"emea/deployment.yaml\", `\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: foo\nspec:\n template:\n spec:\n containers:\n - image: whatever\n`)\n\tth.WriteK(\"prod\", `\nnamePrefix: prod-\nresources:\n- ../base\n`)\n\tth.WriteK(\"base\", `\nresources:\n- deployment.yaml\n`)\n\tth.WriteF(\"base/deployment.yaml\", `\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: foo\nspec:\n 
template:\n spec:\n containers:\n - image: whatever\n`)\n\tm := th.Run(\"gcp\", th.MakeDefaultOptions())\n\tth.AssertActualEqualsExpected(m, `\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: gcp-emea-prod-foo\nspec:\n replicas: 999\n template:\n spec:\n containers:\n - image: whatever\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: gcp-emea-foo\nspec:\n template:\n spec:\n containers:\n - image: whatever\n`)\n}", "func testReconcileDeploymentReadyRestartRequired(t *testing.T) {\n\treaper := createReaper()\n\tdeployment := createReadyDeployment(reaper)\n\n\tobjs := []runtime.Object{reaper, deployment}\n\n\tSetConfigurationUpdatedCondition(&reaper.Status)\n\n\tr := createDeploymentReconciler(objs...)\n\tresult, err := r.ReconcileDeployment(context.TODO(), reaper)\n\n\tif result == nil {\n\t\tt.Errorf(\"expected non-nil result\")\n\t} else if !result.Requeue {\n\t\tt.Errorf(\"expected requeue\")\n\t}\n\n\tif err != nil {\n\t\tt.Errorf(\"expected err (nil), got (%s)\", err)\n\t}\n\n\tcond := GetCondition(&reaper.Status, v1alpha1.ConfigurationUpdated)\n\tif cond == nil {\n\t\tt.Errorf(\"expected to find condition (%s)\", v1alpha1.ConfigurationUpdated)\n\t} else if cond.Reason != RestartRequiredReason {\n\t\tt.Errorf(\"condition %s reason is wrong: expected (%s), got (%s)\", v1alpha1.ConfigurationUpdated, RestartRequiredReason, cond.Reason)\n\t}\n\n\tdeployment = &appsv1.Deployment{}\n\tif err := r.client.Get(context.TODO(), namespaceName, deployment); err != nil {\n\t\tt.Errorf(\"failed to get deployment: (%s)\", err)\n\t} else if _, found := deployment.Spec.Template.Annotations[reaperRestartedAt]; !found {\n\t\tt.Errorf(\"expected to find deployment annotation: (%s)\", reaperRestartedAt)\n\t}\n}", "func TestPutPresentationMergeInvalidrequest(t *testing.T) {\n request := createPutPresentationMergeRequest()\n request.request = invalidizeTestParamValue(request.request, \"request\", \"OrderedMergeRequest\").(OrderedMergeRequest)\n e := initializeTest(\"PutPresentationMerge\", \"request\", request.request)\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n r, _, e := getTestApiClient().MergeDocumentApi.PutPresentationMerge(request)\n assertError(t, \"PutPresentationMerge\", \"request\", r.Code, e)\n}", "func (suite *TestManagerSuite) TestManagerCreateWithExisting() {\n\terr := suite.m.UpdateStatus(\"tid001\", job.SuccessStatus.String(), 2000)\n\trequire.NoError(suite.T(), err)\n\n\trp := &scan.Report{\n\t\tDigest: \"d1000\",\n\t\tRegistrationUUID: \"ruuid\",\n\t\tMimeType: v1.MimeTypeNativeReport,\n\t\tTrackID: \"tid002\",\n\t}\n\n\tuuid, err := suite.m.Create(rp)\n\trequire.NoError(suite.T(), err)\n\trequire.NotEmpty(suite.T(), uuid)\n\n\tassert.NotEqual(suite.T(), suite.rpUUID, uuid)\n\tsuite.rpUUID = uuid\n}", "func (m *IDXKeyRepository) CreateOrReplace(arg0 context.Context, arg1 *idxkey.IDXKey) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateOrReplace\", arg0, arg1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (fbo *folderBranchOps) forceStuckConflictForTesting(\n\tctx context.Context) (err error) {\n\tstartTime, timer := fbo.startOp(ctx, \"Forcing a stuck conflict\")\n\tdefer func() {\n\t\tfbo.endOp(\n\t\t\tctx, startTime, timer, \"Forcing a stuck conflict done: %+v\", err)\n\t}()\n\n\tlState := makeFBOLockState()\n\tfbo.mdWriterLock.Lock(lState)\n\tdefer fbo.mdWriterLock.Unlock(lState)\n\n\tif fbo.isUnmergedLocked(lState) {\n\t\treturn errors.New(\"Cannot force conflict when already unmerged\")\n\t}\n\n\t// Disable 
updates.\n\tunpauseUpdatesCh := make(chan struct{})\n\tselect {\n\tcase fbo.updatePauseChan <- unpauseUpdatesCh:\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n\tdefer func() { unpauseUpdatesCh <- struct{}{} }()\n\n\t// Make a no-op revision with an empty resolutionOp. Wait for it\n\t// to flush to the server.\n\torigHead, _ := fbo.getHead(ctx, lState, mdNoCommit)\n\tmergedGCOp := newGCOp(origHead.data.LastGCRevision)\n\terr = fbo.finalizeGCOpLocked(ctx, lState, mergedGCOp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tjManager, _ := GetJournalManager(fbo.config)\n\tif jManager != nil {\n\t\terr := fbo.waitForJournalLocked(ctx, lState, jManager)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Wait for the flush handler to finish, so we don't\n\t\t// accidentally swap in the upcoming MD on the conflict branch\n\t\t// over the \"merged\" one we just flushed, before the pointer\n\t\t// archiving step happens.\n\t\terr = fbo.mdFlushes.Wait(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Roll back the local view to the original revision.\n\terr = func() error {\n\t\tfbo.headLock.Lock(lState)\n\t\tdefer fbo.headLock.Unlock(lState)\n\t\terr = fbo.setHeadLocked(ctx, lState, origHead, headTrusted, mdNoCommit)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfbo.setLatestMergedRevisionLocked(\n\t\t\tctx, lState, origHead.Revision(), true)\n\t\treturn nil\n\t}()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Set CR to always fail.\n\toldMode := fbo.cr.getFailModeForTesting()\n\tfbo.cr.setFailModeForTesting(alwaysFailCR)\n\tdefer func() { fbo.cr.setFailModeForTesting(oldMode) }()\n\n\t// Make fake conflicting files to trigger CR. Make one for each\n\t// attempt needed to result in stuck CR.\n\thandle := origHead.GetTlfHandle()\n\trootNode, err := fbo.nodeCache.GetOrCreate(\n\t\torigHead.data.Dir.BlockPointer,\n\t\tdata.NewPathPartString(string(handle.GetCanonicalName()),\n\t\t\tfbo.makeObfuscator()),\n\t\tnil, data.Dir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i := 0; i < maxConflictResolutionAttempts+1; i++ {\n\t\tfilename := fmt.Sprintf(\"FILE_FOR_STUCK_CONFLICT_%02d\", i)\n\t\t_, _, err := fbo.createEntryLocked(\n\t\t\tctx, lState, rootNode, rootNode.ChildName(filename), data.File,\n\t\t\tNoExcl)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = fbo.syncAllLocked(ctx, lState, NoExcl)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif jManager != nil && TLFJournalEnabled(fbo.config, fbo.id()) {\n\t\t\t// Can't use fbo.waitForJournalLocked here, since the\n\t\t\t// flushing won't actually complete.\n\t\t\terr := jManager.Wait(ctx, fbo.id())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tnewHead, _ := fbo.getHead(ctx, lState, mdNoCommit)\n\t\t\tfbo.cr.Resolve(\n\t\t\t\tctx, newHead.Revision(), kbfsmd.RevisionUninitialized)\n\t\t}\n\n\t\terr = fbo.cr.Wait(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Make sure we're stuck.\n\tisStuck, err := fbo.cr.isStuck()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !isStuck {\n\t\treturn errors.New(\"CR not stuck after trying to force conflict\")\n\t}\n\n\treturn nil\n}", "func Test_NewResources(t *testing.T) {\n\ttype args struct {\n\t\tbody []byte\n\t\theader map[string][]string\n\t\tbinding ResolvedTrigger\n\t}\n\ttests := []struct {\n\t\tname string\n\t\targs args\n\t\twant []json.RawMessage\n\t}{{\n\t\tname: \"empty\",\n\t\targs: args{\n\t\t\tbody: json.RawMessage{},\n\t\t\theader: map[string][]string{},\n\t\t\tbinding: ResolvedTrigger{\n\t\t\t\tTriggerTemplate: 
bldr.TriggerTemplate(\"tt\", \"namespace\"),\n\t\t\t\tTriggerBindings: []*triggersv1.TriggerBinding{bldr.TriggerBinding(\"tb\", \"namespace\")},\n\t\t\t},\n\t\t},\n\t\twant: []json.RawMessage{},\n\t}, {\n\t\tname: \"one resource template\",\n\t\targs: args{\n\t\t\tbody: json.RawMessage(`{\"foo\": \"bar\"}`),\n\t\t\theader: map[string][]string{\"one\": {\"1\"}},\n\t\t\tbinding: ResolvedTrigger{\n\t\t\t\tTriggerTemplate: bldr.TriggerTemplate(\"tt\", \"namespace\",\n\t\t\t\t\tbldr.TriggerTemplateSpec(\n\t\t\t\t\t\tbldr.TriggerTemplateParam(\"param1\", \"description\", \"\"),\n\t\t\t\t\t\tbldr.TriggerTemplateParam(\"param2\", \"description\", \"\"),\n\t\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"rt1\": \"$(params.param1)-$(params.param2)\"}`)),\n\t\t\t\t\t),\n\t\t\t\t),\n\t\t\t\tTriggerBindings: []*triggersv1.TriggerBinding{\n\t\t\t\t\tbldr.TriggerBinding(\"tb\", \"namespace\",\n\t\t\t\t\t\tbldr.TriggerBindingSpec(\n\t\t\t\t\t\t\tbldr.TriggerBindingParam(\"param1\", \"$(body.foo)\"),\n\t\t\t\t\t\t\tbldr.TriggerBindingParam(\"param2\", \"$(header.one)\"),\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\twant: []json.RawMessage{\n\t\t\tjson.RawMessage(`{\"rt1\": \"bar-1\"}`),\n\t\t},\n\t}, {\n\t\tname: \"multiple resource templates\",\n\t\targs: args{\n\t\t\tbody: json.RawMessage(`{\"foo\": \"bar\"}`),\n\t\t\theader: map[string][]string{\"one\": {\"1\"}},\n\t\t\tbinding: ResolvedTrigger{\n\t\t\t\tTriggerTemplate: bldr.TriggerTemplate(\"tt\", \"namespace\",\n\t\t\t\t\tbldr.TriggerTemplateSpec(\n\t\t\t\t\t\tbldr.TriggerTemplateParam(\"param1\", \"description\", \"\"),\n\t\t\t\t\t\tbldr.TriggerTemplateParam(\"param2\", \"description\", \"\"),\n\t\t\t\t\t\tbldr.TriggerTemplateParam(\"param3\", \"description\", \"default2\"),\n\t\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"rt1\": \"$(params.param1)-$(params.param2)\"}`)),\n\t\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"rt2\": \"$(params.param3)\"}`)),\n\t\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"rt3\": \"rt3\"}`)),\n\t\t\t\t\t),\n\t\t\t\t),\n\t\t\t\tTriggerBindings: []*triggersv1.TriggerBinding{\n\t\t\t\t\tbldr.TriggerBinding(\"tb\", \"namespace\",\n\t\t\t\t\t\tbldr.TriggerBindingSpec(\n\t\t\t\t\t\t\tbldr.TriggerBindingParam(\"param1\", \"$(body.foo)\"),\n\t\t\t\t\t\t\tbldr.TriggerBindingParam(\"param2\", \"$(header.one)\"),\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\twant: []json.RawMessage{\n\t\t\tjson.RawMessage(`{\"rt1\": \"bar-1\"}`),\n\t\t\tjson.RawMessage(`{\"rt2\": \"default2\"}`),\n\t\t\tjson.RawMessage(`{\"rt3\": \"rt3\"}`),\n\t\t},\n\t}, {\n\t\tname: \"one resource template with one uid\",\n\t\targs: args{\n\t\t\tbody: json.RawMessage(`{\"foo\": \"bar\"}`),\n\t\t\tbinding: ResolvedTrigger{\n\t\t\t\tTriggerTemplate: bldr.TriggerTemplate(\"tt\", \"namespace\",\n\t\t\t\t\tbldr.TriggerTemplateSpec(\n\t\t\t\t\t\tbldr.TriggerTemplateParam(\"param1\", \"description\", \"\"),\n\t\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"rt1\": \"$(params.param1)-$(uid)\"}`)),\n\t\t\t\t\t),\n\t\t\t\t),\n\t\t\t\tTriggerBindings: []*triggersv1.TriggerBinding{\n\t\t\t\t\tbldr.TriggerBinding(\"tb\", \"namespace\",\n\t\t\t\t\t\tbldr.TriggerBindingSpec(\n\t\t\t\t\t\t\tbldr.TriggerBindingParam(\"param1\", \"$(body.foo)\"),\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\twant: []json.RawMessage{\n\t\t\tjson.RawMessage(`{\"rt1\": \"bar-cbhtc\"}`),\n\t\t},\n\t}, {\n\t\tname: \"one resource template with three uid\",\n\t\targs: args{\n\t\t\tbody: 
json.RawMessage(`{\"foo\": \"bar\"}`),\n\t\t\tbinding: ResolvedTrigger{\n\t\t\t\tTriggerTemplate: bldr.TriggerTemplate(\"tt\", \"namespace\",\n\t\t\t\t\tbldr.TriggerTemplateSpec(\n\t\t\t\t\t\tbldr.TriggerTemplateParam(\"param1\", \"description\", \"\"),\n\t\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"rt1\": \"$(params.param1)-$(uid)-$(uid)\", \"rt2\": \"$(uid)\"}`)),\n\t\t\t\t\t),\n\t\t\t\t),\n\t\t\t\tTriggerBindings: []*triggersv1.TriggerBinding{\n\t\t\t\t\tbldr.TriggerBinding(\"tb\", \"namespace\",\n\t\t\t\t\t\tbldr.TriggerBindingSpec(\n\t\t\t\t\t\t\tbldr.TriggerBindingParam(\"param1\", \"$(body.foo)\"),\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\twant: []json.RawMessage{\n\t\t\tjson.RawMessage(`{\"rt1\": \"bar-cbhtc-cbhtc\", \"rt2\": \"cbhtc\"}`),\n\t\t},\n\t}, {\n\t\tname: \"multiple resource templates with multiple uid\",\n\t\targs: args{\n\t\t\tbody: json.RawMessage(`{\"foo\": \"bar\"}`),\n\t\t\tbinding: ResolvedTrigger{\n\t\t\t\tTriggerTemplate: bldr.TriggerTemplate(\"tt\", \"namespace\",\n\t\t\t\t\tbldr.TriggerTemplateSpec(\n\t\t\t\t\t\tbldr.TriggerTemplateParam(\"param1\", \"description\", \"\"),\n\t\t\t\t\t\tbldr.TriggerTemplateParam(\"param2\", \"description\", \"default2\"),\n\t\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"rt1\": \"$(params.param1)-$(uid)\", \"$(uid)\": \"$(uid)\"}`)),\n\t\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"rt2\": \"$(params.param2)-$(uid)\"}`)),\n\t\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"rt3\": \"rt3\"}`)),\n\t\t\t\t\t),\n\t\t\t\t),\n\t\t\t\tTriggerBindings: []*triggersv1.TriggerBinding{\n\t\t\t\t\tbldr.TriggerBinding(\"tb\", \"namespace\",\n\t\t\t\t\t\tbldr.TriggerBindingSpec(\n\t\t\t\t\t\t\tbldr.TriggerBindingParam(\"param1\", \"$(body.foo)\"),\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\twant: []json.RawMessage{\n\t\t\tjson.RawMessage(`{\"rt1\": \"bar-cbhtc\", \"cbhtc\": \"cbhtc\"}`),\n\t\t\tjson.RawMessage(`{\"rt2\": \"default2-cbhtc\"}`),\n\t\t\tjson.RawMessage(`{\"rt3\": \"rt3\"}`),\n\t\t},\n\t}, {\n\t\tname: \"one resource template multiple bindings\",\n\t\targs: args{\n\t\t\tbody: json.RawMessage(`{\"foo\": \"bar\"}`),\n\t\t\theader: map[string][]string{\"one\": {\"1\"}},\n\t\t\tbinding: ResolvedTrigger{\n\t\t\t\tTriggerTemplate: bldr.TriggerTemplate(\"tt\", \"namespace\",\n\t\t\t\t\tbldr.TriggerTemplateSpec(\n\t\t\t\t\t\tbldr.TriggerTemplateParam(\"param1\", \"description\", \"\"),\n\t\t\t\t\t\tbldr.TriggerTemplateParam(\"param2\", \"description\", \"\"),\n\t\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"rt1\": \"$(params.param1)-$(params.param2)\"}`)),\n\t\t\t\t\t),\n\t\t\t\t),\n\t\t\t\tTriggerBindings: []*triggersv1.TriggerBinding{\n\t\t\t\t\tbldr.TriggerBinding(\"tb\", \"namespace\",\n\t\t\t\t\t\tbldr.TriggerBindingSpec(\n\t\t\t\t\t\t\tbldr.TriggerBindingParam(\"param1\", \"$(body.foo)\"),\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t\tbldr.TriggerBinding(\"tb2\", \"namespace\",\n\t\t\t\t\t\tbldr.TriggerBindingSpec(\n\t\t\t\t\t\t\tbldr.TriggerBindingParam(\"param2\", \"$(header.one)\"),\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\twant: []json.RawMessage{\n\t\t\tjson.RawMessage(`{\"rt1\": \"bar-1\"}`),\n\t\t},\n\t}, {\n\t\tname: \"bindings with static values\",\n\t\targs: args{\n\t\t\tbody: json.RawMessage(`{\"foo\": \"bar\"}`),\n\t\t\tbinding: ResolvedTrigger{\n\t\t\t\tTriggerTemplate: bldr.TriggerTemplate(\"tt\", \"ns\", bldr.TriggerTemplateSpec(\n\t\t\t\t\tbldr.TriggerTemplateParam(\"p1\", \"\", 
\"\"),\n\t\t\t\t\tbldr.TriggerTemplateParam(\"p2\", \"\", \"\"),\n\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"p1\": \"$(params.p1)\", \"p2\": \"$(params.p2)\"}`)),\n\t\t\t\t),\n\t\t\t\t),\n\t\t\t\tTriggerBindings: []*triggersv1.TriggerBinding{\n\t\t\t\t\tbldr.TriggerBinding(\"tb\", \"ns\", bldr.TriggerBindingSpec(\n\t\t\t\t\t\tbldr.TriggerBindingParam(\"p1\", \"static_value\"),\n\t\t\t\t\t\tbldr.TriggerBindingParam(\"p2\", \"$(body.foo)\"),\n\t\t\t\t\t)),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\twant: []json.RawMessage{\n\t\t\tjson.RawMessage(`{\"p1\": \"static_value\", \"p2\": \"bar\"}`),\n\t\t},\n\t}, {\n\t\tname: \"bindings with combination of static values \",\n\t\targs: args{\n\t\t\tbody: json.RawMessage(`{\"foo\": \"fooValue\", \"bar\": \"barValue\"}`),\n\t\t\tbinding: ResolvedTrigger{\n\t\t\t\tTriggerTemplate: bldr.TriggerTemplate(\"tt\", \"ns\", bldr.TriggerTemplateSpec(\n\t\t\t\t\tbldr.TriggerTemplateParam(\"p1\", \"\", \"\"),\n\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"p1\": \"$(params.p1)\"`)),\n\t\t\t\t),\n\t\t\t\t),\n\t\t\t\tTriggerBindings: []*triggersv1.TriggerBinding{\n\t\t\t\t\tbldr.TriggerBinding(\"tb\", \"ns\", bldr.TriggerBindingSpec(\n\t\t\t\t\t\tbldr.TriggerBindingParam(\"p1\", \"Event values are - foo: $(body.foo); bar: $(body.bar)\"),\n\t\t\t\t\t)),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\twant: []json.RawMessage{\n\t\t\tjson.RawMessage(`{\"p1\": \"Event values are - foo: fooValue; bar: barValue\"`),\n\t\t},\n\t}, {\n\t\tname: \"event value is JSON string\",\n\t\targs: args{\n\t\t\tbody: json.RawMessage(`{\"a\": \"b\"}`),\n\t\t\tbinding: ResolvedTrigger{\n\t\t\t\tTriggerTemplate: bldr.TriggerTemplate(\"tt\", \"ns\", bldr.TriggerTemplateSpec(\n\t\t\t\t\tbldr.TriggerTemplateParam(\"p1\", \"\", \"\"),\n\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"p1\": \"$(params.p1)\"}`)),\n\t\t\t\t),\n\t\t\t\t),\n\t\t\t\tTriggerBindings: []*triggersv1.TriggerBinding{\n\t\t\t\t\tbldr.TriggerBinding(\"tb\", \"ns\", bldr.TriggerBindingSpec(\n\t\t\t\t\t\tbldr.TriggerBindingParam(\"p1\", \"$(body)\"),\n\t\t\t\t\t)),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\twant: []json.RawMessage{\n\t\t\tjson.RawMessage(`{\"p1\": \"{\\\"a\\\":\\\"b\\\"}\"}`),\n\t\t},\n\t}, {\n\t\tname: \"header event values\",\n\t\targs: args{\n\t\t\theader: map[string][]string{\n\t\t\t\t\"a\": {\"singlevalue\"},\n\t\t\t\t\"b\": {\"multiple\", \"values\"},\n\t\t\t},\n\t\t\tbinding: ResolvedTrigger{\n\t\t\t\tTriggerTemplate: bldr.TriggerTemplate(\"tt\", \"ns\", bldr.TriggerTemplateSpec(\n\t\t\t\t\tbldr.TriggerTemplateParam(\"p1\", \"\", \"\"),\n\t\t\t\t\tbldr.TriggerTemplateParam(\"p2\", \"\", \"\"),\n\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"p1\": \"$(params.p1)\",\"p2\": \"$(params.p2)\"}`)),\n\t\t\t\t),\n\t\t\t\t),\n\t\t\t\tTriggerBindings: []*triggersv1.TriggerBinding{\n\t\t\t\t\tbldr.TriggerBinding(\"tb\", \"ns\", bldr.TriggerBindingSpec(\n\t\t\t\t\t\tbldr.TriggerBindingParam(\"p1\", \"$(header.a)\"),\n\t\t\t\t\t\tbldr.TriggerBindingParam(\"p2\", \"$(header.b)\"),\n\t\t\t\t\t)),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\twant: []json.RawMessage{\n\t\t\tjson.RawMessage(`{\"p1\": \"singlevalue\",\"p2\": \"multiple,values\"}`),\n\t\t},\n\t}}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\t// This seeds Uid() to return 'cbhtc'\n\t\t\trand.Seed(0)\n\t\t\tparams, err := ResolveParams(tt.args.binding.TriggerBindings, tt.args.body, tt.args.header, tt.args.binding.TriggerTemplate.Spec.Params)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"ResolveParams() returned 
unexpected error: %s\", err)\n\t\t\t}\n\t\t\tgot := ResolveResources(tt.args.binding.TriggerTemplate, params)\n\t\t\tif diff := cmp.Diff(tt.want, got); diff != \"\" {\n\t\t\t\tstringDiff := cmp.Diff(convertJSONRawMessagesToString(tt.want), convertJSONRawMessagesToString(got))\n\t\t\t\tt.Errorf(\"ResolveResources(): -want +got: %s\", stringDiff)\n\t\t\t}\n\t\t})\n\t}\n}", "func TestActiveReplicatorPullFromCheckpointIgnored(t *testing.T) {\n\n\tbase.RequireNumTestBuckets(t, 2)\n\n\tbase.SetUpTestLogging(t, logger.LevelDebug, logger.KeyReplicate, logger.KeyHTTP, logger.KeyHTTPResp, logger.KeySync, logger.KeySyncMsg)\n\n\tconst (\n\t\tchangesBatchSize = 10\n\t\tnumRT2DocsInitial = 13 // 2 batches of changes\n\t\tnumRT2DocsTotal = 24 // 2 more batches\n\t)\n\n\t// Passive\n\ttb2 := base.GetTestBucket(t)\n\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: tb2,\n\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\"alice\": {\n\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\tExplicitChannels: utils.SetOf(\"alice\"),\n\t\t\t\t},\n\t\t\t},\n\t\t}},\n\t})\n\tdefer rt2.Close()\n\n\t// Active\n\ttb1 := base.GetTestBucket(t)\n\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: tb1,\n\t})\n\tdefer rt1.Close()\n\n\t// Create first batch of docs\n\tdocIDPrefix := t.Name() + \"doc\"\n\tfor i := 0; i < numRT2DocsInitial; i++ {\n\t\tresp := rt1.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, resp, http.StatusCreated)\n\t\trt1RevID := respRevID(t, resp)\n\t\tresp = rt2.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, resp, http.StatusCreated)\n\t\trt2RevID := respRevID(t, resp)\n\t\trequire.Equal(t, rt1RevID, rt2RevID)\n\t}\n\n\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1\n\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\tdefer srv.Close()\n\n\t// Build passiveDBURL with basic auth creds\n\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\trequire.NoError(t, err)\n\tpassiveDBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\tarConfig := db.ActiveReplicatorConfig{\n\t\tID: t.Name(),\n\t\tDirection: db.ActiveReplicatorTypePull,\n\t\tRemoteDBURL: passiveDBURL,\n\t\tActiveDB: &db.Database{\n\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t},\n\t\tContinuous: true,\n\t\tChangesBatchSize: changesBatchSize,\n\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t}\n\n\t// Create the first active replicator to pull from seq:0\n\tar := db.NewActiveReplicator(&arConfig)\n\n\tstartNumChangesRequestedFromZeroTotal := rt2.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\n\tassert.NoError(t, ar.Start())\n\n\t_, ok := base.WaitForStat(func() int64 {\n\t\treturn ar.Pull.Checkpointer.Stats().AlreadyKnownSequenceCount\n\t}, numRT2DocsInitial)\n\tassert.True(t, ok)\n\n\t// wait for all of the documents originally written to rt2 to arrive at rt1\n\tchangesResults, err := rt1.WaitForChanges(numRT2DocsInitial, \"/db/_changes?since=0\", \"\", true)\n\trequire.NoError(t, err)\n\trequire.Len(t, changesResults.Results, numRT2DocsInitial)\n\tdocIDsSeen := make(map[string]bool, numRT2DocsInitial)\n\tfor _, result := range changesResults.Results {\n\t\tdocIDsSeen[result.ID] = true\n\t}\n\tfor i := 0; i < numRT2DocsInitial; i++ {\n\t\tdocID := 
fmt.Sprintf(\"%s%d\", docIDPrefix, i)\n\t\tassert.True(t, docIDsSeen[docID])\n\n\t\t_, err := rt1.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\t\tassert.NoError(t, err)\n\t}\n\n\t// one _changes from seq:0 with initial number of docs sent\n\tnumChangesRequestedFromZeroTotal := rt2.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\tassert.Equal(t, startNumChangesRequestedFromZeroTotal+1, numChangesRequestedFromZeroTotal)\n\n\t// rev assertions\n\tnumRevsSentTotal := rt2.GetDatabase().DbStats.CBLReplicationPull().RevSendCount.Value()\n\tassert.Equal(t, int64(0), numRevsSentTotal)\n\tassert.Equal(t, int64(0), ar.Pull.Checkpointer.Stats().ProcessedSequenceCount)\n\tassert.Equal(t, int64(0), ar.Pull.Checkpointer.Stats().ExpectedSequenceCount)\n\n\t// checkpoint assertions\n\tassert.Equal(t, int64(0), ar.Pull.Checkpointer.Stats().GetCheckpointHitCount)\n\tassert.Equal(t, int64(1), ar.Pull.Checkpointer.Stats().GetCheckpointMissCount)\n\t// Since we bumped the checkpointer interval, we're only setting checkpoints on replicator close.\n\tassert.Equal(t, int64(0), ar.Pull.Checkpointer.Stats().SetCheckpointCount)\n\tar.Pull.Checkpointer.CheckpointNow()\n\tassert.Equal(t, int64(1), ar.Pull.Checkpointer.Stats().SetCheckpointCount)\n\n\tassert.NoError(t, ar.Stop())\n\n\t// Second batch of docs\n\tfor i := numRT2DocsInitial; i < numRT2DocsTotal; i++ {\n\t\tresp := rt1.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, resp, http.StatusCreated)\n\t\trt1RevID := respRevID(t, resp)\n\t\tresp = rt2.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, resp, http.StatusCreated)\n\t\trt2RevID := respRevID(t, resp)\n\t\trequire.Equal(t, rt1RevID, rt2RevID)\n\t}\n\n\t// Create a new replicator using the same config, which should use the checkpoint set from the first.\n\tar = db.NewActiveReplicator(&arConfig)\n\tdefer func() { assert.NoError(t, ar.Stop()) }()\n\tassert.NoError(t, ar.Start())\n\n\t_, ok = base.WaitForStat(func() int64 {\n\t\treturn ar.Pull.Checkpointer.Stats().AlreadyKnownSequenceCount\n\t}, numRT2DocsTotal-numRT2DocsInitial)\n\tassert.True(t, ok)\n\n\t// Make sure we've not started any more since:0 replications on rt2 since the first one\n\tendNumChangesRequestedFromZeroTotal := rt2.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\tassert.Equal(t, numChangesRequestedFromZeroTotal, endNumChangesRequestedFromZeroTotal)\n\n\t// make sure rt2 thinks it has sent all of the revs via a 2.x replicator\n\tnumRevsSentTotal = rt2.GetDatabase().DbStats.CBLReplicationPull().RevSendCount.Value()\n\tassert.Equal(t, int64(0), numRevsSentTotal)\n\tassert.Equal(t, int64(0), ar.Pull.Checkpointer.Stats().ProcessedSequenceCount)\n\tassert.Equal(t, int64(0), ar.Pull.Checkpointer.Stats().ExpectedSequenceCount)\n\n\t// assert the second active replicator stats\n\tassert.Equal(t, int64(1), ar.Pull.Checkpointer.Stats().GetCheckpointHitCount)\n\tassert.Equal(t, int64(0), ar.Pull.Checkpointer.Stats().GetCheckpointMissCount)\n\tassert.Equal(t, int64(0), ar.Pull.Checkpointer.Stats().SetCheckpointCount)\n\tar.Pull.Checkpointer.CheckpointNow()\n\tassert.Equal(t, int64(1), ar.Pull.Checkpointer.Stats().SetCheckpointCount)\n}", "func TestPostSlidesDocumentInvalidtemplateStorage(t *testing.T) {\n request := createPostSlidesDocumentRequest()\n request.templateStorage = 
invalidizeTestParamValue(request.templateStorage, \"templateStorage\", \"string\").(string)\n e := initializeTest(\"PostSlidesDocument\", \"templateStorage\", request.templateStorage)\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n r, _, e := getTestApiClient().DocumentApi.PostSlidesDocument(request)\n assertError(t, \"PostSlidesDocument\", \"templateStorage\", r.Code, e)\n}", "func TestShouldIgnoreRepeated(t *testing.T) {\n\tvar packetEvent PacketEvent = PacketEvent{\n\t\tTimeStamp: \"Sun Mar 08 20:02:59 EDT 2020\",\n\t\tDropReason: \"PolicyDrop-br-int/POL_TABLE\",\n\t\tSourceMac: \"16:39:19:fa:f8:40\",\n\t\tDestinationMac: \"62:58:da:98:01:97\",\n\t\tEtherType: \"IPv4\",\n\t\tSourceIP: \"10.1.1.1\",\n\t\tDestinationIP: \"10.1.1.2\",\n\t\tIPProto: \"UDP\",\n\t\tSourcePort: \"10023\",\n\t\tDestinationPort: \"53\",\n\t}\n\ttempdir, err := os.MkdirTemp(\"\", \"hostagent_test_\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer os.RemoveAll(tempdir)\n\tagent := testAgent()\n\tagent.config.OpFlexEndpointDir = tempdir\n\tagent.config.OpFlexServiceDir = tempdir\n\tagent.config.OpFlexSnatDir = tempdir\n\tagent.config.UplinkIface = \"eth10\"\n\tagent.config.NodeName = \"test-node\"\n\tagent.config.ServiceVlan = 4003\n\tagent.config.UplinkMacAdress = \"5a:fd:16:e5:e7:c0\"\n\tagent.config.DropLogExpiryTime = 10\n\tagent.config.DropLogRepeatIntervalTime = 2\n\tagent.run()\n\tfor i, pt := range podTests {\n\t\tif i%2 == 0 {\n\t\t\tos.WriteFile(filepath.Join(tempdir,\n\t\t\t\tpt.uuid+\"_\"+pt.cont+\"_\"+pt.veth+\".ep\"),\n\t\t\t\t[]byte(\"random gibberish\"), 0644)\n\t\t}\n\n\t\tpod := pod(pt.uuid, pt.namespace, pt.name, pt.eg, pt.sg, pt.qp)\n\t\tpod.Status.PodIP = pt.ip\n\t\tpod.Status.Phase = \"Running\"\n\t\tcnimd := cnimd(pt.namespace, pt.name, pt.ip, pt.cont, pt.veth)\n\t\tagent.epMetadata[pt.namespace+\"/\"+pt.name] =\n\t\t\tmap[string]*metadata.ContainerMetadata{\n\t\t\t\tcnimd.Id.ContId: cnimd,\n\t\t\t}\n\t\tagent.fakePodSource.Add(pod)\n\t}\n\ttime.Sleep(3000 * time.Millisecond)\n\tcurrTime, _ := time.Parse(time.UnixDate, \"Sun Mar 08 20:03:59 EDT 2020\")\n\terr = agent.processPacketEvent(&packetEvent, currTime)\n\tassert.Nil(t, err, \"Failed to process event\")\n\tpacketEvent.TimeStamp = \"Sun Mar 08 20:04:59 EDT 2020\"\n\tcurrTime = currTime.Add(time.Minute * 1)\n\tassert.Equal(t, true, agent.shouldIgnore(&packetEvent, currTime), \"repeated event prune test failed\")\n\tpacketEvent.TimeStamp = \"Sun Mar 08 20:06:59 EDT 2020\"\n\tcurrTime = currTime.Add(time.Minute * 5)\n\tassert.Equal(t, false, agent.shouldIgnore(&packetEvent, currTime), \"post event test failed\")\n\tfor _, pt := range podTests {\n\t\tpod := pod(pt.uuid, pt.namespace, pt.name, pt.eg, pt.sg, pt.qp)\n\t\tagent.fakePodSource.Delete(pod)\n\t}\n\tagent.stop()\n}", "func testFileDirConflict(t *testing.T, open bool) {\n\t// Prepare a filesystem.\n\troot := filepath.Join(testDir(t.Name()), fmt.Sprintf(\"open-%v\", open), \"fs-root\")\n\terr := os.RemoveAll(root)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfs := newTestFileSystem(root)\n\n\t// Create a file.\n\tfilepath := newSiaPath(\"dir1/file1\")\n\tfs.addTestSiaFile(filepath)\n\n\tif open {\n\t\t// Open the file. 
This shouldn't affect later checks.\n\t\tnode, err := fs.OpenSiaFile(filepath)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer func() {\n\t\t\terr := node.Close()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}()\n\t}\n\n\t// Make sure we can't create another file with the same name.\n\terr = fs.addTestSiaFileWithErr(filepath)\n\tif !errors.Contains(err, ErrExists) {\n\t\tt.Fatalf(\"Expected err %v, got %v\", ErrExists, err)\n\t}\n\n\t// Make sure we can't rename another file to the same name.\n\tfilepath2 := newSiaPath(\"dir1/file2\")\n\tfs.addTestSiaFile(filepath2)\n\terr = fs.RenameFile(filepath2, filepath)\n\tif !errors.Contains(err, ErrExists) {\n\t\tt.Fatalf(\"Expected err %v, got %v\", ErrExists, err)\n\t}\n\n\t// Make sure we (still) can't create another file with the same name.\n\terr = fs.addTestSiaFileWithErr(filepath)\n\tif !errors.Contains(err, ErrExists) {\n\t\tt.Fatalf(\"Expected err %v, got %v\", ErrExists, err)\n\t}\n\n\t// Make sure we can't create a dir with the same name.\n\terr = fs.NewSiaDir(filepath, modules.DefaultDirPerm)\n\tif !errors.Contains(err, ErrExists) {\n\t\tt.Fatalf(\"Expected err %v, got %v\", ErrExists, err)\n\t}\n\n\t// Create a dir.\n\tdirpath := newSiaPath(\"dir2/dir3\")\n\terr = fs.NewSiaDir(dirpath, modules.DefaultDirPerm)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif open {\n\t\t// Open the dir. This shouldn't affect later checks.\n\t\tnode, err := fs.OpenSiaDir(dirpath)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer func() {\n\t\t\terr := node.Close()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}()\n\t}\n\n\t// Make sure we CAN create another dir with the same name as the first\n\t// dir.\n\terr = fs.NewSiaDir(dirpath, modules.DefaultDirPerm)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Make sure we can't rename a dir to the same name as the first file.\n\terr = fs.RenameDir(dirpath, filepath)\n\tif !errors.Contains(err, ErrExists) {\n\t\tt.Fatalf(\"Expected err %v, got %v\", ErrExists, err)\n\t}\n\n\t// Make sure we still CAN create another dir with the same name as the first\n\t// dir.\n\terr = fs.NewSiaDir(dirpath, modules.DefaultDirPerm)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Make sure we can't create a file with the same name as the dir.\n\terr = fs.addTestSiaFileWithErr(dirpath)\n\tif !errors.Contains(err, ErrExists) {\n\t\tt.Fatalf(\"Expected err %v, got %v\", ErrExists, err)\n\t}\n\n\t// Make sure we can't rename a file to the same name as the first dir.\n\terr = fs.RenameFile(filepath, dirpath)\n\tif !errors.Contains(err, ErrExists) {\n\t\tt.Fatalf(\"Expected err %v, got %v\", ErrExists, err)\n\t}\n\n\t// Make sure we can't rename another dir to the same name as the first dir.\n\tdirpath2 := newSiaPath(\"dir2/dir4\")\n\terr = fs.NewSiaDir(dirpath2, modules.DefaultDirPerm)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = fs.RenameDir(dirpath2, dirpath)\n\tif !errors.Contains(err, ErrExists) {\n\t\tt.Fatalf(\"Expected err %v, got %v\", ErrExists, err)\n\t}\n}", "func TestReloadWithReadLock_PartialRegisterFailure(t *testing.T) {\n\trequire := require.New(t)\n\n\tresources := initVMRegistryTest(t)\n\n\tfactory1 := vms.NewMockFactory(resources.ctrl)\n\tfactory2 := vms.NewMockFactory(resources.ctrl)\n\tfactory3 := vms.NewMockFactory(resources.ctrl)\n\tfactory4 := vms.NewMockFactory(resources.ctrl)\n\n\tregisteredVms := map[ids.ID]vms.Factory{\n\t\tid1: factory1,\n\t\tid2: factory2,\n\t}\n\n\tunregisteredVms := map[ids.ID]vms.Factory{\n\t\tid3: factory3,\n\t\tid4: 
factory4,\n\t}\n\n\tresources.mockVMGetter.EXPECT().\n\t\tGet().\n\t\tTimes(1).\n\t\tReturn(registeredVms, unregisteredVms, nil)\n\tresources.mockVMRegisterer.EXPECT().\n\t\tRegisterWithReadLock(gomock.Any(), id3, factory3).\n\t\tTimes(1).\n\t\tReturn(errTest)\n\tresources.mockVMRegisterer.EXPECT().\n\t\tRegisterWithReadLock(gomock.Any(), id4, factory4).\n\t\tTimes(1).\n\t\tReturn(nil)\n\n\tinstalledVMs, failedVMs, err := resources.vmRegistry.ReloadWithReadLock(context.Background())\n\trequire.NoError(err)\n\trequire.Len(failedVMs, 1)\n\trequire.ErrorIs(failedVMs[id3], errTest)\n\trequire.Len(installedVMs, 1)\n\trequire.Equal(id4, installedVMs[0])\n}", "func TestRegisteringDuplicateAuthMethodPanics(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\trestest.AssertPanic(t, func() {\n\t\t\ts.Handle(\"model\",\n\t\t\t\tres.Auth(\"foo\", func(r res.AuthRequest) {\n\t\t\t\t\tr.OK(nil)\n\t\t\t\t}),\n\t\t\t\tres.Auth(\"bar\", func(r res.AuthRequest) {\n\t\t\t\t\tr.OK(nil)\n\t\t\t\t}),\n\t\t\t\tres.Auth(\"foo\", func(r res.AuthRequest) {\n\t\t\t\t\tr.OK(nil)\n\t\t\t\t}),\n\t\t\t)\n\t\t})\n\t}, nil, restest.WithoutReset)\n}", "func TestInsertMatchingMap(t *testing.T) {\n\t// Insert an expired map\n\ttestMapPool := NewControlMap(\n\t\tstore.NewMemStore(),\n\t\tconf.DefaultUpdateControlMapBootExpirationTimeSeconds,\n\t\tconf.DefaultUpdateControlMapBootExpirationTimeSeconds,\n\t)\n\tcm := &updatecontrolmap.UpdateControlMap{\n\t\tID: TEST_UUID,\n\t\tPriority: 1,\n\t\tStates: map[string]updatecontrolmap.UpdateControlMapState{\n\t\t\t\"ArtifactInstall\": updatecontrolmap.UpdateControlMapState{\n\t\t\t\tAction: \"continue\",\n\t\t\t},\n\t\t},\n\t}\n\ttestMapPool.Insert(cm.Stamp(1))\n\ttime.Sleep(2 * time.Second)\n\tactive, expired := testMapPool.Get(TEST_UUID)\n\trequire.Equal(t, 0, len(active))\n\trequire.Equal(t, 1, len(expired))\n\trequire.True(t, cm.Equal(expired[0]))\n\t// Insert a matching map\n\tcmn := &updatecontrolmap.UpdateControlMap{\n\t\tID: TEST_UUID,\n\t\tPriority: 1,\n\t\tStates: map[string]updatecontrolmap.UpdateControlMapState{\n\t\t\t\"ArtifactRebootEnter\": updatecontrolmap.UpdateControlMapState{\n\t\t\t\tAction: \"ArtifactRebootEnter\",\n\t\t\t},\n\t\t},\n\t}\n\ttestMapPool.Insert(cmn.Stamp(2))\n\t// Map should exist in the active map\n\tactive, expired = testMapPool.Get(TEST_UUID)\n\t// But not in the inactive anylonger\n\tassert.Equal(t, 0, len(expired))\n\tassert.Equal(t, 1, len(active))\n\tassert.Contains(t, active[0].States, \"ArtifactRebootEnter\", active)\n}", "func (r *JenkinsReconciler) createResourceIfNotPresent(resource runtime.Object, name string, namespace string) error {\n\tkey := types.NamespacedName{\n\t\tName: name,\n\t\tNamespace: namespace,\n\t}\n\t//\tr.logger.Info(\"Checking if object exists\", \"in Namespace\", key.Namespace, \"Resource.Name\", resource)\n\terr := r.client.Get(context.TODO(), key, resource)\n\tif err != nil && errors.IsAlreadyExists(err) {\n\t\tr.logger.Info(\"Object already exists\", \"in Namespace\", key.Namespace, \"Resource.Name\", resource, \": No need to requeue\")\n\t\tr.result = reconcile.Result{Requeue: false}\n\t\treturn err\n\t}\n\tif err != nil && errors.IsNotFound(err) {\n\t\tr.logger.Info(\"Creating a new Object\", \"in Namespace\", key.Namespace, \"Resource.Name\", resource)\n\t\terr = r.client.Create(context.TODO(), resource)\n\t\tif err != nil {\n\t\t\tr.logger.Info(\"Error while creating an object\", \"Object.Namespace\", key.Namespace, \"Object.Name\", resource, \"Error:\", err)\n\t\t\treturn err\n\t\t}\n\t\t// Resource 
created successfully - don't requeue\n\t\tr.result = reconcile.Result{Requeue: false}\n\t\treturn nil\n\t}\n\n\treturn nil\n}", "func TestMarker_FaultTolerance(t *testing.T) {\n\tdone := false\n\tfor i := 1; !done && i < 1000; i++ {\n\t\tt.Run(strconv.Itoa(i), func(t *testing.T) {\n\t\t\tvar count atomic.Int32\n\t\t\tcount.Store(int32(i))\n\t\t\tinj := errorfs.InjectorFunc(func(op errorfs.Op, path string) error {\n\t\t\t\t// Don't inject on Sync errors. They're fatal.\n\t\t\t\tif op == errorfs.OpFileSync {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tif v := count.Add(-1); v == 0 {\n\t\t\t\t\treturn errorfs.ErrInjected\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t})\n\n\t\t\tmem := vfs.NewMem()\n\t\t\tfs := errorfs.Wrap(mem, inj)\n\t\t\tmarkers := map[string]*Marker{}\n\t\t\tops := []struct {\n\t\t\t\top string\n\t\t\t\tname string\n\t\t\t\tvalue string\n\t\t\t}{\n\t\t\t\t{op: \"locate\", name: \"foo\", value: \"\"},\n\t\t\t\t{op: \"locate\", name: \"foo\", value: \"\"},\n\t\t\t\t{op: \"locate\", name: \"bar\", value: \"\"},\n\t\t\t\t{op: \"rm-obsolete\", name: \"foo\"},\n\t\t\t\t{op: \"move\", name: \"bar\", value: \"california\"},\n\t\t\t\t{op: \"rm-obsolete\", name: \"bar\"},\n\t\t\t\t{op: \"move\", name: \"bar\", value: \"california\"},\n\t\t\t\t{op: \"move\", name: \"bar\", value: \"new-york\"},\n\t\t\t\t{op: \"locate\", name: \"bar\", value: \"new-york\"},\n\t\t\t\t{op: \"move\", name: \"bar\", value: \"california\"},\n\t\t\t\t{op: \"rm-obsolete\", name: \"bar\"},\n\t\t\t\t{op: \"locate\", name: \"bar\", value: \"california\"},\n\t\t\t\t{op: \"move\", name: \"foo\", value: \"connecticut\"},\n\t\t\t\t{op: \"locate\", name: \"foo\", value: \"connecticut\"},\n\t\t\t}\n\n\t\t\tfor _, op := range ops {\n\t\t\t\trunOp := func() error {\n\t\t\t\t\tswitch op.op {\n\t\t\t\t\tcase \"locate\":\n\t\t\t\t\t\tm, v, err := LocateMarker(fs, \"\", op.name)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t\trequire.NotNil(t, m)\n\t\t\t\t\t\trequire.Equal(t, op.value, v)\n\t\t\t\t\t\tif existingMarker := markers[op.name]; existingMarker != nil {\n\t\t\t\t\t\t\trequire.NoError(t, existingMarker.Close())\n\t\t\t\t\t\t}\n\t\t\t\t\t\tmarkers[op.name] = m\n\t\t\t\t\t\treturn nil\n\t\t\t\t\tcase \"move\":\n\t\t\t\t\t\tm := markers[op.name]\n\t\t\t\t\t\trequire.NotNil(t, m)\n\t\t\t\t\t\treturn m.Move(op.value)\n\t\t\t\t\tcase \"rm-obsolete\":\n\t\t\t\t\t\tm := markers[op.name]\n\t\t\t\t\t\trequire.NotNil(t, m)\n\t\t\t\t\t\treturn m.RemoveObsolete()\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tpanic(\"unreachable\")\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Run the operation, if it fails with the injected\n\t\t\t\t// error, retry it exactly once. The retry should always\n\t\t\t\t// succeed.\n\t\t\t\terr := runOp()\n\t\t\t\tif errors.Is(err, errorfs.ErrInjected) {\n\t\t\t\t\terr = runOp()\n\t\t\t\t}\n\t\t\t\trequire.NoError(t, err)\n\t\t\t}\n\n\t\t\tfor _, m := range markers {\n\t\t\t\trequire.NoError(t, m.Close())\n\t\t\t}\n\n\t\t\t// Stop if the number of operations in the test case is\n\t\t\t// fewer than `i`.\n\t\t\tdone = count.Load() > 0\n\t\t})\n\t}\n}" ]
[ "0.71426773", "0.6064782", "0.5951557", "0.5742706", "0.56113195", "0.5535146", "0.55262333", "0.5498901", "0.54485303", "0.5439701", "0.5390984", "0.536594", "0.53543603", "0.53499657", "0.53442293", "0.53407156", "0.5339919", "0.52867866", "0.528656", "0.52314365", "0.5230959", "0.5230194", "0.52262557", "0.5203784", "0.5198192", "0.51514524", "0.51384914", "0.5128441", "0.51118934", "0.5094078", "0.5092825", "0.50822014", "0.5079273", "0.504105", "0.50324416", "0.5031358", "0.5019638", "0.50109136", "0.5004712", "0.500308", "0.49869466", "0.49851704", "0.49814442", "0.49787262", "0.4976898", "0.4972668", "0.49680918", "0.49599183", "0.4956052", "0.49551705", "0.49511456", "0.49496463", "0.49459827", "0.49405223", "0.49351612", "0.49351266", "0.49287713", "0.4926433", "0.49217805", "0.4914834", "0.49102694", "0.4901346", "0.48943242", "0.48851502", "0.487856", "0.48753956", "0.48694378", "0.486731", "0.4865028", "0.48636153", "0.48483196", "0.4847375", "0.48463398", "0.48417255", "0.4835037", "0.4831355", "0.4827074", "0.4824979", "0.4820594", "0.48120162", "0.48111135", "0.48045716", "0.4802893", "0.47984555", "0.47899887", "0.47849014", "0.47763908", "0.47757596", "0.47625208", "0.47615558", "0.47603408", "0.47556278", "0.4753682", "0.4742633", "0.4742302", "0.4740238", "0.47352135", "0.4735101", "0.47310072", "0.47273198" ]
0.7636146
0
TestCreateRetryConflictTagDiff ensures that attempts to create a mapping that result in resource conflicts that DO contain tag diffs cause the conflict error to be returned.
func TestCreateRetryConflictTagDiff(t *testing.T) {
	firstGet := true
	firstUpdate := true
	restInstance := &REST{
		strategy: NewStrategy(registryhostname.TestingRegistryHostnameRetriever(nil, "", testDefaultRegistryURL)),
		imageRegistry: &fakeImageRegistry{
			createImage: func(ctx context.Context, image *imageapi.Image) error {
				return nil
			},
		},
		imageStreamRegistry: &fakeImageStreamRegistry{
			getImageStream: func(ctx context.Context, id string, options *metav1.GetOptions) (*imageapi.ImageStream, error) {
				// For the first get, return a stream with a latest tag pointing to "original"
				if firstGet {
					firstGet = false
					stream := validImageStream()
					stream.Status = imageapi.ImageStreamStatus{
						Tags: map[string]imageapi.TagEventList{
							"latest": {Items: []imageapi.TagEvent{{DockerImageReference: "localhost:5000/someproject/somerepo:original"}}},
						},
					}
					return stream, nil
				}
				// For subsequent gets, return a stream with the latest tag changed to "newer"
				stream := validImageStream()
				stream.Status = imageapi.ImageStreamStatus{
					Tags: map[string]imageapi.TagEventList{
						"latest": {Items: []imageapi.TagEvent{{DockerImageReference: "localhost:5000/someproject/somerepo:newer"}}},
					},
				}
				return stream, nil
			},
			updateImageStreamStatus: func(ctx context.Context, repo *imageapi.ImageStream) (*imageapi.ImageStream, error) {
				// For the first update, return a conflict so that the stream
				// get/compare is retried.
				if firstUpdate {
					firstUpdate = false
					return nil, errors.NewConflict(imagegroup.Resource("imagestreams"), repo.Name, fmt.Errorf("resource modified"))
				}
				return repo, nil
			},
		},
	}
	obj, err := restInstance.Create(apirequest.NewDefaultContext(), validNewMappingWithName(), rest.ValidateAllObjectFunc, false)
	if err == nil {
		t.Fatalf("expected an error")
	}
	if !errors.IsConflict(err) {
		t.Errorf("expected a conflict error, got %v", err)
	}
	if obj != nil {
		t.Fatalf("expected a nil result")
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestCreateRetryConflictNoTagDiff(t *testing.T) {\n\tregistry := registryhostname.TestingRegistryHostnameRetriever(nil, \"\", testDefaultRegistryURL)\n\tfirstUpdate := true\n\trestInstance := &REST{\n\t\tstrategy: NewStrategy(registry),\n\t\timageRegistry: &fakeImageRegistry{\n\t\t\tcreateImage: func(ctx context.Context, image *imageapi.Image) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\timageStreamRegistry: &fakeImageStreamRegistry{\n\t\t\tgetImageStream: func(ctx context.Context, id string, options *metav1.GetOptions) (*imageapi.ImageStream, error) {\n\t\t\t\tstream := validImageStream()\n\t\t\t\tstream.Status = imageapi.ImageStreamStatus{\n\t\t\t\t\tTags: map[string]imageapi.TagEventList{\n\t\t\t\t\t\t\"latest\": {Items: []imageapi.TagEvent{{DockerImageReference: \"localhost:5000/someproject/somerepo:original\"}}},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\treturn stream, nil\n\t\t\t},\n\t\t\tupdateImageStreamStatus: func(ctx context.Context, repo *imageapi.ImageStream) (*imageapi.ImageStream, error) {\n\t\t\t\t// For the first update call, return a conflict to cause a retry of an\n\t\t\t\t// image stream whose tags haven't changed.\n\t\t\t\tif firstUpdate {\n\t\t\t\t\tfirstUpdate = false\n\t\t\t\t\treturn nil, errors.NewConflict(imagegroup.Resource(\"imagestreams\"), repo.Name, fmt.Errorf(\"resource modified\"))\n\t\t\t\t}\n\t\t\t\treturn repo, nil\n\t\t\t},\n\t\t},\n\t}\n\tobj, err := restInstance.Create(apirequest.NewDefaultContext(), validNewMappingWithName(), rest.ValidateAllObjectFunc, false)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error: %v\", err)\n\t}\n\tif obj == nil {\n\t\tt.Fatalf(\"expected a result\")\n\t}\n}", "func TestContainerCreationConflict(t *testing.T) {\n\tsConfig := makeSandboxConfig(\"foo\", \"bar\", \"1\", 0)\n\tconfig := makeContainerConfig(sConfig, \"pause\", \"iamimage\", 0, map[string]string{}, map[string]string{})\n\tcontainerName := makeContainerName(sConfig, config)\n\tconst sandboxId = \"sandboxid\"\n\tconst containerId = \"containerid\"\n\tconflictError := fmt.Errorf(\"Error response from daemon: Conflict. The name \\\"/%s\\\" is already in use by container %s. 
You have to remove (or rename) that container to be able to reuse that name.\",\n\t\tcontainerName, containerId)\n\tnoContainerError := fmt.Errorf(\"Error response from daemon: No such container: %s\", containerId)\n\trandomError := fmt.Errorf(\"random error\")\n\n\tfor desc, test := range map[string]struct {\n\t\tcreateError error\n\t\tremoveError error\n\t\texpectError error\n\t\texpectCalls []string\n\t\texpectFields int\n\t}{\n\t\t\"no create error\": {\n\t\t\texpectCalls: []string{\"create\"},\n\t\t\texpectFields: 6,\n\t\t},\n\t\t\"random create error\": {\n\t\t\tcreateError: randomError,\n\t\t\texpectError: randomError,\n\t\t\texpectCalls: []string{\"create\"},\n\t\t},\n\t\t\"conflict create error with successful remove\": {\n\t\t\tcreateError: conflictError,\n\t\t\texpectError: conflictError,\n\t\t\texpectCalls: []string{\"create\", \"remove\"},\n\t\t},\n\t\t\"conflict create error with random remove error\": {\n\t\t\tcreateError: conflictError,\n\t\t\tremoveError: randomError,\n\t\t\texpectError: conflictError,\n\t\t\texpectCalls: []string{\"create\", \"remove\"},\n\t\t},\n\t\t\"conflict create error with no such container remove error\": {\n\t\t\tcreateError: conflictError,\n\t\t\tremoveError: noContainerError,\n\t\t\texpectCalls: []string{\"create\", \"remove\", \"create\"},\n\t\t\texpectFields: 7,\n\t\t},\n\t} {\n\t\tt.Logf(\"TestCase: %s\", desc)\n\t\tds, fDocker, _ := newTestDockerService()\n\n\t\tif test.createError != nil {\n\t\t\tfDocker.InjectError(\"create\", test.createError)\n\t\t}\n\t\tif test.removeError != nil {\n\t\t\tfDocker.InjectError(\"remove\", test.removeError)\n\t\t}\n\t\tid, err := ds.CreateContainer(sandboxId, config, sConfig)\n\t\trequire.Equal(t, test.expectError, err)\n\t\tassert.NoError(t, fDocker.AssertCalls(test.expectCalls))\n\t\tif err == nil {\n\t\t\tc, err := fDocker.InspectContainer(id)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Len(t, strings.Split(c.Name, nameDelimiter), test.expectFields)\n\t\t}\n\t}\n}", "func TestConflictResolution(t *testing.T) {\n\tpoolB := mkPool(poolBUID, \"pool-b\", []string{\"10.0.10.0/24\", \"FF::0/48\"})\n\tpoolB.CreationTimestamp = meta_v1.Date(2022, 10, 16, 13, 30, 00, 0, time.UTC)\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t\tpoolB,\n\t}, true, false, nil)\n\n\tawait := fixture.AwaitPool(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != poolResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tpool := fixture.PatchedPool(action)\n\n\t\tif pool.Name != \"pool-b\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif !isPoolConflicting(pool) {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Pool B has not been marked conflicting\")\n\t}\n\n\t// All ranges of a conflicting pool must be disabled\n\tpoolBRanges, _ := fixture.lbIPAM.rangesStore.GetRangesForPool(\"pool-b\")\n\tfor _, r := range poolBRanges {\n\t\tif !r.internallyDisabled {\n\t\t\tt.Fatalf(\"Range '%s' from pool B hasn't been disabled\", ipNetStr(r.allocRange.CIDR()))\n\t\t}\n\t}\n\n\t// Phase 2, resolving the conflict\n\n\tawait = fixture.AwaitPool(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != poolResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tpool := fixture.PatchedPool(action)\n\n\t\tif pool.Name != \"pool-b\" 
{\n\t\t\treturn false\n\t\t}\n\n\t\tif isPoolConflicting(pool) {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tpoolB, err := fixture.poolClient.Get(context.Background(), \"pool-b\", meta_v1.GetOptions{})\n\tif err != nil {\n\t\tt.Fatal(poolB)\n\t}\n\n\t// Remove the conflicting range\n\tpoolB.Spec.Cidrs = []cilium_api_v2alpha1.CiliumLoadBalancerIPPoolCIDRBlock{\n\t\t{\n\t\t\tCidr: cilium_api_v2alpha1.IPv4orIPv6CIDR(\"FF::0/48\"),\n\t\t},\n\t}\n\n\t_, err = fixture.poolClient.Update(context.Background(), poolB, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Pool b has not de-conflicted\")\n\t}\n}", "func TestDefaultConflictResolverWithTombstoneRemote(t *testing.T) {\n\tbase.RequireNumTestBuckets(t, 2)\n\tif !base.TestUseXattrs() {\n\t\tt.Skip(\"This test only works with XATTRS enabled\")\n\t}\n\tbase.SetUpTestLogging(t, logger.LevelInfo, logger.KeyAll)\n\n\tdefaultConflictResolverWithTombstoneTests := []struct {\n\t\tname string // A unique name to identify the unit test.\n\t\tlocalBodyValues []string // Controls the local revision generation.\n\t\texpectedRevID string // Expected document revision ID.\n\t}{\n\t\t{\n\t\t\t// Revision tie with remote digest is lower than the local digest.\n\t\t\t// local generation = remote generation:\n\t\t\t//\t- e.g. local is 3-b, remote is 3-a(T)\n\t\t\tname: \"revGenTieRemoteDigestLower\",\n\t\t\tlocalBodyValues: []string{\"baz\", \"EADGBE\"},\n\t\t\texpectedRevID: \"4-0748692c1535b62f59b2c276cc2a8bda\",\n\t\t},\n\t\t{\n\t\t\t// Revision tie with remote digest is higher than the local digest.\n\t\t\t// local generation = remote generation:\n\t\t\t//\t- e.g. local is 3-b, remote is 3-c(T)\n\t\t\tname: \"revGenTieRemoteDigestHigher\",\n\t\t\tlocalBodyValues: []string{\"baz\", \"qux\"},\n\t\t\texpectedRevID: \"4-5afdb61ba968c9eaa7599e727c4c1b53\",\n\t\t},\n\t\t{\n\t\t\t// Local revision generation is higher than remote revision generation.\n\t\t\t// local generation > remote generation:\n\t\t\t// - e.g. local is 4-b, remote is 3-a(T)\n\t\t\tname: \"revGenRemoteLower\",\n\t\t\tlocalBodyValues: []string{\"baz\", \"qux\", \"grunt\"},\n\t\t\texpectedRevID: \"5-962dc965fd8e7fd2bc3ffbcab85d53ba\",\n\t\t},\n\t\t{\n\t\t\t// Local revision generation is lower than remote revision generation.\n\t\t\t// local generation < remote generation:\n\t\t\t//\t- e.g. 
local is 2-b, remote is 3-a(T)\n\t\t\tname: \"revGenRemoteHigher\",\n\t\t\tlocalBodyValues: []string{\"grunt\"},\n\t\t\texpectedRevID: \"3-cd4c29d9c84fc8b2a51c50e1234252c9\",\n\t\t},\n\t}\n\n\tfor _, test := range defaultConflictResolverWithTombstoneTests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\t// Passive\n\t\t\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\tTestBucket: base.GetTestBucket(t),\n\t\t\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\t\t\"alice\": {\n\t\t\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\t\t\tExplicitChannels: utils.SetOf(\"alice\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t})\n\t\t\tdefer rt2.Close()\n\n\t\t\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1\n\t\t\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\t\t\tdefer srv.Close()\n\n\t\t\t// Build passiveDBURL with basic auth creds\n\t\t\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\t\t\trequire.NoError(t, err)\n\t\t\tpassiveDBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\t\t\t// Active\n\t\t\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\tTestBucket: base.GetTestBucket(t),\n\t\t\t})\n\t\t\tdefer rt1.Close()\n\n\t\t\tdefaultConflictResolver, err := db.NewCustomConflictResolver(\n\t\t\t\t`function(conflict) { return defaultPolicy(conflict); }`)\n\t\t\trequire.NoError(t, err, \"Error creating custom conflict resolver\")\n\n\t\t\tconfig := db.ActiveReplicatorConfig{\n\t\t\t\tID: t.Name(),\n\t\t\t\tDirection: db.ActiveReplicatorTypePushAndPull,\n\t\t\t\tRemoteDBURL: passiveDBURL,\n\t\t\t\tActiveDB: &db.Database{\n\t\t\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t\t\t},\n\t\t\t\tContinuous: true,\n\t\t\t\tConflictResolverFunc: defaultConflictResolver,\n\t\t\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t\t\t}\n\n\t\t\t// Create the first revision of the document on rt2.\n\t\t\tdocID := test.name + \"foo\"\n\t\t\trt2RevIDCreated := createOrUpdateDoc(t, rt2, docID, \"\", \"foo\")\n\n\t\t\t// Create active replicator and start replication.\n\t\t\tar := db.NewActiveReplicator(&config)\n\t\t\trequire.NoError(t, ar.Start(), \"Error starting replication\")\n\t\t\tdefer func() { require.NoError(t, ar.Stop(), \"Error stopping replication\") }()\n\n\t\t\t// Wait for the original document revision written to rt2 to arrive at rt1.\n\t\t\trt1RevIDCreated := rt2RevIDCreated\n\t\t\trequire.NoError(t, rt1.WaitForCondition(func() bool {\n\t\t\t\tdoc, _ := rt1.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\t\t\t\treturn doc != nil && len(doc.Body()) > 0\n\t\t\t}))\n\t\t\trequireRevID(t, rt1, docID, rt1RevIDCreated)\n\n\t\t\t// Stop replication.\n\t\t\trequire.NoError(t, ar.Stop(), \"Error stopping replication\")\n\n\t\t\t// Update the document on rt2 to build a revision history.\n\t\t\trt2RevIDUpdated := createOrUpdateDoc(t, rt2, docID, rt2RevIDCreated, \"bar\")\n\n\t\t\t// Tombstone the document on rt2 to mark the tip of the revision history for deletion.\n\t\t\tresp := rt2.SendAdminRequest(http.MethodDelete, \"/db/\"+docID+\"?rev=\"+rt2RevIDUpdated, ``)\n\t\t\tassertStatus(t, resp, http.StatusOK)\n\t\t\trt2RevID := respRevID(t, resp)\n\t\t\tlog.Printf(\"rt2RevID: %s\", rt2RevID)\n\n\t\t\t// Ensure that the tombstone revision is written to rt2 bucket with an empty body.\n\t\t\twaitForTombstone(t, rt2, docID)\n\n\t\t\t// Update the document on rt1 with the specified 
body values.\n\t\t\trt1RevID := rt1RevIDCreated\n\t\t\tfor _, bodyValue := range test.localBodyValues {\n\t\t\t\trt1RevID = createOrUpdateDoc(t, rt1, docID, rt1RevID, bodyValue)\n\t\t\t}\n\n\t\t\t// Start replication.\n\t\t\trequire.NoError(t, ar.Start(), \"Error starting replication\")\n\n\t\t\t// Wait for default conflict resolution policy to be applied through replication and\n\t\t\t// the winning revision to be written to both rt1 and rt2 buckets. Check whether the\n\t\t\t// winning revision is a tombstone; tombstone revision wins over non-tombstone revision.\n\t\t\twaitForTombstone(t, rt1, docID)\n\t\t\twaitForTombstone(t, rt2, docID)\n\n\t\t\trequireRevID(t, rt1, docID, test.expectedRevID)\n\t\t\t// Wait for conflict resolved doc (tombstone) to be pulled to passive bucket\n\t\t\t// Then require it is the expected rev\n\t\t\trequire.NoError(t, rt2.WaitForCondition(func() bool {\n\t\t\t\tdoc, _ := rt2.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\t\t\t\treturn doc != nil && doc.SyncData.CurrentRev == test.expectedRevID\n\t\t\t}))\n\n\t\t\t// Ensure that the document body of the winning tombstone revision written to both\n\t\t\t// rt1 and rt2 is empty, i.e., An attempt to read the document body of a tombstone\n\t\t\t// revision via SDK should return a \"key not found\" error.\n\t\t\trequireErrorKeyNotFound(t, rt2, docID)\n\t\t\trequireErrorKeyNotFound(t, rt1, docID)\n\t\t})\n\t}\n}", "func TestCommitConflictRepeat4A(t *testing.T) {\n}", "func TestDefaultConflictResolverWithTombstoneLocal(t *testing.T) {\n\tbase.RequireNumTestBuckets(t, 2)\n\tif !base.TestUseXattrs() {\n\t\tt.Skip(\"This test only works with XATTRS enabled\")\n\t}\n\tbase.SetUpTestLogging(t, logger.LevelDebug, logger.KeyAll)\n\n\tdefaultConflictResolverWithTombstoneTests := []struct {\n\t\tname string // A unique name to identify the unit test.\n\t\tremoteBodyValues []string // Controls the remote revision generation.\n\t\texpectedRevID string // Expected document revision ID.\n\t}{\n\t\t{\n\t\t\t// Revision tie with local digest is lower than the remote digest.\n\t\t\t// local generation = remote generation:\n\t\t\t//\t- e.g. local is 3-a(T), remote is 3-b\n\t\t\tname: \"revGenTieLocalDigestLower\",\n\t\t\tremoteBodyValues: []string{\"baz\", \"EADGBE\"},\n\t\t\texpectedRevID: \"4-c6fe7cde8f7187705f9e048322a9c350\",\n\t\t},\n\t\t{\n\t\t\t// Revision tie with local digest is higher than the remote digest.\n\t\t\t// local generation = remote generation:\n\t\t\t//\t- e.g. local is 3-c(T), remote is 3-b\n\t\t\tname: \"revGenTieLocalDigestHigher\",\n\t\t\tremoteBodyValues: []string{\"baz\", \"qux\"},\n\t\t\texpectedRevID: \"4-a210e8a790415d7e842e78e1d051cb3d\",\n\t\t},\n\t\t{\n\t\t\t// Local revision generation is lower than remote revision generation.\n\t\t\t// local generation < remote generation:\n\t\t\t// - e.g. local is 3-a(T), remote is 4-b\n\t\t\tname: \"revGenLocalLower\",\n\t\t\tremoteBodyValues: []string{\"baz\", \"qux\", \"grunt\"},\n\t\t\texpectedRevID: \"5-fe3ac95144be01e9b455bfa163687f0e\",\n\t\t},\n\t\t{\n\t\t\t// Local revision generation is higher than remote revision generation.\n\t\t\t// local generation > remote generation:\n\t\t\t//\t- e.g. 
local is 3-a(T), remote is 2-b\n\t\t\tname: \"revGenLocalHigher\",\n\t\t\tremoteBodyValues: []string{\"baz\"},\n\t\t\texpectedRevID: \"4-232b1f34f6b9341c54435eaf5447d85d\",\n\t\t},\n\t}\n\n\tfor _, test := range defaultConflictResolverWithTombstoneTests {\n\t\tt.Run(test.name, func(tt *testing.T) {\n\t\t\t// Passive\n\t\t\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\tTestBucket: base.GetTestBucket(t),\n\t\t\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\t\t\"alice\": {\n\t\t\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\t\t\tExplicitChannels: utils.SetOf(\"alice\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t})\n\t\t\tdefer rt2.Close()\n\n\t\t\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1\n\t\t\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\t\t\tdefer srv.Close()\n\n\t\t\t// Build passiveDBURL with basic auth creds\n\t\t\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\t\t\trequire.NoError(t, err)\n\t\t\tpassiveDBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\t\t\t// Active\n\t\t\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\tTestBucket: base.GetTestBucket(t),\n\t\t\t})\n\t\t\tdefer rt1.Close()\n\n\t\t\tdefaultConflictResolver, err := db.NewCustomConflictResolver(\n\t\t\t\t`function(conflict) { return defaultPolicy(conflict); }`)\n\t\t\trequire.NoError(t, err, \"Error creating custom conflict resolver\")\n\n\t\t\tconfig := db.ActiveReplicatorConfig{\n\t\t\t\tID: t.Name(),\n\t\t\t\tDirection: db.ActiveReplicatorTypePushAndPull,\n\t\t\t\tRemoteDBURL: passiveDBURL,\n\t\t\t\tActiveDB: &db.Database{\n\t\t\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t\t\t},\n\t\t\t\tContinuous: true,\n\t\t\t\tConflictResolverFunc: defaultConflictResolver,\n\t\t\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t\t\t}\n\n\t\t\t// Create the first revision of the document on rt1.\n\t\t\tdocID := t.Name() + \"foo\"\n\t\t\trt1RevIDCreated := createOrUpdateDoc(t, rt1, docID, \"\", \"foo\")\n\n\t\t\t// Create active replicator and start replication.\n\t\t\tar := db.NewActiveReplicator(&config)\n\t\t\trequire.NoError(t, ar.Start(), \"Error starting replication\")\n\t\t\tdefer func() { require.NoError(t, ar.Stop(), \"Error stopping replication\") }()\n\n\t\t\t// Wait for the original document revision written to rt1 to arrive at rt2.\n\t\t\trt2RevIDCreated := rt1RevIDCreated\n\t\t\trequire.NoError(t, rt2.WaitForCondition(func() bool {\n\t\t\t\tdoc, _ := rt2.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\t\t\t\treturn doc != nil && len(doc.Body()) > 0\n\t\t\t}))\n\t\t\trequireRevID(t, rt2, docID, rt2RevIDCreated)\n\n\t\t\t// Stop replication.\n\t\t\trequire.NoError(t, ar.Stop(), \"Error stopping replication\")\n\n\t\t\t// Update the document on rt1 to build a revision history.\n\t\t\trt1RevIDUpdated := createOrUpdateDoc(t, rt1, docID, rt1RevIDCreated, \"bar\")\n\n\t\t\t// Tombstone the document on rt1 to mark the tip of the revision history for deletion.\n\t\t\tresp := rt1.SendAdminRequest(http.MethodDelete, \"/db/\"+docID+\"?rev=\"+rt1RevIDUpdated, ``)\n\t\t\tassertStatus(t, resp, http.StatusOK)\n\n\t\t\t// Ensure that the tombstone revision is written to rt1 bucket with an empty body.\n\t\t\twaitForTombstone(t, rt1, docID)\n\n\t\t\t// Update the document on rt2 with the specified body values.\n\t\t\trt2RevID := rt2RevIDCreated\n\t\t\tfor _, bodyValue := range 
test.remoteBodyValues {\n\t\t\t\trt2RevID = createOrUpdateDoc(t, rt2, docID, rt2RevID, bodyValue)\n\t\t\t}\n\n\t\t\t// Start replication.\n\t\t\trequire.NoError(t, ar.Start(), \"Error starting replication\")\n\n\t\t\t// Wait for default conflict resolution policy to be applied through replication and\n\t\t\t// the winning revision to be written to both rt1 and rt2 buckets. Check whether the\n\t\t\t// winning revision is a tombstone; tombstone revision wins over non-tombstone revision.\n\t\t\twaitForTombstone(t, rt2, docID)\n\t\t\twaitForTombstone(t, rt1, docID)\n\n\t\t\trequireRevID(t, rt2, docID, test.expectedRevID)\n\t\t\trequireRevID(t, rt1, docID, test.expectedRevID)\n\n\t\t\t// Ensure that the document body of the winning tombstone revision written to both\n\t\t\t// rt1 and rt2 is empty, i.e., an attempt to read the document body of a tombstone\n\t\t\t// revision via SDK should return a \"key not found\" error.\n\t\t\trequireErrorKeyNotFound(t, rt2, docID)\n\t\t\trequireErrorKeyNotFound(t, rt1, docID)\n\t\t})\n\t}\n}", "func TestActiveReplicatorPullConflict(t *testing.T) {\n\n\t// scenarios\n\tconflictResolutionTests := []struct {\n\t\tname string\n\t\tlocalRevisionBody db.Body\n\t\tlocalRevID string\n\t\tremoteRevisionBody db.Body\n\t\tremoteRevID string\n\t\tconflictResolver string\n\t\texpectedLocalBody db.Body\n\t\texpectedLocalRevID string\n\t\texpectedTombstonedRevID string\n\t\texpectedResolutionType db.ConflictResolutionType\n\t\tskipActiveLeafAssertion bool\n\t\tskipBodyAssertion bool\n\t}{\n\t\t{\n\t\t\tname: \"remoteWins\",\n\t\t\tlocalRevisionBody: db.Body{\"source\": \"local\"},\n\t\t\tlocalRevID: \"1-a\",\n\t\t\tremoteRevisionBody: db.Body{\"source\": \"remote\"},\n\t\t\tremoteRevID: \"1-b\",\n\t\t\tconflictResolver: `function(conflict) {return conflict.RemoteDocument;}`,\n\t\t\texpectedLocalBody: db.Body{\"source\": \"remote\"},\n\t\t\texpectedLocalRevID: \"1-b\",\n\t\t\texpectedResolutionType: db.ConflictResolutionRemote,\n\t\t},\n\t\t{\n\t\t\tname: \"merge\",\n\t\t\tlocalRevisionBody: db.Body{\"source\": \"local\"},\n\t\t\tlocalRevID: \"1-a\",\n\t\t\tremoteRevisionBody: db.Body{\"source\": \"remote\"},\n\t\t\tremoteRevID: \"1-b\",\n\t\t\tconflictResolver: `function(conflict) {\n\t\t\t\t\tvar mergedDoc = new Object();\n\t\t\t\t\tmergedDoc.source = \"merged\";\n\t\t\t\t\treturn mergedDoc;\n\t\t\t\t}`,\n\t\t\texpectedLocalBody: db.Body{\"source\": \"merged\"},\n\t\t\texpectedLocalRevID: db.CreateRevIDWithBytes(2, \"1-b\", []byte(`{\"source\":\"merged\"}`)), // rev for merged body, with parent 1-b\n\t\t\texpectedResolutionType: db.ConflictResolutionMerge,\n\t\t},\n\t\t{\n\t\t\tname: \"localWins\",\n\t\t\tlocalRevisionBody: db.Body{\"source\": \"local\"},\n\t\t\tlocalRevID: \"1-a\",\n\t\t\tremoteRevisionBody: db.Body{\"source\": \"remote\"},\n\t\t\tremoteRevID: \"1-b\",\n\t\t\tconflictResolver: `function(conflict) {return conflict.LocalDocument;}`,\n\t\t\texpectedLocalBody: db.Body{\"source\": \"local\"},\n\t\t\texpectedLocalRevID: db.CreateRevIDWithBytes(2, \"1-b\", []byte(`{\"source\":\"local\"}`)), // rev for local body, transposed under parent 1-b\n\t\t\texpectedResolutionType: db.ConflictResolutionLocal,\n\t\t},\n\t\t{\n\t\t\tname: \"twoTombstonesRemoteWin\",\n\t\t\tlocalRevisionBody: db.Body{\"_deleted\": true, \"source\": \"local\"},\n\t\t\tlocalRevID: \"1-a\",\n\t\t\tremoteRevisionBody: db.Body{\"_deleted\": true, \"source\": \"remote\"},\n\t\t\tremoteRevID: \"1-b\",\n\t\t\tconflictResolver: `function(conflict){}`,\n\t\t\texpectedLocalBody: db.Body{\"source\": 
\"remote\"},\n\t\t\texpectedLocalRevID: \"1-b\",\n\t\t\tskipActiveLeafAssertion: true,\n\t\t\tskipBodyAssertion: base.TestUseXattrs(),\n\t\t},\n\t\t{\n\t\t\tname: \"twoTombstonesLocalWin\",\n\t\t\tlocalRevisionBody: db.Body{\"_deleted\": true, \"source\": \"local\"},\n\t\t\tlocalRevID: \"1-b\",\n\t\t\tremoteRevisionBody: db.Body{\"_deleted\": true, \"source\": \"remote\"},\n\t\t\tremoteRevID: \"1-a\",\n\t\t\tconflictResolver: `function(conflict){}`,\n\t\t\texpectedLocalBody: db.Body{\"source\": \"local\"},\n\t\t\texpectedLocalRevID: \"1-b\",\n\t\t\tskipActiveLeafAssertion: true,\n\t\t\tskipBodyAssertion: base.TestUseXattrs(),\n\t\t},\n\t}\n\n\tfor _, test := range conflictResolutionTests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tbase.RequireNumTestBuckets(t, 2)\n\t\t\tbase.SetUpTestLogging(t, logger.LevelInfo, logger.KeyHTTP, logger.KeySync, logger.KeyChanges, logger.KeyCRUD)\n\n\t\t\t// Passive\n\t\t\ttb2 := base.GetTestBucket(t)\n\n\t\t\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\tTestBucket: tb2,\n\t\t\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\t\t\"alice\": {\n\t\t\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\t\t\tExplicitChannels: utils.SetOf(\"*\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t})\n\t\t\tdefer rt2.Close()\n\n\t\t\t// Create revision on rt2 (remote)\n\t\t\tdocID := test.name\n\t\t\tresp, err := rt2.PutDocumentWithRevID(docID, test.remoteRevID, \"\", test.remoteRevisionBody)\n\t\t\tassert.NoError(t, err)\n\t\t\tassertStatus(t, resp, http.StatusCreated)\n\t\t\trt2revID := respRevID(t, resp)\n\t\t\tassert.Equal(t, test.remoteRevID, rt2revID)\n\n\t\t\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1.\n\t\t\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\t\t\tdefer srv.Close()\n\n\t\t\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Add basic auth creds to target db URL\n\t\t\tpassiveDBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\t\t\t// Active\n\t\t\ttb1 := base.GetTestBucket(t)\n\n\t\t\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\tTestBucket: tb1,\n\t\t\t})\n\t\t\tdefer rt1.Close()\n\n\t\t\t// Create revision on rt1 (local)\n\t\t\tresp, err = rt1.PutDocumentWithRevID(docID, test.localRevID, \"\", test.localRevisionBody)\n\t\t\tassert.NoError(t, err)\n\t\t\tassertStatus(t, resp, http.StatusCreated)\n\t\t\trt1revID := respRevID(t, resp)\n\t\t\tassert.Equal(t, test.localRevID, rt1revID)\n\n\t\t\tcustomConflictResolver, err := db.NewCustomConflictResolver(test.conflictResolver)\n\t\t\trequire.NoError(t, err)\n\t\t\treplicationStats := base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name())\n\t\t\tar := db.NewActiveReplicator(&db.ActiveReplicatorConfig{\n\t\t\t\tID: t.Name(),\n\t\t\t\tDirection: db.ActiveReplicatorTypePull,\n\t\t\t\tRemoteDBURL: passiveDBURL,\n\t\t\t\tActiveDB: &db.Database{\n\t\t\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t\t\t},\n\t\t\t\tChangesBatchSize: 200,\n\t\t\t\tConflictResolverFunc: customConflictResolver,\n\t\t\t\tContinuous: true,\n\t\t\t\tReplicationStatsMap: replicationStats,\n\t\t\t})\n\t\t\tdefer func() { assert.NoError(t, ar.Stop()) }()\n\n\t\t\t// Start the replicator (implicit connect)\n\t\t\tassert.NoError(t, ar.Start())\n\n\t\t\twaitAndRequireCondition(t, func() bool { return ar.GetStatus().DocsRead == 1 }, \"Expecting DocsRead == 1\")\n\t\t\tswitch test.expectedResolutionType {\n\t\t\tcase 
db.ConflictResolutionLocal:\n\t\t\t\tassert.Equal(t, 1, int(replicationStats.ConflictResolvedLocalCount.Value()))\n\t\t\t\tassert.Equal(t, 0, int(replicationStats.ConflictResolvedMergedCount.Value()))\n\t\t\t\tassert.Equal(t, 0, int(replicationStats.ConflictResolvedRemoteCount.Value()))\n\t\t\tcase db.ConflictResolutionMerge:\n\t\t\t\tassert.Equal(t, 0, int(replicationStats.ConflictResolvedLocalCount.Value()))\n\t\t\t\tassert.Equal(t, 1, int(replicationStats.ConflictResolvedMergedCount.Value()))\n\t\t\t\tassert.Equal(t, 0, int(replicationStats.ConflictResolvedRemoteCount.Value()))\n\t\t\tcase db.ConflictResolutionRemote:\n\t\t\t\tassert.Equal(t, 0, int(replicationStats.ConflictResolvedLocalCount.Value()))\n\t\t\t\tassert.Equal(t, 0, int(replicationStats.ConflictResolvedMergedCount.Value()))\n\t\t\t\tassert.Equal(t, 1, int(replicationStats.ConflictResolvedRemoteCount.Value()))\n\t\t\tdefault:\n\t\t\t\tassert.Equal(t, 0, int(replicationStats.ConflictResolvedLocalCount.Value()))\n\t\t\t\tassert.Equal(t, 0, int(replicationStats.ConflictResolvedMergedCount.Value()))\n\t\t\t\tassert.Equal(t, 0, int(replicationStats.ConflictResolvedRemoteCount.Value()))\n\t\t\t}\n\t\t\t// wait for the document originally written to rt2 to arrive at rt1. Should end up as winner under default conflict resolution\n\n\t\t\tchangesResults, err := rt1.WaitForChanges(1, \"/db/_changes?since=0\", \"\", true)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Len(t, changesResults.Results, 1)\n\t\t\tassert.Equal(t, docID, changesResults.Results[0].ID)\n\t\t\tassert.Equal(t, test.expectedLocalRevID, changesResults.Results[0].Changes[0][\"rev\"])\n\t\t\tlog.Printf(\"Changes response is %+v\", changesResults)\n\n\t\t\tdoc, err := rt1.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, test.expectedLocalRevID, doc.SyncData.CurrentRev)\n\n\t\t\t// This is skipped for tombstone tests running with xattr as xattr tombstones don't have a body to assert\n\t\t\t// against\n\t\t\tif !test.skipBodyAssertion {\n\t\t\t\tassert.Equal(t, test.expectedLocalBody, doc.Body())\n\t\t\t}\n\n\t\t\tlog.Printf(\"Doc %s is %+v\", docID, doc)\n\t\t\tfor revID, revInfo := range doc.SyncData.History {\n\t\t\t\tlog.Printf(\"doc revision [%s]: %+v\", revID, revInfo)\n\t\t\t}\n\n\t\t\tif !test.skipActiveLeafAssertion {\n\t\t\t\t// Validate only one active leaf node remains after conflict resolution, and that all parents\n\t\t\t\t// of leaves have empty bodies\n\t\t\t\tactiveCount := 0\n\t\t\t\tfor _, revID := range doc.SyncData.History.GetLeaves() {\n\t\t\t\t\trevInfo, ok := doc.SyncData.History[revID]\n\t\t\t\t\trequire.True(t, ok)\n\t\t\t\t\tif !revInfo.Deleted {\n\t\t\t\t\t\tactiveCount++\n\t\t\t\t\t}\n\t\t\t\t\tif revInfo.Parent != \"\" {\n\t\t\t\t\t\tparentRevInfo, ok := doc.SyncData.History[revInfo.Parent]\n\t\t\t\t\t\trequire.True(t, ok)\n\t\t\t\t\t\tassert.True(t, parentRevInfo.Body == nil)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tassert.Equal(t, 1, activeCount)\n\t\t\t}\n\t\t})\n\t}\n}", "func TestPoolInternalConflict(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\", \"10.0.10.64/28\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, false, nil)\n\n\tawait := fixture.AwaitPool(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != poolResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tpool := fixture.PatchedPool(action)\n\n\t\treturn 
isPoolConflicting(pool)\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected pool to be marked conflicting\")\n\t}\n\n\tawait = fixture.AwaitPool(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != poolResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tpool := fixture.PatchedPool(action)\n\n\t\treturn !isPoolConflicting(pool)\n\t}, 2*time.Second)\n\n\tpool, err := fixture.poolClient.Get(context.Background(), \"pool-a\", meta_v1.GetOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tpool.Spec.Cidrs = []cilium_api_v2alpha1.CiliumLoadBalancerIPPoolCIDRBlock{\n\t\t{\n\t\t\tCidr: \"10.0.10.0/24\",\n\t\t},\n\t}\n\n\t_, err = fixture.poolClient.Update(context.Background(), pool, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected pool to be un-marked conflicting\")\n\t}\n}", "func TestCreateRetryUnrecoverable(t *testing.T) {\n\tregistry := registryhostname.TestingRegistryHostnameRetriever(nil, \"\", testDefaultRegistryURL)\n\trestInstance := &REST{\n\t\tstrategy: NewStrategy(registry),\n\t\timageRegistry: &fakeImageRegistry{\n\t\t\tcreateImage: func(ctx context.Context, image *imageapi.Image) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\timageStreamRegistry: &fakeImageStreamRegistry{\n\t\t\tgetImageStream: func(ctx context.Context, id string, options *metav1.GetOptions) (*imageapi.ImageStream, error) {\n\t\t\t\treturn validImageStream(), nil\n\t\t\t},\n\t\t\tlistImageStreams: func(ctx context.Context, options *metainternal.ListOptions) (*imageapi.ImageStreamList, error) {\n\t\t\t\ts := validImageStream()\n\t\t\t\treturn &imageapi.ImageStreamList{Items: []imageapi.ImageStream{*s}}, nil\n\t\t\t},\n\t\t\tupdateImageStreamStatus: func(ctx context.Context, repo *imageapi.ImageStream) (*imageapi.ImageStream, error) {\n\t\t\t\treturn nil, errors.NewServiceUnavailable(\"unrecoverable error\")\n\t\t\t},\n\t\t},\n\t}\n\tobj, err := restInstance.Create(apirequest.NewDefaultContext(), validNewMappingWithName(), rest.ValidateAllObjectFunc, false)\n\tif err == nil {\n\t\tt.Errorf(\"expected an error\")\n\t}\n\tif obj != nil {\n\t\tt.Fatalf(\"expected a nil result\")\n\t}\n}", "func CreateContainerConflict(t goatest.TInterface, ctx context.Context, service *goa.Service, ctrl app.ContainerController, command []string, entrypoint []string, env []string, image string, name string, sslRedirect bool, volumes []string, workingDir *string) (http.ResponseWriter, error) {\n\t// Setup service\n\tvar (\n\t\tlogBuf bytes.Buffer\n\t\tresp interface{}\n\n\t\trespSetter goatest.ResponseSetterFunc = func(r interface{}) { resp = r }\n\t)\n\tif service == nil {\n\t\tservice = goatest.Service(&logBuf, respSetter)\n\t} else {\n\t\tlogger := log.New(&logBuf, \"\", log.Ltime)\n\t\tservice.WithLogger(goa.NewLogger(logger))\n\t\tnewEncoder := func(io.Writer) goa.Encoder { return respSetter }\n\t\tservice.Encoder = goa.NewHTTPEncoder() // Make sure the code ends up using this encoder\n\t\tservice.Encoder.Register(newEncoder, \"*/*\")\n\t}\n\n\t// Setup request context\n\trw := httptest.NewRecorder()\n\tquery := url.Values{}\n\t{\n\t\tsliceVal := command\n\t\tquery[\"command\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := entrypoint\n\t\tquery[\"entrypoint\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := env\n\t\tquery[\"env\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{image}\n\t\tquery[\"image\"] = 
sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{name}\n\t\tquery[\"name\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{fmt.Sprintf(\"%v\", sslRedirect)}\n\t\tquery[\"sslRedirect\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := volumes\n\t\tquery[\"volumes\"] = sliceVal\n\t}\n\tif workingDir != nil {\n\t\tsliceVal := []string{*workingDir}\n\t\tquery[\"workingDir\"] = sliceVal\n\t}\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"/api/v2/container/create\"),\n\t\tRawQuery: query.Encode(),\n\t}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\tpanic(\"invalid test \" + err.Error()) // bug\n\t}\n\tprms := url.Values{}\n\t{\n\t\tsliceVal := command\n\t\tprms[\"command\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := entrypoint\n\t\tprms[\"entrypoint\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := env\n\t\tprms[\"env\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{image}\n\t\tprms[\"image\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{name}\n\t\tprms[\"name\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{fmt.Sprintf(\"%v\", sslRedirect)}\n\t\tprms[\"sslRedirect\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := volumes\n\t\tprms[\"volumes\"] = sliceVal\n\t}\n\tif workingDir != nil {\n\t\tsliceVal := []string{*workingDir}\n\t\tprms[\"workingDir\"] = sliceVal\n\t}\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tgoaCtx := goa.NewContext(goa.WithAction(ctx, \"ContainerTest\"), rw, req, prms)\n\tcreateCtx, _err := app.NewCreateContainerContext(goaCtx, req, service)\n\tif _err != nil {\n\t\te, ok := _err.(goa.ServiceError)\n\t\tif !ok {\n\t\t\tpanic(\"invalid test data \" + _err.Error()) // bug\n\t\t}\n\t\treturn nil, e\n\t}\n\n\t// Perform action\n\t_err = ctrl.Create(createCtx)\n\n\t// Validate response\n\tif _err != nil {\n\t\tt.Fatalf(\"controller returned %+v, logs:\\n%s\", _err, logBuf.String())\n\t}\n\tif rw.Code != 409 {\n\t\tt.Errorf(\"invalid response status code: got %+v, expected 409\", rw.Code)\n\t}\n\tvar mt error\n\tif resp != nil {\n\t\tvar _ok bool\n\t\tmt, _ok = resp.(error)\n\t\tif !_ok {\n\t\t\tt.Fatalf(\"invalid response media: got variable of type %T, value %+v, expected instance of error\", resp, resp)\n\t\t}\n\t}\n\n\t// Return results\n\treturn rw, mt\n}", "func TestCommitConflictRollback4A(t *testing.T) {\n}", "func TestCommitConflictRace4A(t *testing.T) {\n}", "func TestActiveReplicatorPushAndPullConflict(t *testing.T) {\n\n\t// scenarios\n\tconflictResolutionTests := []struct {\n\t\tname string\n\t\tlocalRevisionBody []byte\n\t\tlocalRevID string\n\t\tremoteRevisionBody []byte\n\t\tremoteRevID string\n\t\tcommonAncestorRevID string\n\t\tconflictResolver string\n\t\texpectedBody []byte\n\t\texpectedRevID string\n\t\texpectedTombstonedRevID string\n\t}{\n\t\t{\n\t\t\tname: \"remoteWins\",\n\t\t\tlocalRevisionBody: []byte(`{\"source\": \"local\"}`),\n\t\t\tlocalRevID: \"1-a\",\n\t\t\tremoteRevisionBody: []byte(`{\"source\": \"remote\"}`),\n\t\t\tremoteRevID: \"1-b\",\n\t\t\tconflictResolver: `function(conflict) {return conflict.RemoteDocument;}`,\n\t\t\texpectedBody: []byte(`{\"source\": \"remote\"}`),\n\t\t\texpectedRevID: \"1-b\",\n\t\t},\n\t\t{\n\t\t\tname: \"merge\",\n\t\t\tlocalRevisionBody: []byte(`{\"source\": \"local\"}`),\n\t\t\tlocalRevID: \"1-a\",\n\t\t\tremoteRevisionBody: []byte(`{\"source\": \"remote\"}`),\n\t\t\tremoteRevID: \"1-b\",\n\t\t\tconflictResolver: `function(conflict) {\n\t\t\t\t\t\t\tvar mergedDoc = new Object();\n\t\t\t\t\t\t\tmergedDoc.source = \"merged\";\n\t\t\t\t\t\t\treturn mergedDoc;\n\t\t\t\t\t\t}`,\n\t\t\texpectedBody: 
[]byte(`{\"source\": \"merged\"}`),\n\t\t\texpectedRevID: db.CreateRevIDWithBytes(2, \"1-b\", []byte(`{\"source\":\"merged\"}`)), // rev for merged body, with parent 1-b\n\t\t},\n\t\t{\n\t\t\tname: \"localWins\",\n\t\t\tlocalRevisionBody: []byte(`{\"source\": \"local\"}`),\n\t\t\tlocalRevID: \"1-a\",\n\t\t\tremoteRevisionBody: []byte(`{\"source\": \"remote\"}`),\n\t\t\tremoteRevID: \"1-b\",\n\t\t\tconflictResolver: `function(conflict) {return conflict.LocalDocument;}`,\n\t\t\texpectedBody: []byte(`{\"source\": \"local\"}`),\n\t\t\texpectedRevID: db.CreateRevIDWithBytes(2, \"1-b\", []byte(`{\"source\":\"local\"}`)), // rev for local body, transposed under parent 1-b\n\t\t},\n\t\t{\n\t\t\tname: \"localWinsRemoteTombstone\",\n\t\t\tlocalRevisionBody: []byte(`{\"source\": \"local\"}`),\n\t\t\tlocalRevID: \"2-a\",\n\t\t\tremoteRevisionBody: []byte(`{\"_deleted\": true}`),\n\t\t\tremoteRevID: \"2-b\",\n\t\t\tcommonAncestorRevID: \"1-a\",\n\t\t\tconflictResolver: `function(conflict) {return conflict.LocalDocument;}`,\n\t\t\texpectedBody: []byte(`{\"source\": \"local\"}`),\n\t\t\texpectedRevID: db.CreateRevIDWithBytes(3, \"2-b\", []byte(`{\"source\":\"local\"}`)), // rev for local body, transposed under parent 2-b\n\t\t},\n\t}\n\n\tfor _, test := range conflictResolutionTests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tbase.RequireNumTestBuckets(t, 2)\n\t\t\tbase.SetUpTestLogging(t, logger.LevelInfo, logger.KeyHTTP, logger.KeySync, logger.KeyChanges, logger.KeyCRUD)\n\n\t\t\t// Passive\n\t\t\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\tTestBucket: base.GetTestBucket(t),\n\t\t\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\t\t\"alice\": {\n\t\t\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\t\t\tExplicitChannels: utils.SetOf(\"*\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t})\n\t\t\tdefer rt2.Close()\n\n\t\t\tvar localRevisionBody db.Body\n\t\t\tassert.NoError(t, json.Unmarshal(test.localRevisionBody, &localRevisionBody))\n\n\t\t\tvar remoteRevisionBody db.Body\n\t\t\tassert.NoError(t, json.Unmarshal(test.remoteRevisionBody, &remoteRevisionBody))\n\n\t\t\tvar expectedLocalBody db.Body\n\t\t\tassert.NoError(t, json.Unmarshal(test.expectedBody, &expectedLocalBody))\n\n\t\t\t// Create revision on rt2 (remote)\n\t\t\tdocID := test.name\n\n\t\t\tif test.commonAncestorRevID != \"\" {\n\t\t\t\tresp, err := rt2.PutDocumentWithRevID(docID, test.commonAncestorRevID, \"\", remoteRevisionBody)\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassertStatus(t, resp, http.StatusCreated)\n\t\t\t\trt2revID := respRevID(t, resp)\n\t\t\t\tassert.Equal(t, test.commonAncestorRevID, rt2revID)\n\t\t\t}\n\n\t\t\tresp, err := rt2.PutDocumentWithRevID(docID, test.remoteRevID, test.commonAncestorRevID, remoteRevisionBody)\n\t\t\tassert.NoError(t, err)\n\t\t\tassertStatus(t, resp, http.StatusCreated)\n\t\t\trt2revID := respRevID(t, resp)\n\t\t\tassert.Equal(t, test.remoteRevID, rt2revID)\n\n\t\t\tremoteDoc, err := rt2.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalSync)\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1.\n\t\t\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\t\t\tdefer srv.Close()\n\n\t\t\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Add basic auth creds to target db URL\n\t\t\tpassiveDBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\t\t\t// Active\n\t\t\trt1 := 
NewRestTester(t, &RestTesterConfig{\n\t\t\t\tTestBucket: base.GetTestBucket(t),\n\t\t\t})\n\t\t\tdefer rt1.Close()\n\n\t\t\t// Create revision on rt1 (local)\n\t\t\tif test.commonAncestorRevID != \"\" {\n\t\t\t\tresp, err = rt1.PutDocumentWithRevID(docID, test.commonAncestorRevID, \"\", localRevisionBody)\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassertStatus(t, resp, http.StatusCreated)\n\t\t\t\trt1revID := respRevID(t, resp)\n\t\t\t\tassert.Equal(t, test.commonAncestorRevID, rt1revID)\n\t\t\t}\n\n\t\t\tresp, err = rt1.PutDocumentWithRevID(docID, test.localRevID, test.commonAncestorRevID, localRevisionBody)\n\t\t\tassert.NoError(t, err)\n\t\t\tassertStatus(t, resp, http.StatusCreated)\n\t\t\trt1revID := respRevID(t, resp)\n\t\t\tassert.Equal(t, test.localRevID, rt1revID)\n\n\t\t\tlocalDoc, err := rt1.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalSync)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tcustomConflictResolver, err := db.NewCustomConflictResolver(test.conflictResolver)\n\t\t\trequire.NoError(t, err)\n\t\t\tar := db.NewActiveReplicator(&db.ActiveReplicatorConfig{\n\t\t\t\tID: t.Name(),\n\t\t\t\tDirection: db.ActiveReplicatorTypePushAndPull,\n\t\t\t\tRemoteDBURL: passiveDBURL,\n\t\t\t\tActiveDB: &db.Database{\n\t\t\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t\t\t},\n\t\t\t\tChangesBatchSize: 200,\n\t\t\t\tConflictResolverFunc: customConflictResolver,\n\t\t\t\tContinuous: true,\n\t\t\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t\t\t})\n\t\t\tdefer func() { assert.NoError(t, ar.Stop()) }()\n\n\t\t\t// Start the replicator (implicit connect)\n\t\t\tassert.NoError(t, ar.Start())\n\t\t\t// wait for the document originally written to rt2 to arrive at rt1. Should end up as winner under default conflict resolution\n\t\t\tbase.WaitForStat(func() int64 {\n\t\t\t\treturn ar.GetStatus().DocsWritten\n\t\t\t}, 1)\n\t\t\tlog.Printf(\"========================Replication should be done, checking with changes\")\n\n\t\t\t// Validate results on the local (rt1)\n\t\t\tchangesResults, err := rt1.WaitForChanges(1, fmt.Sprintf(\"/db/_changes?since=%d\", localDoc.Sequence), \"\", true)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Len(t, changesResults.Results, 1)\n\t\t\tassert.Equal(t, docID, changesResults.Results[0].ID)\n\t\t\tassert.Equal(t, test.expectedRevID, changesResults.Results[0].Changes[0][\"rev\"])\n\t\t\tlog.Printf(\"Changes response is %+v\", changesResults)\n\n\t\t\trawDocResponse := rt1.SendAdminRequest(http.MethodGet, \"/db/_raw/\"+docID, \"\")\n\t\t\tlog.Printf(\"Raw response: %s\", rawDocResponse.Body.Bytes())\n\n\t\t\tdocResponse := rt1.SendAdminRequest(http.MethodGet, \"/db/\"+docID, \"\")\n\t\t\tlog.Printf(\"Non-raw response: %s\", docResponse.Body.Bytes())\n\n\t\t\tdoc, err := rt1.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, test.expectedRevID, doc.SyncData.CurrentRev)\n\t\t\tassert.Equal(t, expectedLocalBody, doc.Body())\n\t\t\tlog.Printf(\"Doc %s is %+v\", docID, doc)\n\t\t\tlog.Printf(\"Doc %s attachments are %+v\", docID, doc.Attachments)\n\t\t\tfor revID, revInfo := range doc.SyncData.History {\n\t\t\t\tlog.Printf(\"doc revision [%s]: %+v\", revID, revInfo)\n\t\t\t}\n\n\t\t\t// Validate only one active leaf node remains after conflict resolution, and that all parents\n\t\t\t// of leaves have empty bodies\n\t\t\tactiveCount := 0\n\t\t\tfor _, revID := range doc.SyncData.History.GetLeaves() {\n\t\t\t\trevInfo, 
ok := doc.SyncData.History[revID]\n\t\t\t\trequire.True(t, ok)\n\t\t\t\tif !revInfo.Deleted {\n\t\t\t\t\tactiveCount++\n\t\t\t\t}\n\t\t\t\tif revInfo.Parent != \"\" {\n\t\t\t\t\tparentRevInfo, ok := doc.SyncData.History[revInfo.Parent]\n\t\t\t\t\trequire.True(t, ok)\n\t\t\t\t\tassert.True(t, parentRevInfo.Body == nil)\n\t\t\t\t}\n\t\t\t}\n\t\t\tassert.Equal(t, 1, activeCount)\n\n\t\t\t// Validate results on the remote (rt2)\n\t\t\trt2Since := remoteDoc.Sequence\n\t\t\tif test.expectedRevID == test.remoteRevID {\n\t\t\t\t// no changes should have been pushed back up to rt2, because this rev won.\n\t\t\t\trt2Since = 0\n\t\t\t}\n\t\t\tchangesResults, err = rt2.WaitForChanges(1, fmt.Sprintf(\"/db/_changes?since=%d\", rt2Since), \"\", true)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Len(t, changesResults.Results, 1)\n\t\t\tassert.Equal(t, docID, changesResults.Results[0].ID)\n\t\t\tassert.Equal(t, test.expectedRevID, changesResults.Results[0].Changes[0][\"rev\"])\n\t\t\tlog.Printf(\"Changes response is %+v\", changesResults)\n\n\t\t\tdoc, err = rt2.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, test.expectedRevID, doc.SyncData.CurrentRev)\n\t\t\tassert.Equal(t, expectedLocalBody, doc.Body())\n\t\t\tlog.Printf(\"Remote Doc %s is %+v\", docID, doc)\n\t\t\tlog.Printf(\"Remote Doc %s attachments are %+v\", docID, doc.Attachments)\n\t\t\tfor revID, revInfo := range doc.SyncData.History {\n\t\t\t\tlog.Printf(\"doc revision [%s]: %+v\", revID, revInfo)\n\t\t\t}\n\n\t\t\t// Validate only one active leaf node remains after conflict resolution, and that all parents\n\t\t\t// of leaves have empty bodies\n\t\t\tactiveCount = 0\n\t\t\tfor _, revID := range doc.SyncData.History.GetLeaves() {\n\t\t\t\trevInfo, ok := doc.SyncData.History[revID]\n\t\t\t\trequire.True(t, ok)\n\t\t\t\tif !revInfo.Deleted {\n\t\t\t\t\tactiveCount++\n\t\t\t\t}\n\t\t\t\tif revInfo.Parent != \"\" {\n\t\t\t\t\tparentRevInfo, ok := doc.SyncData.History[revInfo.Parent]\n\t\t\t\t\trequire.True(t, ok)\n\t\t\t\t\tassert.True(t, parentRevInfo.Body == nil)\n\t\t\t\t}\n\t\t\t}\n\t\t\tassert.Equal(t, 1, activeCount)\n\t\t})\n\t}\n}", "func TestNoConflicts(t *testing.T) {\n\ttestDB(t, func(db *bolt.DB) {\n\t\tbucketName := []byte(\"testBucket\")\n\n\t\tif err := db.Update(func(tx *bolt.Tx) error {\n\t\t\tb, err := tx.CreateBucket(bucketName)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tc := b.Cursor()\n\t\t\tfor _, test := range matchTests {\n\t\t\t\tpathB := []byte(test.path)\n\t\t\t\tif k, _ := SeekPathConflict(c, pathB); k != nil {\n\t\t\t\t\tt.Errorf(\"unexpected conflict with %q: %s\", test.path, string(k))\n\t\t\t\t}\n\n\t\t\t\tif err := b.Put(pathB, []byte{}); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t}); err != nil {\n\t\t\tt.Fatal(\"failed to insert paths:\", err)\n\t\t}\n\t})\n}", "func (s *DockerRegistrySuite) TestPullConflict(c *check.C) {\n\trepoName := privateRegistryURL + \"/dockercli/conflict\"\n\n\t_, err := buildImage(repoName, `\n\t FROM scratch\n\t ENV IMAGE conflict\n\t CMD echo conflict\n\t`, true)\n\tif err != nil {\n\t\tc.Fatal(err)\n\t}\n\n\tdockerCmd(c, \"push\", repoName)\n\n\t// Pull to make it content-addressable\n\tdockerCmd(c, \"rmi\", repoName)\n\tdockerCmd(c, \"pull\", repoName)\n\n\tIDBeforeLoad := imageID(c, repoName)\n\n\t// Load/save to turn this into an unverified image with the same ID\n\ttmpDir, err := ioutil.TempDir(\"\", \"conflict-save-output\")\n\tif err != nil 
{\n\t\tc.Errorf(\"failed to create temporary directory: %s\", err)\n\t}\n\tdefer os.RemoveAll(tmpDir)\n\n\ttarFile := filepath.Join(tmpDir, \"repo.tar\")\n\n\tdockerCmd(c, \"save\", \"-o\", tarFile, repoName)\n\tdockerCmd(c, \"rmi\", repoName)\n\tdockerCmd(c, \"load\", \"-i\", tarFile)\n\n\t// Check that the the ID is the same after save/load.\n\tIDAfterLoad := imageID(c, repoName)\n\n\tif IDAfterLoad != IDBeforeLoad {\n\t\tc.Fatal(\"image's ID should be the same after save/load\")\n\t}\n\n\t// Repull\n\tdockerCmd(c, \"pull\", repoName)\n\n\t// Check that the ID is now different because of the conflict.\n\tIDAfterPull1 := imageID(c, repoName)\n\n\t// Expect the new ID to be SHA256(oldID)\n\texpectedIDDigest, err := digest.FromBytes([]byte(IDBeforeLoad))\n\tif err != nil {\n\t\tc.Fatalf(\"digest error: %v\", err)\n\t}\n\texpectedID := expectedIDDigest.Hex()\n\tif IDAfterPull1 != expectedID {\n\t\tc.Fatalf(\"image's ID should have changed on pull to %s (got %s)\", expectedID, IDAfterPull1)\n\t}\n\n\t// A second pull should use the new ID again.\n\tdockerCmd(c, \"pull\", repoName)\n\n\tIDAfterPull2 := imageID(c, repoName)\n\n\tif IDAfterPull2 != IDAfterPull1 {\n\t\tc.Fatal(\"image's ID unexpectedly changed after a repull\")\n\t}\n}", "func TestActiveReplicatorPullConflictReadWriteIntlProps(t *testing.T) {\n\n\tcreateRevID := func(generation int, parentRevID string, body db.Body) string {\n\t\trev, err := db.CreateRevID(generation, parentRevID, body)\n\t\trequire.NoError(t, err, \"Error creating revision\")\n\t\treturn rev\n\t}\n\tdocExpiry := time.Now().Local().Add(time.Hour * time.Duration(4)).Format(time.RFC3339)\n\n\t// scenarios\n\tconflictResolutionTests := []struct {\n\t\tname string\n\t\tcommonAncestorRevID string\n\t\tlocalRevisionBody db.Body\n\t\tlocalRevID string\n\t\tremoteRevisionBody db.Body\n\t\tremoteRevID string\n\t\tconflictResolver string\n\t\texpectedLocalBody db.Body\n\t\texpectedLocalRevID string\n\t}{\n\t\t{\n\t\t\tname: \"mergeReadWriteIntlProps\",\n\t\t\tlocalRevisionBody: db.Body{\n\t\t\t\t\"source\": \"local\",\n\t\t\t},\n\t\t\tlocalRevID: \"1-a\",\n\t\t\tremoteRevisionBody: db.Body{\n\t\t\t\t\"source\": \"remote\",\n\t\t\t},\n\t\t\tremoteRevID: \"1-b\",\n\t\t\tconflictResolver: `function(conflict) {\n\t\t\t\tvar mergedDoc = new Object();\n\t\t\t\tmergedDoc.source = \"merged\";\n\t\t\t\tmergedDoc.remoteDocId = conflict.RemoteDocument._id;\n\t\t\t\tmergedDoc.remoteRevId = conflict.RemoteDocument._rev;\n\t\t\t\tmergedDoc.localDocId = conflict.LocalDocument._id;\n\t\t\t\tmergedDoc.localRevId = conflict.LocalDocument._rev;\n\t\t\t\tmergedDoc._id = \"foo\";\n\t\t\t\tmergedDoc._rev = \"2-c\";\n\t\t\t\tmergedDoc._exp = 100;\n\t\t\t\treturn mergedDoc;\n\t\t\t}`,\n\t\t\texpectedLocalBody: db.Body{\n\t\t\t\tdb.BodyId: \"foo\",\n\t\t\t\tdb.BodyRev: \"2-c\",\n\t\t\t\tdb.BodyExpiry: json.Number(\"100\"),\n\t\t\t\t\"localDocId\": \"mergeReadWriteIntlProps\",\n\t\t\t\t\"localRevId\": \"1-a\",\n\t\t\t\t\"remoteDocId\": \"mergeReadWriteIntlProps\",\n\t\t\t\t\"remoteRevId\": \"1-b\",\n\t\t\t\t\"source\": \"merged\",\n\t\t\t},\n\t\t\texpectedLocalRevID: createRevID(2, \"1-b\", db.Body{\n\t\t\t\tdb.BodyId: \"foo\",\n\t\t\t\tdb.BodyRev: \"2-c\",\n\t\t\t\tdb.BodyExpiry: json.Number(\"100\"),\n\t\t\t\t\"localDocId\": \"mergeReadWriteIntlProps\",\n\t\t\t\t\"localRevId\": \"1-a\",\n\t\t\t\t\"remoteDocId\": \"mergeReadWriteIntlProps\",\n\t\t\t\t\"remoteRevId\": \"1-b\",\n\t\t\t\t\"source\": \"merged\",\n\t\t\t}),\n\t\t},\n\t\t{\n\t\t\tname: 
\"mergeReadWriteAttachments\",\n\t\t\tlocalRevisionBody: map[string]interface{}{\n\t\t\t\tdb.BodyAttachments: map[string]interface{}{\n\t\t\t\t\t\"A\": map[string]interface{}{\n\t\t\t\t\t\t\"data\": \"QQo=\",\n\t\t\t\t\t}},\n\t\t\t\t\"source\": \"local\",\n\t\t\t},\n\t\t\tlocalRevID: \"1-a\",\n\t\t\tremoteRevisionBody: map[string]interface{}{\n\t\t\t\tdb.BodyAttachments: map[string]interface{}{\n\t\t\t\t\t\"B\": map[string]interface{}{\n\t\t\t\t\t\t\"data\": \"Qgo=\",\n\t\t\t\t\t}},\n\t\t\t\t\"source\": \"remote\",\n\t\t\t},\n\t\t\tremoteRevID: \"1-b\",\n\t\t\tconflictResolver: `function(conflict) {\n\t\t\t\tvar mergedDoc = new Object();\n\t\t\t\tmergedDoc.source = \"merged\";\n\t\t\t\tvar mergedAttachments = new Object();\n\n\t\t\t\tdst = conflict.RemoteDocument._attachments;\n\t\t\t\tfor (var key in dst) {\n\t\t\t\t\tmergedAttachments[key] = dst[key];\n\t\t\t\t}\n\t\t\t\tsrc = conflict.LocalDocument._attachments;\n\t\t\t\tfor (var key in src) {\n\t\t\t\t\tmergedAttachments[key] = src[key];\n\t\t\t\t}\n\t\t\t\tmergedDoc._attachments = mergedAttachments;\n\t\t\t\treturn mergedDoc;\n\t\t\t}`,\n\t\t\texpectedLocalBody: map[string]interface{}{\n\t\t\t\t\"source\": \"merged\",\n\t\t\t},\n\t\t\texpectedLocalRevID: createRevID(2, \"1-b\", db.Body{\n\t\t\t\t\"source\": \"merged\",\n\t\t\t}),\n\t\t},\n\t\t{\n\t\t\tname: \"mergeReadIntlPropsLocalExpiry\",\n\t\t\tlocalRevisionBody: db.Body{\n\t\t\t\t\"source\": \"local\",\n\t\t\t\tdb.BodyExpiry: docExpiry,\n\t\t\t},\n\t\t\tlocalRevID: \"1-a\",\n\t\t\tremoteRevisionBody: db.Body{\"source\": \"remote\"},\n\t\t\tremoteRevID: \"1-b\",\n\t\t\tconflictResolver: `function(conflict) {\n\t\t\t\tvar mergedDoc = new Object();\n\t\t\t\tmergedDoc.source = \"merged\";\n\t\t\t\tmergedDoc.localDocExp = conflict.LocalDocument._exp;\n\t\t\t\treturn mergedDoc;\n\t\t\t}`,\n\t\t\texpectedLocalBody: db.Body{\n\t\t\t\t\"localDocExp\": docExpiry,\n\t\t\t\t\"source\": \"merged\",\n\t\t\t},\n\t\t\texpectedLocalRevID: createRevID(2, \"1-b\", db.Body{\n\t\t\t\t\"localDocExp\": docExpiry,\n\t\t\t\t\"source\": \"merged\",\n\t\t\t}),\n\t\t},\n\t\t{\n\t\t\tname: \"mergeWriteIntlPropsExpiry\",\n\t\t\tlocalRevisionBody: db.Body{\n\t\t\t\t\"source\": \"local\",\n\t\t\t\tdb.BodyExpiry: docExpiry,\n\t\t\t},\n\t\t\tlocalRevID: \"1-a\",\n\t\t\tremoteRevisionBody: db.Body{\n\t\t\t\t\"source\": \"remote\",\n\t\t\t},\n\t\t\tremoteRevID: \"1-b\",\n\t\t\tconflictResolver: fmt.Sprintf(`function(conflict) {\n\t\t\t\tvar mergedDoc = new Object();\n\t\t\t\tmergedDoc.source = \"merged\";\n\t\t\t\tmergedDoc._exp = %q;\n\t\t\t\treturn mergedDoc;\n\t\t\t}`, docExpiry),\n\t\t\texpectedLocalBody: db.Body{\n\t\t\t\tdb.BodyExpiry: docExpiry,\n\t\t\t\t\"source\": \"merged\",\n\t\t\t},\n\t\t\texpectedLocalRevID: createRevID(2, \"1-b\", db.Body{\n\t\t\t\tdb.BodyExpiry: docExpiry,\n\t\t\t\t\"source\": \"merged\",\n\t\t\t}),\n\t\t},\n\t\t{\n\t\t\tname: \"mergeReadIntlPropsDeletedWithLocalTombstone\",\n\t\t\tlocalRevisionBody: db.Body{\n\t\t\t\t\"source\": \"local\",\n\t\t\t\tdb.BodyDeleted: true,\n\t\t\t},\n\t\t\tcommonAncestorRevID: \"1-a\",\n\t\t\tlocalRevID: \"2-a\",\n\t\t\tremoteRevisionBody: db.Body{\n\t\t\t\t\"source\": \"remote\",\n\t\t\t},\n\t\t\tremoteRevID: \"2-b\",\n\t\t\tconflictResolver: `function(conflict) {\n\t\t\t\tvar mergedDoc = new Object();\n\t\t\t\tmergedDoc.source = \"merged\";\n\t\t\t\tmergedDoc.localDeleted = conflict.LocalDocument._deleted;\n\t\t\t\treturn mergedDoc;\n\t\t\t}`,\n\t\t\texpectedLocalBody: db.Body{\n\t\t\t\t\"localDeleted\": true,\n\t\t\t\t\"source\": 
\"merged\",\n\t\t\t},\n\t\t\texpectedLocalRevID: createRevID(3, \"2-b\", db.Body{\n\t\t\t\t\"localDeleted\": true,\n\t\t\t\t\"source\": \"merged\",\n\t\t\t}),\n\t\t},\n\t}\n\n\tfor _, test := range conflictResolutionTests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tbase.RequireNumTestBuckets(t, 2)\n\t\t\tbase.SetUpTestLogging(t, logger.LevelDebug, logger.KeyAll)\n\n\t\t\t// Passive\n\t\t\ttb2 := base.GetTestBucket(t)\n\n\t\t\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\tTestBucket: tb2,\n\t\t\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\t\t\"alice\": {\n\t\t\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\t\t\tExplicitChannels: utils.SetOf(\"*\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t})\n\t\t\tdefer rt2.Close()\n\n\t\t\t// Create revision on rt2 (remote)\n\t\t\tdocID := test.name\n\t\t\tif test.commonAncestorRevID != \"\" {\n\t\t\t\t_, err := rt2.PutDocumentWithRevID(docID, test.commonAncestorRevID, \"\", test.remoteRevisionBody)\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\t\t\tresp, err := rt2.PutDocumentWithRevID(docID, test.remoteRevID, test.commonAncestorRevID, test.remoteRevisionBody)\n\t\t\tassert.NoError(t, err)\n\t\t\tassertStatus(t, resp, http.StatusCreated)\n\t\t\trt2revID := respRevID(t, resp)\n\t\t\tassert.Equal(t, test.remoteRevID, rt2revID)\n\n\t\t\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1.\n\t\t\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\t\t\tdefer srv.Close()\n\n\t\t\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Add basic auth creds to target db URL\n\t\t\tpassiveDBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\t\t\t// Active\n\t\t\ttb1 := base.GetTestBucket(t)\n\n\t\t\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\tTestBucket: tb1,\n\t\t\t})\n\t\t\tdefer rt1.Close()\n\n\t\t\t// Create revision on rt1 (local)\n\t\t\tif test.commonAncestorRevID != \"\" {\n\t\t\t\t_, err := rt1.PutDocumentWithRevID(docID, test.commonAncestorRevID, \"\", test.remoteRevisionBody)\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\t\t\tresp, err = rt1.PutDocumentWithRevID(docID, test.localRevID, test.commonAncestorRevID, test.localRevisionBody)\n\t\t\tassert.NoError(t, err)\n\t\t\tassertStatus(t, resp, http.StatusCreated)\n\t\t\trt1revID := respRevID(t, resp)\n\t\t\tassert.Equal(t, test.localRevID, rt1revID)\n\n\t\t\tcustomConflictResolver, err := db.NewCustomConflictResolver(test.conflictResolver)\n\t\t\trequire.NoError(t, err)\n\t\t\treplicationStats := base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name())\n\t\t\tar := db.NewActiveReplicator(&db.ActiveReplicatorConfig{\n\t\t\t\tID: t.Name(),\n\t\t\t\tDirection: db.ActiveReplicatorTypePull,\n\t\t\t\tRemoteDBURL: passiveDBURL,\n\t\t\t\tActiveDB: &db.Database{\n\t\t\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t\t\t},\n\t\t\t\tChangesBatchSize: 200,\n\t\t\t\tConflictResolverFunc: customConflictResolver,\n\t\t\t\tContinuous: true,\n\t\t\t\tReplicationStatsMap: replicationStats,\n\t\t\t})\n\t\t\tdefer func() { assert.NoError(t, ar.Stop()) }()\n\n\t\t\t// Start the replicator (implicit connect)\n\t\t\tassert.NoError(t, ar.Start())\n\t\t\twaitAndRequireCondition(t, func() bool { return ar.GetStatus().DocsRead == 1 })\n\t\t\tassert.Equal(t, 1, int(replicationStats.ConflictResolvedMergedCount.Value()))\n\n\t\t\t// Wait for the document originally written to rt2 to arrive at rt1.\n\t\t\t// Should end up 
as winner under default conflict resolution.\n\t\t\tchangesResults, err := rt1.WaitForChanges(1, \"/db/_changes?since=0\", \"\", true)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Len(t, changesResults.Results, 1)\n\t\t\tassert.Equal(t, docID, changesResults.Results[0].ID)\n\t\t\tassert.Equal(t, test.expectedLocalRevID, changesResults.Results[0].Changes[0][\"rev\"])\n\t\t\tlog.Printf(\"Changes response is %+v\", changesResults)\n\n\t\t\tdoc, err := rt1.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, test.expectedLocalRevID, doc.SyncData.CurrentRev)\n\t\t\tlog.Printf(\"doc.Body(): %v\", doc.Body())\n\t\t\tassert.Equal(t, test.expectedLocalBody, doc.Body())\n\t\t\tlog.Printf(\"Doc %s is %+v\", docID, doc)\n\t\t\tfor revID, revInfo := range doc.SyncData.History {\n\t\t\t\tlog.Printf(\"doc revision [%s]: %+v\", revID, revInfo)\n\t\t\t}\n\n\t\t\t// Validate only one active leaf node remains after conflict resolution, and that all parents\n\t\t\t// of leaves have empty bodies\n\t\t\tactiveCount := 0\n\t\t\tfor _, revID := range doc.SyncData.History.GetLeaves() {\n\t\t\t\trevInfo, ok := doc.SyncData.History[revID]\n\t\t\t\trequire.True(t, ok)\n\t\t\t\tif !revInfo.Deleted {\n\t\t\t\t\tactiveCount++\n\t\t\t\t}\n\t\t\t\tif revInfo.Parent != \"\" {\n\t\t\t\t\tparentRevInfo, ok := doc.SyncData.History[revInfo.Parent]\n\t\t\t\t\trequire.True(t, ok)\n\t\t\t\t\tassert.True(t, parentRevInfo.Body == nil)\n\t\t\t\t}\n\t\t\t}\n\t\t\tassert.Equal(t, 1, activeCount)\n\t\t})\n\t}\n}", "func TestK8gbRepeatedlyRecreatedFromIngress(t *testing.T) {\n\tt.Parallel()\n\t// name of ingress and gslb\n\tconst name = \"test-gslb-failover-simple\"\n\n\tassertStrategy := func(t *testing.T, options *k8s.KubectlOptions) {\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.splitBrainThresholdSeconds\", \"300\")\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.dnsTtlSeconds\", \"30\")\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.primaryGeoTag\", settings.PrimaryGeoTag)\n\t\tutils.AssertGslbSpec(t, options, name, \"spec.strategy.type\", \"failover\")\n\t}\n\n\t// Path to the Kubernetes resource config we will test\n\tingressResourcePath, err := filepath.Abs(\"../examples/ingress-annotation-failover-simple.yaml\")\n\trequire.NoError(t, err)\n\n\t// To ensure we can reuse the resource config on the same cluster to test different scenarios, we set up a unique\n\t// namespace for the resources for this test.\n\t// Note that namespaces must be lowercase.\n\tnamespaceName := fmt.Sprintf(\"k8gb-test-repeatedly-recreated-from-ingress-%s\", strings.ToLower(random.UniqueId()))\n\n\t// Here we choose to use the defaults, which are:\n\t// - HOME/.kube/config for the kubectl config file\n\t// - Current context of the kubectl config file\n\t// - Random namespace\n\toptions := k8s.NewKubectlOptions(\"\", \"\", namespaceName)\n\n\tk8s.CreateNamespace(t, options, namespaceName)\n\n\tdefer k8s.DeleteNamespace(t, options, namespaceName)\n\n\tdefer k8s.KubectlDelete(t, options, ingressResourcePath)\n\n\tutils.CreateGslb(t, options, settings, ingressResourcePath)\n\n\tk8s.WaitUntilIngressAvailable(t, options, name, 60, 1*time.Second)\n\n\tingress := k8s.GetIngress(t, options, name)\n\n\trequire.Equal(t, ingress.Name, name)\n\n\t// assert Gslb strategy has expected values\n\tassertStrategy(t, options)\n\n\tk8s.KubectlDelete(t, options, ingressResourcePath)\n\n\tutils.AssertGslbDeleted(t, options, ingress.Name)\n\n\t// recreate 
ingress\n\tutils.CreateGslb(t, options, settings, ingressResourcePath)\n\n\tk8s.WaitUntilIngressAvailable(t, options, name, 60, 1*time.Second)\n\n\tingress = k8s.GetIngress(t, options, name)\n\n\trequire.Equal(t, ingress.Name, name)\n\t// assert Gslb strategy has expected values\n\tassertStrategy(t, options)\n}", "func TestManifestSyncJob(t *testing.T) {\n\tforAllReplicaTypes(t, func(strategy string) {\n\t\ttest.WithRoundTripper(func(_ *test.RoundTripper) {\n\t\t\tj1, s1 := setup(t)\n\t\t\tj2, s2 := setupReplica(t, s1, strategy)\n\t\t\ts1.Clock.StepBy(1 * time.Hour)\n\t\t\treplicaToken := s2.GetToken(t, \"repository:test1/foo:pull\")\n\t\t\tsyncManifestsJob1 := j1.ManifestSyncJob(s1.Registry)\n\t\t\tsyncManifestsJob2 := j2.ManifestSyncJob(s2.Registry)\n\n\t\t\t//upload some manifests...\n\t\t\timages := make([]test.Image, 4)\n\t\t\tfor idx := range images {\n\t\t\t\timage := test.GenerateImage(\n\t\t\t\t\ttest.GenerateExampleLayer(int64(10*idx+1)),\n\t\t\t\t\ttest.GenerateExampleLayer(int64(10*idx+2)),\n\t\t\t\t)\n\t\t\t\timages[idx] = image\n\n\t\t\t\t//...to the primary account...\n\t\t\t\timage.MustUpload(t, s1, fooRepoRef, \"\")\n\n\t\t\t\t//...and most of them also to the replica account (to simulate replication having taken place)\n\t\t\t\tif idx != 0 {\n\t\t\t\t\tassert.HTTPRequest{\n\t\t\t\t\t\tMethod: \"GET\",\n\t\t\t\t\t\tPath: fmt.Sprintf(\"/v2/test1/foo/manifests/%s\", image.Manifest.Digest),\n\t\t\t\t\t\tHeader: map[string]string{\"Authorization\": \"Bearer \" + replicaToken},\n\t\t\t\t\t\tExpectStatus: http.StatusOK,\n\t\t\t\t\t\tExpectBody: assert.ByteData(image.Manifest.Contents),\n\t\t\t\t\t}.Check(t, s2.Handler)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t//some of the replicated images are also tagged\n\t\t\tfor _, db := range []*keppel.DB{s1.DB, s2.DB} {\n\t\t\t\tfor _, tagName := range []string{\"latest\", \"other\"} {\n\t\t\t\t\tmustExec(t, db,\n\t\t\t\t\t\t`INSERT INTO tags (repo_id, name, digest, pushed_at) VALUES (1, $1, $2, $3)`,\n\t\t\t\t\t\ttagName,\n\t\t\t\t\t\timages[1].Manifest.Digest,\n\t\t\t\t\t\ts1.Clock.Now(),\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t//also setup an image list manifest containing some of those images (so that we have\n\t\t\t//some manifest-manifest refs to play with)\n\t\t\timageList := test.GenerateImageList(images[1], images[2])\n\t\t\timageList.MustUpload(t, s1, fooRepoRef, \"\")\n\t\t\t//this one is replicated as well\n\t\t\tassert.HTTPRequest{\n\t\t\t\tMethod: \"GET\",\n\t\t\t\tPath: fmt.Sprintf(\"/v2/test1/foo/manifests/%s\", imageList.Manifest.Digest),\n\t\t\t\tHeader: map[string]string{\"Authorization\": \"Bearer \" + replicaToken},\n\t\t\t\tExpectStatus: http.StatusOK,\n\t\t\t\tExpectBody: assert.ByteData(imageList.Manifest.Contents),\n\t\t\t}.Check(t, s2.Handler)\n\n\t\t\t//set a well-known last_pulled_at timestamp on all manifests in the primary\n\t\t\t//DB (we will later verify that this was not touched by the manifest sync)\n\t\t\tinitialLastPulledAt := time.Unix(42, 0)\n\t\t\tmustExec(t, s1.DB, `UPDATE manifests SET last_pulled_at = $1`, initialLastPulledAt)\n\t\t\tmustExec(t, s1.DB, `UPDATE tags SET last_pulled_at = $1`, initialLastPulledAt)\n\t\t\t//we set last_pulled_at to NULL on images[3] to verify that we can merge\n\t\t\t//NULL with a non-NULL last_pulled_at from the replica side\n\t\t\tmustExec(t, s1.DB, `UPDATE manifests SET last_pulled_at = NULL WHERE digest = $1`, images[3].Manifest.Digest)\n\n\t\t\t//as an exception, in the on_first_use method, we can and want to merge\n\t\t\t//last_pulled_at timestamps from the replica into 
those of the primary, so\n\t\t\t//set some of those to verify the merging behavior\n\t\t\tearlierLastPulledAt := initialLastPulledAt.Add(-10 * time.Second)\n\t\t\tlaterLastPulledAt := initialLastPulledAt.Add(+10 * time.Second)\n\t\t\tmustExec(t, s2.DB, `UPDATE manifests SET last_pulled_at = NULL`)\n\t\t\tmustExec(t, s2.DB, `UPDATE tags SET last_pulled_at = NULL`)\n\t\t\tmustExec(t, s2.DB, `UPDATE manifests SET last_pulled_at = $1 WHERE digest = $2`, earlierLastPulledAt, images[1].Manifest.Digest)\n\t\t\tmustExec(t, s2.DB, `UPDATE manifests SET last_pulled_at = $1 WHERE digest = $2`, laterLastPulledAt, images[2].Manifest.Digest)\n\t\t\tmustExec(t, s2.DB, `UPDATE manifests SET last_pulled_at = $1 WHERE digest = $2`, initialLastPulledAt, images[3].Manifest.Digest)\n\t\t\tmustExec(t, s2.DB, `UPDATE tags SET last_pulled_at = $1 WHERE name = $2`, earlierLastPulledAt, \"latest\")\n\t\t\tmustExec(t, s2.DB, `UPDATE tags SET last_pulled_at = $1 WHERE name = $2`, laterLastPulledAt, \"other\")\n\n\t\t\ttr, tr0 := easypg.NewTracker(t, s2.DB.DbMap.Db)\n\t\t\ttr0.AssertEqualToFile(fmt.Sprintf(\"fixtures/manifest-sync-setup-%s.sql\", strategy))\n\t\t\ttrForPrimary, _ := easypg.NewTracker(t, s1.DB.DbMap.Db)\n\n\t\t\t//ManifestSyncJob on the primary registry should have nothing to do\n\t\t\t//since there are no replica accounts\n\t\t\texpectError(t, sql.ErrNoRows.Error(), syncManifestsJob1.ProcessOne(s1.Ctx))\n\t\t\ttrForPrimary.DBChanges().AssertEmpty()\n\t\t\t//ManifestSyncJob on the secondary registry should set the\n\t\t\t//ManifestsSyncedAt timestamp on the repo, but otherwise not do anything\n\t\t\texpectSuccess(t, syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\ttr.DBChanges().AssertEqualf(`\n\t\t\t\t\tUPDATE repos SET next_manifest_sync_at = %d WHERE id = 1 AND account_name = 'test1' AND name = 'foo';\n\t\t\t\t`,\n\t\t\t\ts1.Clock.Now().Add(1*time.Hour).Unix(),\n\t\t\t)\n\t\t\t//second run should not have anything else to do\n\t\t\texpectError(t, sql.ErrNoRows.Error(), syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\ttr.DBChanges().AssertEmpty()\n\n\t\t\t//in on_first_use, the sync should have merged the replica's last_pulled_at\n\t\t\t//timestamps into the primary, i.e. 
primary.last_pulled_at =\n\t\t\t//max(primary.last_pulled_at, replica.last_pulled_at); this only touches\n\t\t\t//the DB when the replica's last_pulled_at is after the primary's\n\t\t\tif strategy == \"on_first_use\" {\n\t\t\t\ttrForPrimary.DBChanges().AssertEqualf(`\n\t\t\t\t\t\tUPDATE manifests SET last_pulled_at = %[1]d WHERE repo_id = 1 AND digest = '%[2]s';\n\t\t\t\t\t\tUPDATE manifests SET last_pulled_at = %[3]d WHERE repo_id = 1 AND digest = '%[4]s';\n\t\t\t\t\t\tUPDATE tags SET last_pulled_at = %[3]d WHERE repo_id = 1 AND name = 'other';\n\t\t\t\t\t`,\n\t\t\t\t\tinitialLastPulledAt.Unix(),\n\t\t\t\t\timages[3].Manifest.Digest,\n\t\t\t\t\tlaterLastPulledAt.Unix(),\n\t\t\t\t\timages[2].Manifest.Digest,\n\t\t\t\t)\n\t\t\t\t//reset all timestamps to prevent divergences in the rest of the test\n\t\t\t\tmustExec(t, s1.DB, `UPDATE manifests SET last_pulled_at = $1`, initialLastPulledAt)\n\t\t\t\tmustExec(t, s1.DB, `UPDATE tags SET last_pulled_at = $1`, initialLastPulledAt)\n\t\t\t\tmustExec(t, s2.DB, `UPDATE manifests SET last_pulled_at = $1`, initialLastPulledAt)\n\t\t\t\tmustExec(t, s2.DB, `UPDATE tags SET last_pulled_at = $1`, initialLastPulledAt)\n\t\t\t\ttr.DBChanges() // skip these changes\n\t\t\t} else {\n\t\t\t\ttrForPrimary.DBChanges().AssertEmpty()\n\t\t\t}\n\n\t\t\t//delete a manifest on the primary side (this one is a simple image not referenced by anyone else)\n\t\t\ts1.Clock.StepBy(2 * time.Hour)\n\t\t\tmustExec(t, s1.DB,\n\t\t\t\t`DELETE FROM manifests WHERE digest = $1`,\n\t\t\t\timages[3].Manifest.Digest,\n\t\t\t)\n\t\t\t//move a tag on the primary side\n\t\t\tmustExec(t, s1.DB,\n\t\t\t\t`UPDATE tags SET digest = $1 WHERE name = 'latest'`,\n\t\t\t\timages[2].Manifest.Digest,\n\t\t\t)\n\n\t\t\t//again, nothing to do on the primary side\n\t\t\texpectError(t, sql.ErrNoRows.Error(), syncManifestsJob1.ProcessOne(s1.Ctx))\n\t\t\t//ManifestSyncJob on the replica side should not do anything while\n\t\t\t//the account is in maintenance; only the timestamp is updated to make sure\n\t\t\t//that the job loop progresses to the next repo\n\t\t\tmustExec(t, s2.DB, `UPDATE accounts SET in_maintenance = TRUE`)\n\t\t\texpectSuccess(t, syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\ttr.DBChanges().AssertEqualf(`\n\t\t\t\t\tUPDATE accounts SET in_maintenance = TRUE WHERE name = 'test1';\n\t\t\t\t\tUPDATE repos SET next_manifest_sync_at = %d WHERE id = 1 AND account_name = 'test1' AND name = 'foo';\n\t\t\t\t`,\n\t\t\t\ts1.Clock.Now().Add(1*time.Hour).Unix(),\n\t\t\t)\n\t\t\texpectError(t, sql.ErrNoRows.Error(), syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\ttr.DBChanges().AssertEmpty()\n\n\t\t\t//end maintenance\n\t\t\tmustExec(t, s2.DB, `UPDATE accounts SET in_maintenance = FALSE`)\n\t\t\ttr.DBChanges().AssertEqual(`UPDATE accounts SET in_maintenance = FALSE WHERE name = 'test1';`)\n\n\t\t\t//test that replication from external uses the inbound cache\n\t\t\tif strategy == \"from_external_on_first_use\" {\n\t\t\t\t//after the end of the maintenance, we would naively expect\n\t\t\t\t//ManifestSyncJob to actually replicate the deletion, BUT we have an\n\t\t\t\t//inbound cache with a lifetime of 6 hours, so actually nothing should\n\t\t\t\t//happen (only the tag gets synced, which includes a validation of the\n\t\t\t\t//referenced manifest)\n\t\t\t\ts1.Clock.StepBy(2 * time.Hour)\n\t\t\t\texpectSuccess(t, syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\t\ttr.DBChanges().AssertEqualf(`\n\t\t\t\t\t\tUPDATE manifests SET validated_at = %d WHERE repo_id = 1 AND digest = '%s';\n\t\t\t\t\t\tUPDATE repos SET 
next_manifest_sync_at = %d WHERE id = 1 AND account_name = 'test1' AND name = 'foo';\n\t\t\t\t\t`,\n\t\t\t\t\ts1.Clock.Now().Unix(),\n\t\t\t\t\timages[1].Manifest.Digest,\n\t\t\t\t\ts1.Clock.Now().Add(1*time.Hour).Unix(),\n\t\t\t\t)\n\t\t\t\texpectError(t, sql.ErrNoRows.Error(), syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\t\ttr.DBChanges().AssertEmpty()\n\t\t\t}\n\n\t\t\t//From now on, we will go in clock increments of 7 hours to force the\n\t\t\t//inbound cache to never hit.\n\n\t\t\t//after the end of the maintenance, ManifestSyncJob on the replica\n\t\t\t//side should delete the same manifest that we deleted in the primary\n\t\t\t//account, and also replicate the tag change (which includes a validation\n\t\t\t//of the tagged manifests)\n\t\t\ts1.Clock.StepBy(7 * time.Hour)\n\t\t\texpectSuccess(t, syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\tmanifestValidationBecauseOfExistingTag := fmt.Sprintf(\n\t\t\t\t//this validation is skipped in \"on_first_use\" because the respective tag is unchanged\n\t\t\t\t`UPDATE manifests SET validated_at = %d WHERE repo_id = 1 AND digest = '%s';`+\"\\n\",\n\t\t\t\ts1.Clock.Now().Unix(), images[1].Manifest.Digest,\n\t\t\t)\n\t\t\tif strategy == \"on_first_use\" {\n\t\t\t\tmanifestValidationBecauseOfExistingTag = \"\"\n\t\t\t}\n\t\t\ttr.DBChanges().AssertEqualf(`\n\t\t\t\t\tDELETE FROM manifest_blob_refs WHERE repo_id = 1 AND digest = '%[1]s' AND blob_id = 7;\n\t\t\t\t\tDELETE FROM manifest_blob_refs WHERE repo_id = 1 AND digest = '%[1]s' AND blob_id = 8;\n\t\t\t\t\tDELETE FROM manifest_blob_refs WHERE repo_id = 1 AND digest = '%[1]s' AND blob_id = 9;\n\t\t\t\t\tDELETE FROM manifest_contents WHERE repo_id = 1 AND digest = '%[1]s';\n\t\t\t\t\tDELETE FROM manifests WHERE repo_id = 1 AND digest = '%[1]s';\n\t\t\t\t\t%[5]sUPDATE manifests SET validated_at = %[2]d WHERE repo_id = 1 AND digest = '%[3]s';\n\t\t\t\t\tUPDATE repos SET next_manifest_sync_at = %[4]d WHERE id = 1 AND account_name = 'test1' AND name = 'foo';\n\t\t\t\t\tUPDATE tags SET digest = '%[3]s', pushed_at = %[2]d, last_pulled_at = NULL WHERE repo_id = 1 AND name = 'latest';\n\t\t\t\t\tDELETE FROM trivy_security_info WHERE repo_id = 1 AND digest = '%[1]s';\n\t\t\t\t`,\n\t\t\t\timages[3].Manifest.Digest, //the deleted manifest\n\t\t\t\ts1.Clock.Now().Unix(),\n\t\t\t\timages[2].Manifest.Digest, //the manifest now tagged as \"latest\"\n\t\t\t\ts1.Clock.Now().Add(1*time.Hour).Unix(),\n\t\t\t\tmanifestValidationBecauseOfExistingTag,\n\t\t\t)\n\t\t\texpectError(t, sql.ErrNoRows.Error(), syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\ttr.DBChanges().AssertEmpty()\n\n\t\t\t//cause a deliberate inconsistency on the primary side: delete a manifest that\n\t\t\t//*is* referenced by another manifest (this requires deleting the\n\t\t\t//manifest-manifest ref first, otherwise the DB will complain)\n\t\t\ts1.Clock.StepBy(7 * time.Hour)\n\t\t\tmustExec(t, s1.DB,\n\t\t\t\t`DELETE FROM manifest_manifest_refs WHERE child_digest = $1`,\n\t\t\t\timages[2].Manifest.Digest,\n\t\t\t)\n\t\t\tmustExec(t, s1.DB,\n\t\t\t\t`DELETE FROM manifests WHERE digest = $1`,\n\t\t\t\timages[2].Manifest.Digest,\n\t\t\t)\n\n\t\t\t//ManifestSyncJob should now complain since it wants to delete\n\t\t\t//images[2].Manifest, but it can't because of the manifest-manifest ref to\n\t\t\t//the image list\n\t\t\texpectedError := fmt.Sprintf(\"cannot remove deleted manifests [%s] in repo test1/foo because they are still being referenced by other manifests (this smells like an inconsistency on the primary 
account)\",\n\t\t\t\timages[2].Manifest.Digest,\n\t\t\t)\n\t\t\texpectError(t, expectedError, syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\t//the tag sync went through though, so the tag should be gone (the manifest\n\t\t\t//validation is because of the \"other\" tag that still exists)\n\t\t\tmanifestValidationBecauseOfExistingTag = fmt.Sprintf(\n\t\t\t\t//this validation is skipped in \"on_first_use\" because the respective tag is unchanged\n\t\t\t\t`UPDATE manifests SET validated_at = %d WHERE repo_id = 1 AND digest = '%s';`+\"\\n\",\n\t\t\t\ts1.Clock.Now().Unix(), images[1].Manifest.Digest,\n\t\t\t)\n\t\t\tif strategy == \"on_first_use\" {\n\t\t\t\tmanifestValidationBecauseOfExistingTag = \"\"\n\t\t\t}\n\t\t\ttr.DBChanges().AssertEqualf(`%sDELETE FROM tags WHERE repo_id = 1 AND name = 'latest';`,\n\t\t\t\tmanifestValidationBecauseOfExistingTag,\n\t\t\t)\n\n\t\t\t//also remove the image list manifest on the primary side\n\t\t\ts1.Clock.StepBy(7 * time.Hour)\n\t\t\tmustExec(t, s1.DB,\n\t\t\t\t`DELETE FROM manifests WHERE digest = $1`,\n\t\t\t\timageList.Manifest.Digest,\n\t\t\t)\n\t\t\t//and remove the other tag (this is required for the 404 error message in the next step but one to be deterministic)\n\t\t\tmustExec(t, s1.DB, `DELETE FROM tags`)\n\n\t\t\t//this makes the primary side consistent again, so ManifestSyncJob\n\t\t\t//should succeed now and remove both deleted manifests from the DB\n\t\t\texpectSuccess(t, syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\ttr.DBChanges().AssertEqualf(`\n\t\t\t\t\tDELETE FROM manifest_blob_refs WHERE repo_id = 1 AND digest = '%[1]s' AND blob_id = 4;\n\t\t\t\t\tDELETE FROM manifest_blob_refs WHERE repo_id = 1 AND digest = '%[1]s' AND blob_id = 5;\n\t\t\t\t\tDELETE FROM manifest_blob_refs WHERE repo_id = 1 AND digest = '%[1]s' AND blob_id = 6;\n\t\t\t\t\tDELETE FROM manifest_contents WHERE repo_id = 1 AND digest = '%[1]s';\n\t\t\t\t\tDELETE FROM manifest_contents WHERE repo_id = 1 AND digest = '%[2]s';\n\t\t\t\t\tDELETE FROM manifest_manifest_refs WHERE repo_id = 1 AND parent_digest = '%[2]s' AND child_digest = '%[3]s';\n\t\t\t\t\tDELETE FROM manifest_manifest_refs WHERE repo_id = 1 AND parent_digest = '%[2]s' AND child_digest = '%[1]s';\n\t\t\t\t\tDELETE FROM manifests WHERE repo_id = 1 AND digest = '%[1]s';\n\t\t\t\t\tDELETE FROM manifests WHERE repo_id = 1 AND digest = '%[2]s';\n\t\t\t\t\tUPDATE repos SET next_manifest_sync_at = %[4]d WHERE id = 1 AND account_name = 'test1' AND name = 'foo';\n\t\t\t\t\tDELETE FROM tags WHERE repo_id = 1 AND name = 'other';\n\t\t\t\t\tDELETE FROM trivy_security_info WHERE repo_id = 1 AND digest = '%[1]s';\n\t\t\t\t\tDELETE FROM trivy_security_info WHERE repo_id = 1 AND digest = '%[2]s';\n\t\t\t\t`,\n\t\t\t\timages[2].Manifest.Digest,\n\t\t\t\timageList.Manifest.Digest,\n\t\t\t\timages[1].Manifest.Digest,\n\t\t\t\ts1.Clock.Now().Add(1*time.Hour).Unix(),\n\t\t\t)\n\t\t\texpectError(t, sql.ErrNoRows.Error(), syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\ttr.DBChanges().AssertEmpty()\n\n\t\t\t//replace the primary registry's API with something that just answers 404 most of the time\n\t\t\t//\n\t\t\t//(We do allow the /keppel/v1/auth endpoint to work properly because\n\t\t\t//otherwise the error messages are not reproducible between passes.)\n\t\t\ts1.Clock.StepBy(7 * time.Hour)\n\t\t\thttp.DefaultTransport.(*test.RoundTripper).Handlers[\"registry.example.org\"] = answerMostWith404(s1.Handler)\n\t\t\t//This is particularly devious since 404 is returned by the GET endpoint for\n\t\t\t//a manifest when the manifest was deleted. 
We want to check that the next\n\t\t\t//ManifestSyncJob understands that this is a network issue and not\n\t\t\t//caused by the manifest getting deleted, since the 404-generating endpoint\n\t\t\t//does not render a proper MANIFEST_UNKNOWN error.\n\t\t\texpectedError = fmt.Sprintf(\"cannot check existence of manifest test1/foo/%s on primary account: during GET https://registry.example.org/v2/test1/foo/manifests/%[1]s: expected status 200, but got 404 Not Found\",\n\t\t\t\timages[1].Manifest.Digest, //the only manifest that is left\n\t\t\t)\n\t\t\texpectError(t, expectedError, syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\ttr.DBChanges().AssertEmpty()\n\n\t\t\t//check that the manifest sync did not update the last_pulled_at timestamps\n\t\t\t//in the primary DB (even though there were GET requests for the manifests\n\t\t\t//there)\n\t\t\tvar lastPulledAt time.Time\n\t\t\texpectSuccess(t, s1.DB.DbMap.QueryRow(`SELECT MAX(last_pulled_at) FROM manifests`).Scan(&lastPulledAt))\n\t\t\tif !lastPulledAt.Equal(initialLastPulledAt) {\n\t\t\t\tt.Error(\"last_pulled_at timestamps on the primary side were touched\")\n\t\t\t\tt.Logf(\" expected = %#v\", initialLastPulledAt)\n\t\t\t\tt.Logf(\" actual = %#v\", lastPulledAt)\n\t\t\t}\n\n\t\t\t//flip back to the actual primary registry's API\n\t\t\thttp.DefaultTransport.(*test.RoundTripper).Handlers[\"registry.example.org\"] = s1.Handler\n\t\t\t//delete the entire repository on the primary\n\t\t\ts1.Clock.StepBy(7 * time.Hour)\n\t\t\tmustExec(t, s1.DB, `DELETE FROM manifests`)\n\t\t\tmustExec(t, s1.DB, `DELETE FROM repos`)\n\t\t\t//the manifest sync should reflect the repository deletion on the replica\n\t\t\texpectSuccess(t, syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\ttr.DBChanges().AssertEqualf(`\n\t\t\t\t\tDELETE FROM blob_mounts WHERE blob_id = 1 AND repo_id = 1;\n\t\t\t\t\tDELETE FROM blob_mounts WHERE blob_id = 2 AND repo_id = 1;\n\t\t\t\t\tDELETE FROM blob_mounts WHERE blob_id = 3 AND repo_id = 1;\n\t\t\t\t\tDELETE FROM blob_mounts WHERE blob_id = 4 AND repo_id = 1;\n\t\t\t\t\tDELETE FROM blob_mounts WHERE blob_id = 5 AND repo_id = 1;\n\t\t\t\t\tDELETE FROM blob_mounts WHERE blob_id = 6 AND repo_id = 1;\n\t\t\t\t\tDELETE FROM blob_mounts WHERE blob_id = 7 AND repo_id = 1;\n\t\t\t\t\tDELETE FROM blob_mounts WHERE blob_id = 8 AND repo_id = 1;\n\t\t\t\t\tDELETE FROM blob_mounts WHERE blob_id = 9 AND repo_id = 1;\n\t\t\t\t\tDELETE FROM manifest_blob_refs WHERE repo_id = 1 AND digest = '%[1]s' AND blob_id = 1;\n\t\t\t\t\tDELETE FROM manifest_blob_refs WHERE repo_id = 1 AND digest = '%[1]s' AND blob_id = 2;\n\t\t\t\t\tDELETE FROM manifest_blob_refs WHERE repo_id = 1 AND digest = '%[1]s' AND blob_id = 3;\n\t\t\t\t\tDELETE FROM manifest_contents WHERE repo_id = 1 AND digest = '%[1]s';\n\t\t\t\t\tDELETE FROM manifests WHERE repo_id = 1 AND digest = '%[1]s';\n\t\t\t\t\tDELETE FROM repos WHERE id = 1 AND account_name = 'test1' AND name = 'foo';\n\t\t\t\t\tDELETE FROM trivy_security_info WHERE repo_id = 1 AND digest = '%[1]s';\n\t\t\t\t`,\n\t\t\t\timages[1].Manifest.Digest,\n\t\t\t)\n\t\t\texpectError(t, sql.ErrNoRows.Error(), syncManifestsJob2.ProcessOne(s2.Ctx))\n\t\t\ttr.DBChanges().AssertEmpty()\n\t\t})\n\t})\n}", "func CreateTemplateFailStatusMocked(t *testing.T, templateIn *types.Template) *types.Template {\n\n\tassert := assert.New(t)\n\n\t// wire up\n\tcs := &utils.MockConcertoService{}\n\tds, err := NewTemplateService(cs)\n\tassert.Nil(err, \"Couldn't load template service\")\n\tassert.NotNil(ds, \"Template service not instanced\")\n\n\t// 
convertMap\n\tmapIn, err := utils.ItemConvertParams(*templateIn)\n\tassert.Nil(err, \"Template test data corrupted\")\n\n\t// to json\n\tdOut, err := json.Marshal(templateIn)\n\tassert.Nil(err, \"Template test data corrupted\")\n\n\t// call service\n\tcs.On(\"Post\", \"/blueprint/templates/\", mapIn).Return(dOut, 499, nil)\n\ttemplateOut, err := ds.CreateTemplate(mapIn)\n\tassert.NotNil(err, \"We are expecting a status code error\")\n\tassert.Nil(templateOut, \"Expecting nil output\")\n\tassert.Contains(err.Error(), \"499\", \"Error should contain http code 499\")\n\n\treturn templateOut\n}", "func TestConflictErrorInDeleteInRR(t *testing.T) {\n\trequire.NoError(t, failpoint.Enable(\"github.com/pingcap/tidb/executor/assertPessimisticLockErr\", \"return\"))\n\tstore := testkit.CreateMockStore(t)\n\n\ttk := testkit.NewTestKit(t, store)\n\tdefer tk.MustExec(\"rollback\")\n\tse := tk.Session()\n\ttk2 := testkit.NewTestKit(t, store)\n\tdefer tk2.MustExec(\"rollback\")\n\n\ttk.MustExec(\"use test\")\n\ttk2.MustExec(\"use test\")\n\ttk.MustExec(\"create table t (id int primary key, v int)\")\n\ttk.MustExec(\"insert into t values (1, 1), (2, 2)\")\n\n\ttk.MustExec(\"begin pessimistic\")\n\ttk2.MustExec(\"insert into t values (3, 1)\")\n\tse.SetValue(sessiontxn.AssertLockErr, nil)\n\ttk.MustExec(\"delete from t where v = 1\")\n\t_, ok := se.Value(sessiontxn.AssertLockErr).(map[string]int)\n\trequire.False(t, ok)\n\ttk.MustQuery(\"select * from t\").Check(testkit.Rows(\"2 2\"))\n\ttk.MustExec(\"commit\")\n\n\ttk.MustExec(\"begin pessimistic\")\n\t// However, if the sub-select in the delete is a point get, we will incur one write conflict\n\ttk2.MustExec(\"update t set id = 1 where id = 2\")\n\tse.SetValue(sessiontxn.AssertLockErr, nil)\n\ttk.MustExec(\"delete from t where id = 1\")\n\n\trecords, ok := se.Value(sessiontxn.AssertLockErr).(map[string]int)\n\trequire.True(t, ok)\n\trequire.Equal(t, records[\"errWriteConflict\"], 1)\n\ttk.MustQuery(\"select * from t for update\").Check(testkit.Rows())\n\n\ttk.MustExec(\"rollback\")\n\trequire.NoError(t, failpoint.Disable(\"github.com/pingcap/tidb/executor/assertPessimisticLockErr\"))\n}
jsoniter.Marshal(smap)\n\ttassert.CheckFatal(t, err)\n\n\tn := getNum(smap.CountActiveTargets() + smap.CountActiveProxies() - 1)\n\tfor _, v := range smap.Tmap {\n\t\tif n == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tbaseParams := tutils.BaseAPIParams(v.URL(cmn.NetworkPublic))\n\t\tbaseParams.Method = http.MethodPut\n\t\terr = api.DoHTTPRequest(api.ReqParams{\n\t\t\tBaseParams: baseParams,\n\t\t\tPath: cmn.URLPathDaemon.Join(cmn.SyncSmap),\n\t\t\tBody: jsonMap,\n\t\t})\n\t\ttassert.CheckFatal(t, err)\n\t\tn--\n\t}\n\tkillRestorePrimary(t, proxyURL, false, nil)\n}", "func (pas *PodAutoscalerStatus) MarkResourceFailedCreation(kind, name string) {\n\tpas.MarkInactive(\"FailedCreate\",\n\t\tfmt.Sprintf(\"Failed to create %s %q.\", kind, name))\n}", "func TestMarker_FaultTolerance(t *testing.T) {\n\tdone := false\n\tfor i := 1; !done && i < 1000; i++ {\n\t\tt.Run(strconv.Itoa(i), func(t *testing.T) {\n\t\t\tvar count atomic.Int32\n\t\t\tcount.Store(int32(i))\n\t\t\tinj := errorfs.InjectorFunc(func(op errorfs.Op, path string) error {\n\t\t\t\t// Don't inject on Sync errors. They're fatal.\n\t\t\t\tif op == errorfs.OpFileSync {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tif v := count.Add(-1); v == 0 {\n\t\t\t\t\treturn errorfs.ErrInjected\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t})\n\n\t\t\tmem := vfs.NewMem()\n\t\t\tfs := errorfs.Wrap(mem, inj)\n\t\t\tmarkers := map[string]*Marker{}\n\t\t\tops := []struct {\n\t\t\t\top string\n\t\t\t\tname string\n\t\t\t\tvalue string\n\t\t\t}{\n\t\t\t\t{op: \"locate\", name: \"foo\", value: \"\"},\n\t\t\t\t{op: \"locate\", name: \"foo\", value: \"\"},\n\t\t\t\t{op: \"locate\", name: \"bar\", value: \"\"},\n\t\t\t\t{op: \"rm-obsolete\", name: \"foo\"},\n\t\t\t\t{op: \"move\", name: \"bar\", value: \"california\"},\n\t\t\t\t{op: \"rm-obsolete\", name: \"bar\"},\n\t\t\t\t{op: \"move\", name: \"bar\", value: \"california\"},\n\t\t\t\t{op: \"move\", name: \"bar\", value: \"new-york\"},\n\t\t\t\t{op: \"locate\", name: \"bar\", value: \"new-york\"},\n\t\t\t\t{op: \"move\", name: \"bar\", value: \"california\"},\n\t\t\t\t{op: \"rm-obsolete\", name: \"bar\"},\n\t\t\t\t{op: \"locate\", name: \"bar\", value: \"california\"},\n\t\t\t\t{op: \"move\", name: \"foo\", value: \"connecticut\"},\n\t\t\t\t{op: \"locate\", name: \"foo\", value: \"connecticut\"},\n\t\t\t}\n\n\t\t\tfor _, op := range ops {\n\t\t\t\trunOp := func() error {\n\t\t\t\t\tswitch op.op {\n\t\t\t\t\tcase \"locate\":\n\t\t\t\t\t\tm, v, err := LocateMarker(fs, \"\", op.name)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t\trequire.NotNil(t, m)\n\t\t\t\t\t\trequire.Equal(t, op.value, v)\n\t\t\t\t\t\tif existingMarker := markers[op.name]; existingMarker != nil {\n\t\t\t\t\t\t\trequire.NoError(t, existingMarker.Close())\n\t\t\t\t\t\t}\n\t\t\t\t\t\tmarkers[op.name] = m\n\t\t\t\t\t\treturn nil\n\t\t\t\t\tcase \"move\":\n\t\t\t\t\t\tm := markers[op.name]\n\t\t\t\t\t\trequire.NotNil(t, m)\n\t\t\t\t\t\treturn m.Move(op.value)\n\t\t\t\t\tcase \"rm-obsolete\":\n\t\t\t\t\t\tm := markers[op.name]\n\t\t\t\t\t\trequire.NotNil(t, m)\n\t\t\t\t\t\treturn m.RemoveObsolete()\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tpanic(\"unreachable\")\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Run the operation, if it fails with the injected\n\t\t\t\t// error, retry it exactly once. 
The retry should always\n\t\t\t\t// succeed.\n\t\t\t\terr := runOp()\n\t\t\t\tif errors.Is(err, errorfs.ErrInjected) {\n\t\t\t\t\terr = runOp()\n\t\t\t\t}\n\t\t\t\trequire.NoError(t, err)\n\t\t\t}\n\n\t\t\tfor _, m := range markers {\n\t\t\t\trequire.NoError(t, m.Close())\n\t\t\t}\n\n\t\t\t// Stop if the number of operations in the test case is\n\t\t\t// fewer than `i`.\n\t\t\tdone = count.Load() > 0\n\t\t})\n\t}\n}", "func TestParallelCreateConflictingTables(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\n\tconst numberOfTables = 30\n\tconst numberOfNodes = 3\n\n\ttc := testcluster.StartTestCluster(t, numberOfNodes, base.TestClusterArgs{})\n\tdefer tc.Stopper().Stop()\n\n\tif _, err := tc.ServerConn(0).Exec(`CREATE DATABASE \"test\"`); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Get the id descriptor generator count.\n\tkvDB := tc.Servers[0].KVClient().(*client.DB)\n\tvar descIDStart int64\n\tif descID, err := kvDB.Get(context.Background(), keys.DescIDGenerator); err != nil {\n\t\tt.Fatal(err)\n\t} else {\n\t\tdescIDStart = descID.ValueInt()\n\t}\n\n\tvar wgStart sync.WaitGroup\n\tvar wgEnd sync.WaitGroup\n\twgStart.Add(numberOfTables)\n\twgEnd.Add(numberOfTables)\n\tsignal := make(chan struct{})\n\tcompleted := make(chan int, numberOfTables)\n\tfor i := 0; i < numberOfTables; i++ {\n\t\tdb := tc.ServerConn(i % numberOfNodes)\n\t\tgo createTestTable(t, tc, 0, db, &wgStart, &wgEnd, signal, completed)\n\t}\n\n\t// Wait until all goroutines are ready.\n\twgStart.Wait()\n\t// Signal the create table goroutines to start.\n\tclose(signal)\n\t// Wait until all create tables are finished.\n\twgEnd.Wait()\n\tclose(completed)\n\n\tverifyTables(\n\t\tt,\n\t\ttc,\n\t\tcompleted,\n\t\t1,\n\t\tdescIDStart,\n\t)\n}", "func TestSendChangesToNoConflictPreHydrogenTarget(t *testing.T) {\n\tt.Skip(\"Test is only for development purposes\")\n\n\tbase.RequireNumTestBuckets(t, 2)\n\n\terrorCountBefore := base.SyncGatewayStats.GlobalStats.ResourceUtilizationStats().ErrorCount.Value()\n\n\t// Passive\n\ttb2 := base.GetTestBucket(t)\n\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: tb2,\n\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\tAllowConflicts: false,\n\t\t}},\n\t})\n\tdefer rt2.Close()\n\n\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: base.GetTestBucket(t),\n\t})\n\tdefer rt1.Close()\n\n\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1.\n\tsrv := httptest.NewTLSServer(rt2.TestAdminHandler())\n\tdefer srv.Close()\n\n\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\trequire.NoError(t, err)\n\n\tar := db.NewActiveReplicator(&db.ActiveReplicatorConfig{\n\t\tID: \"test\",\n\t\tDirection: db.ActiveReplicatorTypePush,\n\t\tRemoteDBURL: passiveDBURL,\n\t\tActiveDB: &db.Database{\n\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t},\n\t\tContinuous: true,\n\t\tInsecureSkipVerify: true,\n\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t})\n\n\tdefer func() {\n\t\trequire.NoError(t, ar.Stop())\n\t}()\n\trequire.NoError(t, ar.Start())\n\n\tassert.Equal(t, errorCountBefore, base.SyncGatewayStats.GlobalStats.ResourceUtilizationStats().ErrorCount.Value())\n\n\tresponse := rt1.SendAdminRequest(\"PUT\", \"/db/doc1\", \"{}\")\n\tassertStatus(t, response, http.StatusCreated)\n\n\terr = rt2.WaitForCondition(func() bool {\n\t\tif base.SyncGatewayStats.GlobalStats.ResourceUtilizationStats().ErrorCount.Value() == errorCountBefore+1 {\n\t\t\treturn 
true\n\t\t}\n\t\treturn false\n\t})\n\tassert.NoError(t, err)\n\n\tassert.Equal(t, db.ReplicationStateStopped, ar.GetStatus().Status)\n\tassert.Equal(t, db.PreHydrogenTargetAllowConflictsError.Error(), ar.GetStatus().ErrorMessage)\n}", "func CreateTemplateFailErrMocked(t *testing.T, templateIn *types.Template) *types.Template {\n\n\tassert := assert.New(t)\n\n\t// wire up\n\tcs := &utils.MockConcertoService{}\n\tds, err := NewTemplateService(cs)\n\tassert.Nil(err, \"Couldn't load template service\")\n\tassert.NotNil(ds, \"Template service not instanced\")\n\n\t// convertMap\n\tmapIn, err := utils.ItemConvertParams(*templateIn)\n\tassert.Nil(err, \"Template test data corrupted\")\n\n\t// to json\n\tdOut, err := json.Marshal(templateIn)\n\tassert.Nil(err, \"Template test data corrupted\")\n\n\t// call service\n\tcs.On(\"Post\", \"/blueprint/templates/\", mapIn).Return(dOut, 200, fmt.Errorf(\"mocked error\"))\n\ttemplateOut, err := ds.CreateTemplate(mapIn)\n\tassert.NotNil(err, \"We are expecting an error\")\n\tassert.Nil(templateOut, \"Expecting nil output\")\n\tassert.Equal(err.Error(), \"mocked error\", \"Error should be 'mocked error'\")\n\n\treturn templateOut\n}", "func TestRetryNotRequired(t *testing.T) {\n\tcheck := assert.New(t)\n\tretryRequired := checkRetryRequired(http.StatusConflict)\n\tcheck.Equal(retryRequired, false)\n}", "func TestCmdDeploy_retryOk(t *testing.T) {\n\tdeletedPods := []string{}\n\tconfig := deploytest.OkDeploymentConfig(1)\n\n\texistingDeployment := deploymentFor(config, deployapi.DeploymentStatusFailed)\n\texistingDeployment.Annotations[deployapi.DeploymentCancelledAnnotation] = deployapi.DeploymentCancelledAnnotationValue\n\texistingDeployment.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentCancelledByUser\n\n\texistingDeployerPods := []kapi.Pod{\n\t\t{ObjectMeta: kapi.ObjectMeta{Name: \"prehook\"}},\n\t\t{ObjectMeta: kapi.ObjectMeta{Name: \"posthook\"}},\n\t\t{ObjectMeta: kapi.ObjectMeta{Name: \"deployerpod\"}},\n\t}\n\n\tvar updatedDeployment *kapi.ReplicationController\n\tcommandClient := &deployCommandClientImpl{\n\t\tGetDeploymentFn: func(namespace, name string) (*kapi.ReplicationController, error) {\n\t\t\treturn existingDeployment, nil\n\t\t},\n\t\tUpdateDeploymentConfigFn: func(config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) {\n\t\t\tt.Fatalf(\"unexpected call to UpdateDeploymentConfig\")\n\t\t\treturn nil, nil\n\t\t},\n\t\tUpdateDeploymentFn: func(deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {\n\t\t\tupdatedDeployment = deployment\n\t\t\treturn deployment, nil\n\t\t},\n\t\tListDeployerPodsForFn: func(namespace, name string) (*kapi.PodList, error) {\n\t\t\treturn &kapi.PodList{Items: existingDeployerPods}, nil\n\t\t},\n\t\tDeletePodFn: func(pod *kapi.Pod) error {\n\t\t\tdeletedPods = append(deletedPods, pod.Name)\n\t\t\treturn nil\n\t\t},\n\t}\n\n\tc := &retryDeploymentCommand{client: commandClient}\n\terr := c.retry(config, ioutil.Discard)\n\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif updatedDeployment == nil {\n\t\tt.Fatalf(\"expected updated config\")\n\t}\n\n\tif deployutil.IsDeploymentCancelled(updatedDeployment) {\n\t\tt.Fatalf(\"deployment should not have the cancelled flag set anymore\")\n\t}\n\n\tif deployutil.DeploymentStatusReasonFor(updatedDeployment) != \"\" {\n\t\tt.Fatalf(\"deployment status reason should be empty\")\n\t}\n\n\tsort.Strings(deletedPods)\n\tif !reflect.DeepEqual(deletedPods, []string{\"deployerpod\", 
\"posthook\", \"prehook\"}) {\n\t\tt.Fatalf(\"Not all deployer pods for the failed deployment were deleted\")\n\t}\n\n\tif e, a := deployapi.DeploymentStatusNew, deployutil.DeploymentStatusFor(updatedDeployment); e != a {\n\t\tt.Fatalf(\"expected deployment status %s, got %s\", e, a)\n\t}\n}", "func (t *simpleTest) replaceExistingDocumentWrongRevision(collectionName string, key, rev string) error {\n\n\toperationTimeout := t.OperationTimeout\n\ttestTimeout := time.Now().Add(operationTimeout * 4)\n\n\tq := url.Values{}\n\tq.Set(\"waitForSync\", \"true\")\n\tnewName := fmt.Sprintf(\"Updated name %s\", time.Now())\n\thdr := ifMatchHeader(nil, rev)\n\tnewDoc := UserDocument{\n\t\tKey: key,\n\t\tName: fmt.Sprintf(\"Replaced named %s\", key),\n\t\tValue: rand.Int(),\n\t\tOdd: rand.Int()%2 == 0,\n\t}\n\turl := fmt.Sprintf(\"/_api/document/%s/%s\", collectionName, key)\n\tbackoff := time.Millisecond * 250\n\ti := 0\n\n\tfor {\n\n\t\ti++\n\t\tif time.Now().After(testTimeout) {\n\t\t\tbreak\n\t\t}\n\n\t\tt.log.Infof(\n\t\t\t\"Replacing (%d) existing document '%s' wrong revision in '%s' (name -> '%s')...\",\n\t\t\ti, key, collectionName, newName)\n\t\tresp, err := t.client.Put(\n\t\t\turl, q, hdr, newDoc, \"\", nil, []int{0, 1, 412, 503},\n\t\t\t[]int{200, 201, 202, 400, 404, 307}, operationTimeout, 1)\n\t\tt.log.Infof(\"... got http %d - arangodb %d via %s\",\n\t\t\tresp[0].StatusCode, resp[0].Error_.ErrorNum, resp[0].CoordinatorURL)\n\n\t\tif err[0] == nil {\n\t\t\tif resp[0].StatusCode == 412 {\n\t\t\t\tt.replaceExistingWrongRevisionCounter.succeeded++\n\t\t\t\tt.log.Infof(\"Replacing existing document '%s' wrong revision in '%s' (name -> '%s') succeeded\", key, collectionName, newName)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t// In cases 0 and 1 and 503, we fall through here and try again\n\t\t} else {\n\t\t\t// This is a failure\n\t\t\tt.replaceExistingWrongRevisionCounter.failed++\n\t\t\tt.reportFailure(\n\t\t\t\ttest.NewFailure(\n\t\t\t\t\t\"Failed to replace existing document '%s' wrong revision in collection '%s': %v\",\n\t\t\t\t\tkey, collectionName, err[0]))\n\t\t\treturn maskAny(err[0])\n\t\t}\n\n\t\ttime.Sleep(backoff)\n\t\tif backoff < time.Second*5 {\n\t\t\tbackoff += backoff\n\t\t}\n\t}\n\n\tt.replaceExistingWrongRevisionCounter.failed++\n\tt.reportFailure(\n\t\ttest.NewFailure(\n\t\t\t\"Timed out while replacing (%d) existing document '%s' wrong revision in collection '%s'\",\n\t\t\ti, key, collectionName))\n\treturn maskAny(\n\t\tfmt.Errorf(\n\t\t\t\"Timed out while replacing (%d) existing document '%s' wrong revision in collection '%s'\",\n\t\t\ti, key, collectionName))\n\n}", "func TestActiveReplicatorEdgeCheckpointNameCollisions(t *testing.T) {\n\n\tbase.RequireNumTestBuckets(t, 3)\n\n\tbase.SetUpTestLogging(t, logger.LevelDebug, logger.KeyReplicate, logger.KeyHTTP, logger.KeyHTTPResp, logger.KeySync, logger.KeySyncMsg)\n\n\tconst (\n\t\tchangesBatchSize = 10\n\t\tnumRT1DocsInitial = 13 // 2 batches of changes\n\t)\n\n\t// Central cluster\n\ttb1 := base.GetTestBucket(t)\n\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: tb1,\n\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\"alice\": {\n\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\tExplicitChannels: utils.SetOf(\"alice\"),\n\t\t\t\t},\n\t\t\t},\n\t\t}},\n\t})\n\tdefer rt1.Close()\n\n\t// Create first batch of docs\n\tdocIDPrefix := t.Name() + \"rt1doc\"\n\tfor i := 0; i < numRT1DocsInitial; i++ {\n\t\tresp := rt1.SendAdminRequest(http.MethodPut, 
fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"source\":\"rt1\",\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, resp, http.StatusCreated)\n\t}\n\n\t// Make rt1 listen on an actual HTTP port, so it can receive the blipsync request from edges\n\tsrv := httptest.NewServer(rt1.TestPublicHandler())\n\tdefer srv.Close()\n\n\t// Build rt1DBURL with basic auth creds\n\trt1DBURL, err := url.Parse(srv.URL + \"/db\")\n\trequire.NoError(t, err)\n\trt1DBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\t// Edge 1\n\tedge1Bucket := base.GetTestBucket(t)\n\tedge1 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: edge1Bucket,\n\t})\n\tdefer edge1.Close()\n\n\tarConfig := db.ActiveReplicatorConfig{\n\t\tID: \"edge-repl\",\n\t\tDirection: db.ActiveReplicatorTypePull,\n\t\tRemoteDBURL: rt1DBURL,\n\t\tActiveDB: &db.Database{\n\t\t\tDatabaseContext: edge1.GetDatabase(),\n\t\t},\n\t\tContinuous: true,\n\t\tChangesBatchSize: changesBatchSize,\n\t}\n\tarConfig.SetCheckpointPrefix(t, \"cluster1:\")\n\n\t// Create the first active replicator to pull from seq:0\n\tarConfig.ReplicationStatsMap = base.SyncGatewayStats.NewDBStats(t.Name()+\"edge1\", false, false, false).DBReplicatorStats(t.Name())\n\tedge1Replicator := db.NewActiveReplicator(&arConfig)\n\n\tstartNumChangesRequestedFromZeroTotal := rt1.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\tstartNumRevsHandledTotal := edge1Replicator.Pull.GetStats().HandleRevCount.Value()\n\n\tassert.NoError(t, edge1Replicator.Start())\n\n\t// wait for all of the documents originally written to rt1 to arrive at edge1\n\tchangesResults, err := edge1.WaitForChanges(numRT1DocsInitial, \"/db/_changes?since=0\", \"\", true)\n\trequire.NoError(t, err)\n\tedge1LastSeq := changesResults.Last_Seq\n\trequire.Len(t, changesResults.Results, numRT1DocsInitial)\n\tdocIDsSeen := make(map[string]bool, numRT1DocsInitial)\n\tfor _, result := range changesResults.Results {\n\t\tdocIDsSeen[result.ID] = true\n\t}\n\tfor i := 0; i < numRT1DocsInitial; i++ {\n\t\tdocID := fmt.Sprintf(\"%s%d\", docIDPrefix, i)\n\t\tassert.True(t, docIDsSeen[docID])\n\n\t\tdoc, err := edge1.GetDatabase().GetDocument(logger.TestCtx(t), docID, db.DocUnmarshalAll)\n\t\tassert.NoError(t, err)\n\n\t\tbody, err := doc.GetDeepMutableBody()\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, \"rt1\", body[\"source\"])\n\t}\n\n\tedge1Replicator.Pull.Checkpointer.CheckpointNow()\n\n\t// one _changes from seq:0 with initial number of docs sent\n\tnumChangesRequestedFromZeroTotal := rt1.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\tassert.Equal(t, startNumChangesRequestedFromZeroTotal+1, numChangesRequestedFromZeroTotal)\n\n\t// rev assertions\n\tnumRevsHandledTotal := edge1Replicator.Pull.GetStats().HandleRevCount.Value()\n\tassert.Equal(t, startNumRevsHandledTotal+numRT1DocsInitial, numRevsHandledTotal)\n\tassert.Equal(t, int64(numRT1DocsInitial), edge1Replicator.Pull.Checkpointer.Stats().ProcessedSequenceCount)\n\tassert.Equal(t, int64(numRT1DocsInitial), edge1Replicator.Pull.Checkpointer.Stats().ExpectedSequenceCount)\n\n\t// checkpoint assertions\n\tassert.Equal(t, int64(0), edge1Replicator.Pull.Checkpointer.Stats().GetCheckpointHitCount)\n\tassert.Equal(t, int64(1), edge1Replicator.Pull.Checkpointer.Stats().GetCheckpointMissCount)\n\tassert.Equal(t, int64(1), edge1Replicator.Pull.Checkpointer.Stats().SetCheckpointCount)\n\n\tassert.NoError(t, edge1Replicator.Stop())\n\n\t// Edge 2\n\tedge2Bucket := base.GetTestBucket(t)\n\tedge2 := NewRestTester(t, 
&RestTesterConfig{\n\t\tTestBucket: edge2Bucket,\n\t})\n\tdefer edge2.Close()\n\n\t// Create a new replicator using the same ID, which should NOT use the checkpoint set by the first edge.\n\tarConfig.ReplicationStatsMap = base.SyncGatewayStats.NewDBStats(t.Name()+\"edge2\", false, false, false).DBReplicatorStats(t.Name())\n\tarConfig.ActiveDB = &db.Database{\n\t\tDatabaseContext: edge2.GetDatabase(),\n\t}\n\tarConfig.SetCheckpointPrefix(t, \"cluster2:\")\n\tedge2Replicator := db.NewActiveReplicator(&arConfig)\n\tassert.NoError(t, edge2Replicator.Start())\n\n\tchangesResults, err = edge2.WaitForChanges(numRT1DocsInitial, \"/db/_changes?since=0\", \"\", true)\n\trequire.NoError(t, err)\n\n\tedge2Replicator.Pull.Checkpointer.CheckpointNow()\n\n\t// make sure that edge 2 didn't use a checkpoint\n\tassert.Equal(t, int64(0), edge2Replicator.Pull.Checkpointer.Stats().GetCheckpointHitCount)\n\tassert.Equal(t, int64(1), edge2Replicator.Pull.Checkpointer.Stats().GetCheckpointMissCount)\n\tassert.Equal(t, int64(1), edge2Replicator.Pull.Checkpointer.Stats().SetCheckpointCount)\n\n\tassert.NoError(t, edge2Replicator.Stop())\n\n\tresp := rt1.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, numRT1DocsInitial), `{\"source\":\"rt1\",\"channels\":[\"alice\"]}`)\n\tassertStatus(t, resp, http.StatusCreated)\n\trequire.NoError(t, rt1.WaitForPendingChanges())\n\n\t// run a replicator on edge1 again to make sure that edge2 didn't blow away its checkpoint\n\tarConfig.ReplicationStatsMap = base.SyncGatewayStats.NewDBStats(t.Name()+\"edge1\", false, false, false).DBReplicatorStats(t.Name())\n\tarConfig.ActiveDB = &db.Database{\n\t\tDatabaseContext: edge1.GetDatabase(),\n\t}\n\tarConfig.SetCheckpointPrefix(t, \"cluster1:\")\n\n\tedge1Replicator2 := db.NewActiveReplicator(&arConfig)\n\trequire.NoError(t, edge1Replicator2.Start())\n\n\tchangesResults, err = edge1.WaitForChanges(1, fmt.Sprintf(\"/db/_changes?since=%v\", edge1LastSeq), \"\", true)\n\trequire.NoErrorf(t, err, \"changesResults: %v\", changesResults)\n\tchangesResults.requireDocIDs(t, []string{fmt.Sprintf(\"%s%d\", docIDPrefix, numRT1DocsInitial)})\n\n\tedge1Replicator2.Pull.Checkpointer.CheckpointNow()\n\n\tassert.Equal(t, int64(1), edge1Replicator2.Pull.Checkpointer.Stats().GetCheckpointHitCount)\n\tassert.Equal(t, int64(0), edge1Replicator2.Pull.Checkpointer.Stats().GetCheckpointMissCount)\n\tassert.Equal(t, int64(1), edge1Replicator2.Pull.Checkpointer.Stats().SetCheckpointCount)\n\n\trequire.NoError(t, edge1Replicator2.Stop())\n}", "func TestSQLUpdateManyConcurrentConflictRollback(t *testing.T) {\n\tdbStore, cleanup := sqldbSetup(t)\n\tdefer cleanup()\n\n\tvar gun data.GUN = \"testGUN\"\n\tconcurrency := 50\n\tvar wg sync.WaitGroup\n\n\terrCh := make(chan error)\n\n\tfor i := 0; i < concurrency; i++ {\n\t\ttufObj := SampleCustomTUFObj(gun, data.CanonicalRootRole, 1, []byte{byte(i)})\n\t\tupdates := []MetaUpdate{MakeUpdate(tufObj)}\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terrCh <- dbStore.UpdateMany(gun, updates)\n\t\t}()\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(errCh)\n\t}()\n\n\tsuccesses := 0\n\tfor err := range errCh {\n\t\tif err == nil {\n\t\t\tsuccesses++\n\t\t}\n\t}\n\n\trequire.Equal(t, 1, successes)\n}", "func TestRecreateRunningWorkflowFails(t *testing.T) {\n\tr := task.NewTaskRegistry()\n\tr.AddOrchestratorN(\"SleepyWorkflow\", func(ctx *task.OrchestrationContext) (any, error) {\n\t\terr := ctx.CreateTimer(24 * time.Hour).Await(nil)\n\t\treturn nil, err\n\t})\n\n\tctx := 
context.Background()\n\tclient, engine := startEngine(ctx, t, r)\n\n\tfor _, opt := range GetTestOptions() {\n\t\tt.Run(opt(engine), func(t *testing.T) {\n\t\t\t// Start the first workflow, which will not complete\n\t\t\tvar metadata *api.OrchestrationMetadata\n\t\t\tid, err := client.ScheduleNewOrchestration(ctx, \"SleepyWorkflow\")\n\t\t\tif assert.NoError(t, err) {\n\t\t\t\tif metadata, err = client.WaitForOrchestrationStart(ctx, id); assert.NoError(t, err) {\n\t\t\t\t\tassert.False(t, metadata.IsComplete())\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Attempting to start a second workflow with the same ID should fail\n\t\t\t_, err = client.ScheduleNewOrchestration(ctx, \"SleepyWorkflow\", api.WithInstanceID(id))\n\t\t\trequire.Error(t, err)\n\t\t\t// We expect that the workflow instance ID is included in the error message\n\t\t\tassert.Contains(t, err.Error(), id)\n\t\t})\n\t}\n}", "func TestResolveIdentifyImplicitTeamWithDuplicates(t *testing.T) {\n\ttt := newTeamTester(t)\n\tdefer tt.cleanup()\n\n\talice := tt.addUser(\"abc\")\n\tg := alice.tc.G\n\n\tbob := tt.addUser(\"bob\")\n\n\tiTeamNameCreate := strings.Join([]string{alice.username, bob.username}, \",\")\n\t// simple duplicate\n\tiTeamNameLookup1 := strings.Join([]string{alice.username, bob.username, bob.username}, \",\")\n\t// duplicate after resolution\n\tiTeamNameLookup2 := strings.Join([]string{alice.username, bob.username, bob.username + \"@rooter\"}, \",\")\n\t// duplicate across reader boundary\n\tiTeamNameLookup3 := strings.Join([]string{alice.username, bob.username + \"@rooter\"}, \",\") + \"#\" + bob.username\n\n\tt.Logf(\"make an implicit team\")\n\tiTeam, _, _, err := teams.LookupOrCreateImplicitTeam(context.TODO(), g, iTeamNameCreate, false /*isPublic*/)\n\trequire.NoError(t, err)\n\n\tbob.proveRooter()\n\n\tcli, err := client.GetIdentifyClient(g)\n\trequire.NoError(t, err, \"failed to get new identifyclient\")\n\n\tfor i, lookup := range []string{iTeamNameLookup1, iTeamNameLookup2, iTeamNameLookup3} {\n\t\tt.Logf(\"checking %v: %v\", i, lookup)\n\t\tres, err := cli.ResolveIdentifyImplicitTeam(context.Background(), keybase1.ResolveIdentifyImplicitTeamArg{\n\t\t\tAssertions: lookup,\n\t\t\tSuffix: \"\",\n\t\t\tIsPublic: false,\n\t\t\tDoIdentifies: false,\n\t\t\tCreate: false,\n\t\t\tIdentifyBehavior: keybase1.TLFIdentifyBehavior_DEFAULT_KBFS,\n\t\t})\n\t\trequire.NoError(t, err, \"%v %v\", err, spew.Sdump(res))\n\t\trequire.Equal(t, res.TeamID, iTeam.ID)\n\t\trequire.Equal(t, res.DisplayName, iTeamNameCreate)\n\t\trequire.True(t, compareUserVersionSets([]keybase1.UserVersion{alice.userVersion(), bob.userVersion()}, res.Writers))\n\t\trequire.Nil(t, res.TrackBreaks, \"track breaks\")\n\t}\n}", "func Conflict(message string, errors []Error) {\n\tresponse := Response{\n\t\tStatus: http.StatusConflict,\n\t\tMessage: message,\n\t\tData: nil,\n\t\tErrors: errors,\n\t}\n\tpanic(response)\n}", "func TestRollbackDuringAddDropRegionAsyncJobFailure(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\tdefer log.Scope(t).Close(t)\n\n\tskip.UnderRace(t, \"times out under race\")\n\n\t// Decrease the adopt loop interval so that retries happen quickly.\n\tdefer sqltestutils.SetTestJobsAdoptInterval()()\n\n\tknobs := base.TestingKnobs{\n\t\tSQLTypeSchemaChanger: &sql.TypeSchemaChangerTestingKnobs{\n\t\t\tRunBeforeMultiRegionUpdates: func() error {\n\t\t\t\treturn errors.New(\"boom\")\n\t\t\t},\n\t\t},\n\t}\n\n\t// Setup.\n\t_, sqlDB, cleanup := multiregionccltestutils.TestingCreateMultiRegionCluster(\n\t\tt, 3 /* numServers */, knobs, nil, /* 
baseDir */\n\t)\n\tdefer cleanup()\n\t_, err := sqlDB.Exec(`CREATE DATABASE db WITH PRIMARY REGION \"us-east1\" REGIONS \"us-east2\"`)\n\trequire.NoError(t, err)\n\n\ttestCases := []struct {\n\t\tname string\n\t\tquery string\n\t}{\n\t\t{\n\t\t\t\"add-region\",\n\t\t\t`ALTER DATABASE db ADD REGION \"us-east3\"`,\n\t\t},\n\t\t{\n\t\t\t\"drop-region\",\n\t\t\t`ALTER DATABASE db DROP REGION \"us-east2\"`,\n\t\t},\n\t\t{\n\t\t\t\"add-drop-region-in-txn\",\n\t\t\t`BEGIN;\n\tALTER DATABASE db DROP REGION \"us-east2\";\n\tALTER DATABASE db ADD REGION \"us-east3\";\n\tCOMMIT`,\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tvar originalZoneConfig string\n\t\t\tres := sqlDB.QueryRow(`SELECT raw_config_sql FROM [SHOW ZONE CONFIGURATION FOR DATABASE db]`)\n\t\t\terr = res.Scan(&originalZoneConfig)\n\t\t\trequire.NoError(t, err)\n\n\t\t\t_, err = sqlDB.Exec(tc.query)\n\t\t\ttestutils.IsError(err, \"boom\")\n\n\t\t\tvar jobStatus string\n\t\t\tvar jobErr string\n\t\t\trow := sqlDB.QueryRow(\"SELECT status, error FROM [SHOW JOBS] WHERE job_type = 'TYPEDESC SCHEMA CHANGE'\")\n\t\t\trequire.NoError(t, row.Scan(&jobStatus, &jobErr))\n\t\t\trequire.Contains(t, jobErr, \"boom\")\n\t\t\trequire.Contains(t, jobStatus, \"failed\")\n\n\t\t\t// Ensure the zone configuration didn't change.\n\t\t\tvar newZoneConfig string\n\t\t\tres = sqlDB.QueryRow(`SELECT raw_config_sql FROM [SHOW ZONE CONFIGURATION FOR DATABASE db]`)\n\t\t\terr = res.Scan(&newZoneConfig)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tif newZoneConfig != originalZoneConfig {\n\t\t\t\tt.Fatalf(\"expected zone config to not have changed, expected %q found %q\",\n\t\t\t\t\toriginalZoneConfig,\n\t\t\t\t\tnewZoneConfig,\n\t\t\t\t)\n\t\t\t}\n\t\t})\n\t}\n}
10*time.Second)\n\t\tevent := events[0]\n\t\texpectedReason := k8s.ManagementConflict\n\t\tif event.Reason != expectedReason {\n\t\t\tt.Fatalf(\"event mismatch: got '%v', want '%v'\", event.Reason, expectedReason)\n\t\t}\n\t\t// Since the controller was unable to obtain the lease it does not write the default finalizer onto the object.\n\t\t// Add the finalizer manually so that we can test the deletion resource contention flow.\n\t\tensureFinalizer(t, mgr2, testContext.NamespacedName, testContext.CreateUnstruct)\n\t\tif err := mgr2.GetClient().Delete(context.TODO(), testContext.CreateUnstruct); err != nil {\n\t\t\tt.Fatalf(\"error deleting resource: %v\", err)\n\t\t}\n\t\treconciler2.Reconcile(testContext.CreateUnstruct, testreconciler.ExpectedUnsuccessfulReconcileResult, regexp.MustCompile(\"error obtaining lease\"))\n\t\tevents = testcontroller.CollectEvents(t, mgr2.GetConfig(), testContext.CreateUnstruct.GetNamespace(), 3, 10*time.Second)\n\t\tnextEvent := events[2]\n\t\tif nextEvent.Reason != expectedReason {\n\t\t\tt.Fatalf(\"event mismatch: got '%v', want '%v'\", nextEvent.Reason, expectedReason)\n\t\t}\n\t\tif !(event.LastTimestamp == nextEvent.LastTimestamp || event.LastTimestamp.Before(&nextEvent.LastTimestamp)) {\n\t\t\tt.Fatalf(\"expected the previous event's last timestamp to be before or equal to the next event's last timestamp\")\n\t\t}\n\t}\n\ttestrunner.RunAllWithDependenciesCreatedButNotObject(t, mgr1, shouldRun, testFunc)\n}", "func NewConflictR(field string, message string, args ...interface{}) *AppError {\n\treturn NewError(AlreadyExists, field, message, args...)\n}", "func (suite *TaskFailRetryTestSuite) TestTaskFailRetryFailedPatch() {\n\ttaskConfig := pbtask.TaskConfig{\n\t\tRestartPolicy: &pbtask.RestartPolicy{\n\t\t\tMaxFailures: 3,\n\t\t},\n\t}\n\n\tsuite.cachedTask.EXPECT().\n\t\tID().\n\t\tReturn(uint32(0)).\n\t\tAnyTimes()\n\n\tsuite.jobFactory.EXPECT().\n\t\tGetJob(suite.jobID).Return(suite.cachedJob)\n\n\tsuite.cachedJob.EXPECT().\n\t\tGetTask(suite.instanceID).Return(suite.cachedTask)\n\n\tsuite.cachedJob.EXPECT().\n\t\tID().Return(suite.jobID)\n\n\tsuite.cachedTask.EXPECT().\n\t\tGetRuntime(gomock.Any()).Return(suite.taskRuntime, nil)\n\n\tsuite.taskConfigV2Ops.EXPECT().\n\t\tGetTaskConfig(gomock.Any(), suite.jobID, suite.instanceID, gomock.Any()).\n\t\tReturn(&taskConfig, &models.ConfigAddOn{}, nil)\n\n\tsuite.cachedJob.EXPECT().\n\t\tPatchTasks(gomock.Any(), gomock.Any(), false).\n\t\tReturn(nil, nil, fmt.Errorf(\"patch error\"))\n\n\terr := TaskFailRetry(context.Background(), suite.taskEnt)\n\tsuite.Error(err)\n}", "func Test_jsonpatch_Replace_NonExistent_IsError(t *testing.T) {\n\tt.SkipNow() // Test is no working as expected\n\tg := NewWithT(t)\n\n\torigDoc := []byte(`{\"asd\":\"foof\"}`)\n\n\tpatch1, _ := jsonpatch.DecodePatch([]byte(`[{\"op\":\"replace\", \"path\":\"/test_key\", \"value\":\"qwe\"}]`))\n\n\texpectNewDoc := []byte(`{\"asd\":\"foof\"}`)\n\n\tnewDoc, err := patch1.Apply(origDoc)\n\tg.Expect(err).Should(HaveOccurred(), \"patch apply\")\n\tg.Expect(jsonpatch.Equal(newDoc, expectNewDoc)).Should(BeTrue(), \"%v is not equal to %v\", string(newDoc), string(expectNewDoc))\n}", "func TestFileDirConflict(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\n\ttestFileDirConflict(t, false)\n\ttestFileDirConflict(t, true)\n}", "func Conflict(w http.ResponseWriter, message ...interface{}) {\n\tboom(w, 409, message...)\n}", "func TestPostPresentationMergeInvalidrequest(t *testing.T) {\n request := createPostPresentationMergeRequest()\n 
request.request = invalidizeTestParamValue(request.request, \"request\", \"PresentationsMergeRequest\").(PresentationsMergeRequest)\n    e := initializeTest(\"PostPresentationMerge\", \"request\", request.request)\n    if e != nil {\n        t.Errorf(\"Error: %v.\", e)\n        return\n    }\n    r, _, e := getTestApiClient().MergeDocumentApi.PostPresentationMerge(request)\n    assertError(t, \"PostPresentationMerge\", \"request\", r.Code, e)\n}", "func createVoteMapForConflicts(conflictIDs, timestampIDs []string) map[string]opinion.Opinions {\n\tvoteMap := map[string]opinion.Opinions{}\n\n\tfor _, id := range conflictIDs {\n\t\tvoteMap[id] = opinion.Opinions{}\n\t}\n\tfor _, id := range timestampIDs {\n\t\tvoteMap[id] = opinion.Opinions{}\n\t}\n\n\treturn voteMap\n}", "func TestDo_Retry(t *testing.T) {\n\ttype testServerResponse struct {\n\t\tAPIResource\n\t\tMessage string `json:\"message\"`\n\t}\n\n\tmessage := \"Hello, client.\"\n\trequestNum := 0\n\n\ttestServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\terr := r.ParseForm()\n\t\tassert.NoError(t, err)\n\n\t\t// The body should always be the same with every retry. We've\n\t\t// previously had regressions in this behavior as we switched to HTTP/2\n\t\t// and `Request` became non-reusable, so we want to check it with every\n\t\t// request.\n\t\tassert.Equal(t, \"bar\", r.Form.Get(\"foo\"))\n\n\t\tswitch requestNum {\n\t\tcase 0:\n\t\t\tw.WriteHeader(http.StatusConflict)\n\t\t\tw.Write([]byte(`{\"error\":\"Conflict (this should be retried).\"}`))\n\n\t\tcase 1:\n\t\t\tresponse := testServerResponse{Message: message}\n\n\t\t\tdata, err := json.Marshal(response)\n\t\t\tassert.NoError(t, err)\n\n\t\t\t_, err = w.Write(data)\n\t\t\tassert.NoError(t, err)\n\n\t\tdefault:\n\t\t\tassert.Fail(t, \"Should not have reached request %v\", requestNum)\n\t\t}\n\n\t\trequestNum++\n\t}))\n\tdefer testServer.Close()\n\n\tbackend := GetBackendWithConfig(\n\t\tAPIBackend,\n\t\t&BackendConfig{\n\t\t\tLeveledLogger: nullLeveledLogger,\n\t\t\tMaxNetworkRetries: Int64(5),\n\t\t\tURL: String(testServer.URL),\n\t\t},\n\t).(*BackendImplementation)\n\n\t// Disable sleeping during our tests.\n\tbackend.SetNetworkRetriesSleep(false)\n\n\trequest, err := backend.NewRequest(\n\t\thttp.MethodPost,\n\t\t\"/hello\",\n\t\t\"sk_test_123\",\n\t\t\"application/x-www-form-urlencoded\",\n\t\tnil,\n\t)\n\tassert.NoError(t, err)\n\n\tbodyBuffer := bytes.NewBufferString(\"foo=bar\")\n\tvar response testServerResponse\n\terr = backend.Do(request, bodyBuffer, &response)\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, message, response.Message)\n\n\t// We should have seen exactly two requests.\n\tassert.Equal(t, 2, requestNum)\n}", "func NewConflict(msg string) error {\n\treturn &ELBError{\n\t\tmsg: msg,\n\t\tCode: http.StatusConflict,\n\t}\n}", "func TestImportProjectCreatedFailWhenAPIIsExisted(t *testing.T) {\n\tapim := apimClients[0]\n\tprojectName := \"OpenAPI3Project\"\n\tusername := superAdminUser\n\tpassword := superAdminPassword\n\n\targs := &testutils.InitTestArgs{\n\t\tCtlUser: testutils.Credentials{Username: username, Password: password},\n\t\tSrcAPIM: apim,\n\t\tInitFlag: projectName,\n\t\tOasFlag: utils.TestOpenAPI3DefinitionPath,\n\t\tForceFlag: false,\n\t}\n\n\t//Import API for the First time\n\ttestutils.ValidateImportInitializedProject(t, args)\n\n\t//Import API for the second time\n\ttestutils.ValidateImportFailedWithInitializedProject(t, args)\n\n}", "func patchOrCreate(mapping *meta.RESTMapping, config *rest.Config, group string,\n\tversion 
string, namespace string, name string, data []byte) error {\n\tlog.Infof(\"Applying resource configuration for %v\", name)\n\terr := getResource(mapping, config, group, version, namespace, name)\n\tif err != nil {\n\t\tlog.Infof(\"getResource error, treating as not found: %v\", err)\n\t\terr = createResource(mapping, config, group, version, namespace, data)\n\t} else {\n\t\tlog.Infof(\"getResource succeeds, treating as found.\")\n\t\terr = patchResource(mapping, config, group, version, namespace, data)\n\t}\n\n\tfor i := 1; i < maxRetries && k8serrors.IsConflict(err); i++ {\n\t\ttime.Sleep(backoffInterval)\n\n\t\tlog.Infof(\"Retrying patchOrCreate at %v attempt ...\", i)\n\t\terr = getResource(mapping, config, group, version, namespace, name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = patchResource(mapping, config, group, version, namespace, data)\n\t}\n\n\tif err != nil && (k8serrors.IsConflict(err) || k8serrors.IsInvalid(err) ||\n\t\tk8serrors.IsMethodNotSupported(err)) {\n\t\tlog.Infof(\"Trying delete and create as last resort ...\")\n\t\tif err = deleteResource(mapping, config, group, version, namespace, name); err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = createResource(mapping, config, group, version, namespace, data)\n\t}\n\treturn err\n}", "func TestPostPresentationMergeInvalidname(t *testing.T) {\n request := createPostPresentationMergeRequest()\n request.name = invalidizeTestParamValue(request.name, \"name\", \"string\").(string)\n e := initializeTest(\"PostPresentationMerge\", \"name\", request.name)\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n r, _, e := getTestApiClient().MergeDocumentApi.PostPresentationMerge(request)\n assertError(t, \"PostPresentationMerge\", \"name\", r.Code, e)\n}", "func recordRetry(ctx context.Context, apiName, apiStatus string) {\n\n\tctx, err := tag.New(ctx,\n\t\ttag.Upsert(apiNameKey, apiName),\n\t\ttag.Upsert(apiStatusKey, apiStatus),\n\t)\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatalf(\"cannot add tags %v=%v %v=%v\", apiNameKey, apiName, apiStatusKey, apiStatus)\n\t}\n\n\tstats.Record(ctx, dockerRetriesMeasure.M(0))\n}", "func TestAllocRunner_HandlesArtifactFailure(t *testing.T) {\n\tci.Parallel(t)\n\n\talloc := mock.BatchAlloc()\n\trp := &structs.RestartPolicy{\n\t\tMode: structs.RestartPolicyModeFail,\n\t\tAttempts: 1,\n\t\tDelay: time.Nanosecond,\n\t\tInterval: time.Hour,\n\t}\n\talloc.Job.TaskGroups[0].RestartPolicy = rp\n\talloc.Job.TaskGroups[0].Tasks[0].RestartPolicy = rp\n\n\t// Create a new task with a bad artifact\n\tbadtask := alloc.Job.TaskGroups[0].Tasks[0].Copy()\n\tbadtask.Name = \"bad\"\n\tbadtask.Artifacts = []*structs.TaskArtifact{\n\t\t{GetterSource: \"http://127.0.0.1:0/foo/bar/baz\"},\n\t}\n\n\talloc.Job.TaskGroups[0].Tasks = append(alloc.Job.TaskGroups[0].Tasks, badtask)\n\talloc.AllocatedResources.Tasks[\"bad\"] = &structs.AllocatedTaskResources{\n\t\tCpu: structs.AllocatedCpuResources{\n\t\t\tCpuShares: 500,\n\t\t},\n\t\tMemory: structs.AllocatedMemoryResources{\n\t\t\tMemoryMB: 256,\n\t\t},\n\t}\n\n\tconf, cleanup := testAllocRunnerConfig(t, alloc)\n\tdefer cleanup()\n\tar, err := NewAllocRunner(conf)\n\trequire.NoError(t, err)\n\tgo ar.Run()\n\tdefer destroy(ar)\n\n\ttestutil.WaitForResult(func() (bool, error) {\n\t\tstate := ar.AllocState()\n\n\t\tswitch state.ClientStatus {\n\t\tcase structs.AllocClientStatusComplete, structs.AllocClientStatusFailed:\n\t\t\treturn true, nil\n\t\tdefault:\n\t\t\treturn false, fmt.Errorf(\"got status %v but want terminal\", 
state.ClientStatus)\n\t\t}\n\n\t}, func(err error) {\n\t\trequire.NoError(t, err)\n\t})\n\n\tstate := ar.AllocState()\n\trequire.Equal(t, structs.AllocClientStatusFailed, state.ClientStatus)\n\trequire.Equal(t, structs.TaskStateDead, state.TaskStates[\"web\"].State)\n\trequire.True(t, state.TaskStates[\"web\"].Successful())\n\trequire.Equal(t, structs.TaskStateDead, state.TaskStates[\"bad\"].State)\n\trequire.True(t, state.TaskStates[\"bad\"].Failed)\n}", "func TestAllocRunner_TaskFailed_KillTG(t *testing.T) {\n\tci.Parallel(t)\n\n\talloc := mock.Alloc()\n\ttr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name]\n\talloc.Job.TaskGroups[0].RestartPolicy.Attempts = 0\n\talloc.Job.TaskGroups[0].Tasks[0].RestartPolicy.Attempts = 0\n\n\t// Create two tasks in the task group\n\ttask := alloc.Job.TaskGroups[0].Tasks[0]\n\ttask.Name = \"task1\"\n\ttask.Driver = \"mock_driver\"\n\ttask.KillTimeout = 10 * time.Millisecond\n\ttask.Config = map[string]interface{}{\n\t\t\"run_for\": \"10s\",\n\t}\n\t// Set a service with check\n\ttask.Services = []*structs.Service{\n\t\t{\n\t\t\tName: \"fakservice\",\n\t\t\tPortLabel: \"http\",\n\t\t\tProvider: structs.ServiceProviderConsul,\n\t\t\tChecks: []*structs.ServiceCheck{\n\t\t\t\t{\n\t\t\t\t\tName: \"fakecheck\",\n\t\t\t\t\tType: structs.ServiceCheckScript,\n\t\t\t\t\tCommand: \"true\",\n\t\t\t\t\tInterval: 30 * time.Second,\n\t\t\t\t\tTimeout: 5 * time.Second,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\ttask2 := alloc.Job.TaskGroups[0].Tasks[0].Copy()\n\ttask2.Name = \"task 2\"\n\ttask2.Driver = \"mock_driver\"\n\ttask2.Config = map[string]interface{}{\n\t\t\"start_error\": \"fail task please\",\n\t}\n\talloc.Job.TaskGroups[0].Tasks = append(alloc.Job.TaskGroups[0].Tasks, task2)\n\talloc.AllocatedResources.Tasks[task.Name] = tr\n\talloc.AllocatedResources.Tasks[task2.Name] = tr\n\n\t// Make the alloc be part of a deployment\n\talloc.DeploymentID = uuid.Generate()\n\talloc.Job.TaskGroups[0].Update = structs.DefaultUpdateStrategy.Copy()\n\talloc.Job.TaskGroups[0].Update.HealthCheck = structs.UpdateStrategyHealthCheck_Checks\n\talloc.Job.TaskGroups[0].Update.MaxParallel = 1\n\talloc.Job.TaskGroups[0].Update.MinHealthyTime = 10 * time.Millisecond\n\talloc.Job.TaskGroups[0].Update.HealthyDeadline = 2 * time.Second\n\n\tcheckHealthy := &api.AgentCheck{\n\t\tCheckID: uuid.Generate(),\n\t\tStatus: api.HealthPassing,\n\t}\n\n\tconf, cleanup := testAllocRunnerConfig(t, alloc)\n\tdefer cleanup()\n\n\tconsulClient := conf.Consul.(*regMock.ServiceRegistrationHandler)\n\tconsulClient.AllocRegistrationsFn = func(allocID string) (*serviceregistration.AllocRegistration, error) {\n\t\treturn &serviceregistration.AllocRegistration{\n\t\t\tTasks: map[string]*serviceregistration.ServiceRegistrations{\n\t\t\t\ttask.Name: {\n\t\t\t\t\tServices: map[string]*serviceregistration.ServiceRegistration{\n\t\t\t\t\t\t\"123\": {\n\t\t\t\t\t\t\tService: &api.AgentService{Service: \"fakeservice\"},\n\t\t\t\t\t\t\tChecks: []*api.AgentCheck{checkHealthy},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t}\n\n\tar, err := NewAllocRunner(conf)\n\trequire.NoError(t, err)\n\tdefer destroy(ar)\n\tgo ar.Run()\n\tupd := conf.StateUpdater.(*MockStateUpdater)\n\n\ttestutil.WaitForResult(func() (bool, error) {\n\t\tlast := upd.Last()\n\t\tif last == nil {\n\t\t\treturn false, fmt.Errorf(\"No updates\")\n\t\t}\n\t\tif last.ClientStatus != structs.AllocClientStatusFailed {\n\t\t\treturn false, fmt.Errorf(\"got status %v; want %v\", last.ClientStatus, 
structs.AllocClientStatusFailed)\n\t\t}\n\n\t\t// Task One should be killed\n\t\tstate1 := last.TaskStates[task.Name]\n\t\tif state1.State != structs.TaskStateDead {\n\t\t\treturn false, fmt.Errorf(\"got state %v; want %v\", state1.State, structs.TaskStateDead)\n\t\t}\n\t\tif len(state1.Events) < 2 {\n\t\t\t// At least have a received and destroyed\n\t\t\treturn false, fmt.Errorf(\"Unexpected number of events\")\n\t\t}\n\n\t\tfound := false\n\t\tfor _, e := range state1.Events {\n\t\t\tif e.Type == structs.TaskSiblingFailed {\n\t\t\t\tfound = true\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\treturn false, fmt.Errorf(\"Did not find event %v\", structs.TaskSiblingFailed)\n\t\t}\n\n\t\t// Task Two should be failed\n\t\tstate2 := last.TaskStates[task2.Name]\n\t\tif state2.State != structs.TaskStateDead {\n\t\t\treturn false, fmt.Errorf(\"got state %v; want %v\", state2.State, structs.TaskStateDead)\n\t\t}\n\t\tif !state2.Failed {\n\t\t\treturn false, fmt.Errorf(\"task2 should have failed\")\n\t\t}\n\n\t\tif !last.DeploymentStatus.HasHealth() {\n\t\t\treturn false, fmt.Errorf(\"Expected deployment health to be non nil\")\n\t\t}\n\n\t\treturn true, nil\n\t}, func(err error) {\n\t\trequire.Fail(t, \"err: %v\", err)\n\t})\n}
field that is an index will fail without the\n\t// need of this wait operation.\n\t// Some models that require a complex condition to detect duplicates are not\n\t// considered for the time being due to the performance hit (e.g ACLs).\n\ttimeout := types.OVSDBWaitTimeout\n\tvar field interface{}\n\tvar value string\n\tswitch t := m.(type) {\n\tcase *nbdb.LogicalRouter:\n\t\tfield = &t.Name\n\t\tvalue = t.Name\n\tcase *nbdb.LogicalSwitch:\n\t\tfield = &t.Name\n\t\tvalue = t.Name\n\tcase *nbdb.LogicalRouterPolicy:\n\t\tcondPriority := model.Condition{\n\t\t\tField: &t.Priority,\n\t\t\tFunction: ovsdb.ConditionEqual,\n\t\t\tValue: t.Priority,\n\t\t}\n\t\tcondMatch := model.Condition{\n\t\t\tField: &t.Match,\n\t\t\tFunction: ovsdb.ConditionEqual,\n\t\t\tValue: t.Match,\n\t\t}\n\t\treturn c.WhereAll(t, condPriority, condMatch).Wait(\n\t\t\tovsdb.WaitConditionNotEqual,\n\t\t\t&timeout,\n\t\t\tt,\n\t\t\t&t.Priority,\n\t\t\t&t.Match,\n\t\t)\n\tdefault:\n\t\treturn []ovsdb.Operation{}, nil\n\t}\n\n\tcond := model.Condition{\n\t\tField: field,\n\t\tFunction: ovsdb.ConditionEqual,\n\t\tValue: value,\n\t}\n\treturn c.WhereAny(m, cond).Wait(ovsdb.WaitConditionNotEqual, &timeout, m, field)\n}", "func TestIntermediateNameSameNameDifferentLayer(t *testing.T) {\n\tth := kusttest_test.MakeHarness(t)\n\tth.WriteK(\"gcp\", `\nnamePrefix: gcp-\nresources:\n- ../emea\npatchesStrategicMerge:\n- depPatch.yaml\n`)\n\tth.WriteF(\"gcp/depPatch.yaml\", `\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: prod-foo\nspec:\n replicas: 999\n`)\n\tth.WriteK(\"emea\", `\nnamePrefix: emea-\nresources:\n- ../prod\n- deployment.yaml\n`)\n\tth.WriteF(\"emea/deployment.yaml\", `\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: foo\nspec:\n template:\n spec:\n containers:\n - image: whatever\n`)\n\tth.WriteK(\"prod\", `\nnamePrefix: prod-\nresources:\n- ../base\n`)\n\tth.WriteK(\"base\", `\nresources:\n- deployment.yaml\n`)\n\tth.WriteF(\"base/deployment.yaml\", `\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: foo\nspec:\n template:\n spec:\n containers:\n - image: whatever\n`)\n\tm := th.Run(\"gcp\", th.MakeDefaultOptions())\n\tth.AssertActualEqualsExpected(m, `\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: gcp-emea-prod-foo\nspec:\n replicas: 999\n template:\n spec:\n containers:\n - image: whatever\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: gcp-emea-foo\nspec:\n template:\n spec:\n containers:\n - image: whatever\n`)\n}", "func FaultTolerance(attempts int, backoff time.Duration) Decorator {\n\treturn func(c HTTPClient) HTTPClient {\n\t\treturn ClientFunc(func(r *http.Request) (res *http.Response, err error) {\n\t\t\tfor i := 0; i <= attempts; i++ {\n\t\t\t\tif res, err = c.Do(r); err == nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tslow := 1\n\t\t\t\t// Code: 252 DB::Exception: Too many parts (300). 
Merges are processing significantly slower than inserts\n\t\t\t\t// This error needs longer backoff time\n\t\t\t\tif strings.HasPrefix(err.Error(), \"Code: 252\") {\n\t\t\t\t\tslow = 10\n\t\t\t\t}\n\t\t\t\ttime.Sleep(backoff * time.Duration(i) * time.Duration(slow))\n\t\t\t}\n\t\t\treturn res, err\n\t\t})\n\t}\n}", "func (fbo *folderBranchOps) forceStuckConflictForTesting(\n\tctx context.Context) (err error) {\n\tstartTime, timer := fbo.startOp(ctx, \"Forcing a stuck conflict\")\n\tdefer func() {\n\t\tfbo.endOp(\n\t\t\tctx, startTime, timer, \"Forcing a stuck conflict done: %+v\", err)\n\t}()\n\n\tlState := makeFBOLockState()\n\tfbo.mdWriterLock.Lock(lState)\n\tdefer fbo.mdWriterLock.Unlock(lState)\n\n\tif fbo.isUnmergedLocked(lState) {\n\t\treturn errors.New(\"Cannot force conflict when already unmerged\")\n\t}\n\n\t// Disable updates.\n\tunpauseUpdatesCh := make(chan struct{})\n\tselect {\n\tcase fbo.updatePauseChan <- unpauseUpdatesCh:\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n\tdefer func() { unpauseUpdatesCh <- struct{}{} }()\n\n\t// Make a no-op revision with an empty resolutionOp. Wait for it\n\t// to flush to the server.\n\torigHead, _ := fbo.getHead(ctx, lState, mdNoCommit)\n\tmergedGCOp := newGCOp(origHead.data.LastGCRevision)\n\terr = fbo.finalizeGCOpLocked(ctx, lState, mergedGCOp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tjManager, _ := GetJournalManager(fbo.config)\n\tif jManager != nil {\n\t\terr := fbo.waitForJournalLocked(ctx, lState, jManager)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Wait for the flush handler to finish, so we don't\n\t\t// accidentally swap in the upcoming MD on the conflict branch\n\t\t// over the \"merged\" one we just flushed, before the pointer\n\t\t// archiving step happens.\n\t\terr = fbo.mdFlushes.Wait(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Roll back the local view to the original revision.\n\terr = func() error {\n\t\tfbo.headLock.Lock(lState)\n\t\tdefer fbo.headLock.Unlock(lState)\n\t\terr = fbo.setHeadLocked(ctx, lState, origHead, headTrusted, mdNoCommit)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfbo.setLatestMergedRevisionLocked(\n\t\t\tctx, lState, origHead.Revision(), true)\n\t\treturn nil\n\t}()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Set CR to always fail.\n\toldMode := fbo.cr.getFailModeForTesting()\n\tfbo.cr.setFailModeForTesting(alwaysFailCR)\n\tdefer func() { fbo.cr.setFailModeForTesting(oldMode) }()\n\n\t// Make fake conflicting files to trigger CR. 
Make one for each\n\t// attempt needed to result in stuck CR.\n\thandle := origHead.GetTlfHandle()\n\trootNode, err := fbo.nodeCache.GetOrCreate(\n\t\torigHead.data.Dir.BlockPointer,\n\t\tdata.NewPathPartString(string(handle.GetCanonicalName()),\n\t\t\tfbo.makeObfuscator()),\n\t\tnil, data.Dir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i := 0; i < maxConflictResolutionAttempts+1; i++ {\n\t\tfilename := fmt.Sprintf(\"FILE_FOR_STUCK_CONFLICT_%02d\", i)\n\t\t_, _, err := fbo.createEntryLocked(\n\t\t\tctx, lState, rootNode, rootNode.ChildName(filename), data.File,\n\t\t\tNoExcl)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = fbo.syncAllLocked(ctx, lState, NoExcl)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif jManager != nil && TLFJournalEnabled(fbo.config, fbo.id()) {\n\t\t\t// Can't use fbo.waitForJournalLocked here, since the\n\t\t\t// flushing won't actually complete.\n\t\t\terr := jManager.Wait(ctx, fbo.id())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tnewHead, _ := fbo.getHead(ctx, lState, mdNoCommit)\n\t\t\tfbo.cr.Resolve(\n\t\t\t\tctx, newHead.Revision(), kbfsmd.RevisionUninitialized)\n\t\t}\n\n\t\terr = fbo.cr.Wait(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Make sure we're stuck.\n\tisStuck, err := fbo.cr.isStuck()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !isStuck {\n\t\treturn errors.New(\"CR not stuck after trying to force conflict\")\n\t}\n\n\treturn nil\n}", "func TestClient_CreateReplica_Err(t *testing.T) {\n\tc := OpenClient(0)\n\tdefer c.Close()\n\tc.Server.Handler.Broker().CreateReplica(123, &url.URL{Host: \"localhost\"})\n\tif err := c.CreateReplica(123, &url.URL{Host: \"localhost\"}); err == nil || err.Error() != `replica already exists` {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n}", "func CreateTemplateFailJSONMocked(t *testing.T, templateIn *types.Template) *types.Template {\n\n\tassert := assert.New(t)\n\n\t// wire up\n\tcs := &utils.MockConcertoService{}\n\tds, err := NewTemplateService(cs)\n\tassert.Nil(err, \"Couldn't load template service\")\n\tassert.NotNil(ds, \"Template service not instanced\")\n\n\t// convertMap\n\tmapIn, err := utils.ItemConvertParams(*templateIn)\n\tassert.Nil(err, \"Template test data corrupted\")\n\n\t// wrong json\n\tdOut := []byte{10, 20, 30}\n\n\t// call service\n\tcs.On(\"Post\", \"/blueprint/templates/\", mapIn).Return(dOut, 200, nil)\n\ttemplateOut, err := ds.CreateTemplate(mapIn)\n\tassert.NotNil(err, \"We are expecting a marshalling error\")\n\tassert.Nil(templateOut, \"Expecting nil output\")\n\tassert.Contains(err.Error(), \"invalid character\", \"Error message should include the string 'invalid character'\")\n\n\treturn templateOut\n}", "func testFileDirConflict(t *testing.T, open bool) {\n\t// Prepare a filesystem.\n\troot := filepath.Join(testDir(t.Name()), fmt.Sprintf(\"open-%v\", open), \"fs-root\")\n\terr := os.RemoveAll(root)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfs := newTestFileSystem(root)\n\n\t// Create a file.\n\tfilepath := newSiaPath(\"dir1/file1\")\n\tfs.addTestSiaFile(filepath)\n\n\tif open {\n\t\t// Open the file. 
This shouldn't affect later checks.\n\t\tnode, err := fs.OpenSiaFile(filepath)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer func() {\n\t\t\terr := node.Close()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}()\n\t}\n\n\t// Make sure we can't create another file with the same name.\n\terr = fs.addTestSiaFileWithErr(filepath)\n\tif !errors.Contains(err, ErrExists) {\n\t\tt.Fatalf(\"Expected err %v, got %v\", ErrExists, err)\n\t}\n\n\t// Make sure we can't rename another file to the same name.\n\tfilepath2 := newSiaPath(\"dir1/file2\")\n\tfs.addTestSiaFile(filepath2)\n\terr = fs.RenameFile(filepath2, filepath)\n\tif !errors.Contains(err, ErrExists) {\n\t\tt.Fatalf(\"Expected err %v, got %v\", ErrExists, err)\n\t}\n\n\t// Make sure we (still) can't create another file with the same name.\n\terr = fs.addTestSiaFileWithErr(filepath)\n\tif !errors.Contains(err, ErrExists) {\n\t\tt.Fatalf(\"Expected err %v, got %v\", ErrExists, err)\n\t}\n\n\t// Make sure we can't create a dir with the same name.\n\terr = fs.NewSiaDir(filepath, modules.DefaultDirPerm)\n\tif !errors.Contains(err, ErrExists) {\n\t\tt.Fatalf(\"Expected err %v, got %v\", ErrExists, err)\n\t}\n\n\t// Create a dir.\n\tdirpath := newSiaPath(\"dir2/dir3\")\n\terr = fs.NewSiaDir(dirpath, modules.DefaultDirPerm)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif open {\n\t\t// Open the dir. This shouldn't affect later checks.\n\t\tnode, err := fs.OpenSiaDir(dirpath)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer func() {\n\t\t\terr := node.Close()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}()\n\t}\n\n\t// Make sure we CAN create another dir with the same name as the first\n\t// dir.\n\terr = fs.NewSiaDir(dirpath, modules.DefaultDirPerm)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Make sure we can't rename a dir to the same name as the first file.\n\terr = fs.RenameDir(dirpath, filepath)\n\tif !errors.Contains(err, ErrExists) {\n\t\tt.Fatalf(\"Expected err %v, got %v\", ErrExists, err)\n\t}\n\n\t// Make sure we still CAN create another dir with the same name as the first\n\t// dir.\n\terr = fs.NewSiaDir(dirpath, modules.DefaultDirPerm)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Make sure we can't create a file with the same name as the dir.\n\terr = fs.addTestSiaFileWithErr(dirpath)\n\tif !errors.Contains(err, ErrExists) {\n\t\tt.Fatalf(\"Expected err %v, got %v\", ErrExists, err)\n\t}\n\n\t// Make sure we can't rename a file to the same name as the first dir.\n\terr = fs.RenameFile(filepath, dirpath)\n\tif !errors.Contains(err, ErrExists) {\n\t\tt.Fatalf(\"Expected err %v, got %v\", ErrExists, err)\n\t}\n\n\t// Make sure we can't rename another dir to the same name as the first dir.\n\tdirpath2 := newSiaPath(\"dir2/dir4\")\n\terr = fs.NewSiaDir(dirpath2, modules.DefaultDirPerm)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = fs.RenameDir(dirpath2, dirpath)\n\tif !errors.Contains(err, ErrExists) {\n\t\tt.Fatalf(\"Expected err %v, got %v\", ErrExists, err)\n\t}\n}", "func TestRegisterByDeviceFailWithInvalidUUID(t *testing.T) {\n\t// initialize\n\tapiTest.T = t\n\ttestCaseStatusError := []struct {\n\t\tname string\n\t\tparamRequest map[string][]string\n\t}{\n\t\t{\n\t\t\tname: \"miss device id parameter\",\n\t\t\tparamRequest: map[string][]string{},\n\t\t},\n\t\t{\n\t\t\tname: \"device id is empty\",\n\t\t\tparamRequest: map[string][]string{\n\t\t\t\t\"device_id\": {\"\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"device id has length > 36\",\n\t\t\tparamRequest: 
map[string][]string{\n\t\t\t\t\"device_id\": {\"123e4567-e89b-12d3-a456-426655440018MORETHAN36\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"device id has length < 36\",\n\t\t\tparamRequest: map[string][]string{\n\t\t\t\t\"device_id\": {\"123e4567-e89b-12d3-a456-LOWER36\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"device id wrong format 8-4-4-4-12\",\n\t\t\tparamRequest: map[string][]string{\n\t\t\t\t\"device_id\": {\"124567-e89b-12d3-a43333356-WRONGFORMAT\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"block 1 of DeviceID has special character\",\n\t\t\tparamRequest: map[string][]string{\n\t\t\t\t\"device_id\": {\"123$4567-e89b-12d3-a456-426655440018\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"block 1 of DeviceID has > 8 character\",\n\t\t\tparamRequest: map[string][]string{\n\t\t\t\t\"device_id\": {\"123e4567MORE8-e89b-12d3-a456-426655440018\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"block 1 of DeviceID has < 8 character\",\n\t\t\tparamRequest: map[string][]string{\n\t\t\t\t\"device_id\": {\"LOWER8-e89b-12d3-a456-426655440018\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"block 2 of DeviceID has special character\",\n\t\t\tparamRequest: map[string][]string{\n\t\t\t\t\"device_id\": {\"123e4567-e$9b-12d3-a456-426655440018\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"block 2 of DeviceID has > 4 character\",\n\t\t\tparamRequest: map[string][]string{\n\t\t\t\t\"device_id\": {\"123e4567-e8MORE89b-12d3-a456-426655440018\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"block 2 of DeviceID has < 4 character\",\n\t\t\tparamRequest: map[string][]string{\n\t\t\t\t\"device_id\": {\"123e4567-eb-12d3-a456-426655440018\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"block 3 of DeviceID has special character\",\n\t\t\tparamRequest: map[string][]string{\n\t\t\t\t\"device_id\": {\"123e4567-e89b-12$3-a456-426655440018\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"block 3 of DeviceID has > 4 character\",\n\t\t\tparamRequest: map[string][]string{\n\t\t\t\t\"device_id\": {\"123e4567-e89b-12d356-a456-426655440018\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"block 3 of DeviceID has < 4 character\",\n\t\t\tparamRequest: map[string][]string{\n\t\t\t\t\"device_id\": {\"123e4567-e89b-12-a456-426655440018\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"block 4 of DeviceID has special character\",\n\t\t\tparamRequest: map[string][]string{\n\t\t\t\t\"device_id\": {\"123e4567-e89b-12d3-a$56-426655440018\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"block 4 of DeviceID has > 4 character\",\n\t\t\tparamRequest: map[string][]string{\n\t\t\t\t\"device_id\": {\"123e4567-e89b-12d3-a456sdasdas-426655440018\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"block 4 of DeviceID has < 4 character\",\n\t\t\tparamRequest: map[string][]string{\n\t\t\t\t\"device_id\": {\"123e4567-e89b-12d3-a6-426655440018\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"block 5 of DeviceID has special character\",\n\t\t\tparamRequest: map[string][]string{\n\t\t\t\t\"device_id\": {\"123e4567-e89b-12d3-a456-42665544001$\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"block 5 of DeviceID has > 12 character\",\n\t\t\tparamRequest: map[string][]string{\n\t\t\t\t\"device_id\": {\"123e4567-e89b-12d3-a456-426655440018MORETHAN12\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"block 5 of DeviceID has < 12 character\",\n\t\t\tparamRequest: map[string][]string{\n\t\t\t\t\"device_id\": {\"123e4567-e89b-12d3-a456-LOWER12\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, testCase := range testCaseStatusError {\n\t\tt.Run(testCase.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tresp := 
sendRequest(testCase.paramRequest, \"application/x-www-form-urlencoded\", apiTest)\n\t\t\t// check status bad request.\n\t\t\tcheckStatusCodeResponse(t, resp, http.StatusBadRequest)\n\t\t\t// check response data.\n\t\t\tcheckJSONResponeError(t, resp)\n\t\t\t// check user is not created in user_app table\n\t\t\tassert.False(t, checkUserExisted(testCase.paramRequest[\"device_id\"][0]))\n\t\t})\n\t}\n}", "func TestConstructRoutingTreeConflict(t *testing.T) {\n\t// create gearbox instance\n\tgb := new(gearbox)\n\tgb.registeredRoutes = make([]*routeInfo, 0)\n\n\t// register routes\n\tgb.registerRoute(MethodGet, \"/articles/test\", emptyHandler)\n\tgb.registerRoute(MethodGet, \"/articles/test\", emptyHandler)\n\n\tif err := gb.constructRoutingTree(); err == nil {\n\t\tt.Fatalf(\"invalid listener passed\")\n\t}\n}", "func TestPutPresentationMergeInvalidrequest(t *testing.T) {\n request := createPutPresentationMergeRequest()\n request.request = invalidizeTestParamValue(request.request, \"request\", \"OrderedMergeRequest\").(OrderedMergeRequest)\n e := initializeTest(\"PutPresentationMerge\", \"request\", request.request)\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n r, _, e := getTestApiClient().MergeDocumentApi.PutPresentationMerge(request)\n assertError(t, \"PutPresentationMerge\", \"request\", r.Code, e)\n}", "func TestCheckRequiredTemplate_Create_DoesNotSwallowError(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tr := ResourceCheckRequiredTemplate()\n\tresourceData := schema.TestResourceDataRaw(t, r.Schema, nil)\n\tflattenErr := flattenCheckRequiredTemplate(resourceData, &requiredTemplateCheckTest, requiredTemplateCheckProjectID)\n\n\tpipelinesChecksClient := azdosdkmocks.NewMockPipelineschecksextrasClient(ctrl)\n\tclients := &client.AggregatedClient{PipelinesChecksClientExtras: pipelinesChecksClient, Ctx: context.Background()}\n\n\texpectedArgs := pipelineschecksextras.AddCheckConfigurationArgs{Configuration: &requiredTemplateCheckTest, Project: &requiredTemplateCheckProjectID}\n\tpipelinesChecksClient.\n\t\tEXPECT().\n\t\tAddCheckConfiguration(clients.Ctx, expectedArgs).\n\t\tReturn(nil, errors.New(\"AddCheckConfiguration() Failed\")).\n\t\tTimes(1)\n\n\terr := r.Create(resourceData, clients)\n\trequire.Contains(t, err.Error(), \"AddCheckConfiguration() Failed\")\n\trequire.Nil(t, flattenErr)\n}", "func TestReloadWithReadLock_PartialRegisterFailure(t *testing.T) {\n\trequire := require.New(t)\n\n\tresources := initVMRegistryTest(t)\n\n\tfactory1 := vms.NewMockFactory(resources.ctrl)\n\tfactory2 := vms.NewMockFactory(resources.ctrl)\n\tfactory3 := vms.NewMockFactory(resources.ctrl)\n\tfactory4 := vms.NewMockFactory(resources.ctrl)\n\n\tregisteredVms := map[ids.ID]vms.Factory{\n\t\tid1: factory1,\n\t\tid2: factory2,\n\t}\n\n\tunregisteredVms := map[ids.ID]vms.Factory{\n\t\tid3: factory3,\n\t\tid4: factory4,\n\t}\n\n\tresources.mockVMGetter.EXPECT().\n\t\tGet().\n\t\tTimes(1).\n\t\tReturn(registeredVms, unregisteredVms, nil)\n\tresources.mockVMRegisterer.EXPECT().\n\t\tRegisterWithReadLock(gomock.Any(), id3, factory3).\n\t\tTimes(1).\n\t\tReturn(errTest)\n\tresources.mockVMRegisterer.EXPECT().\n\t\tRegisterWithReadLock(gomock.Any(), id4, factory4).\n\t\tTimes(1).\n\t\tReturn(nil)\n\n\tinstalledVMs, failedVMs, err := resources.vmRegistry.ReloadWithReadLock(context.Background())\n\trequire.NoError(err)\n\trequire.Len(failedVMs, 1)\n\trequire.ErrorIs(failedVMs[id3], errTest)\n\trequire.Len(installedVMs, 1)\n\trequire.Equal(id4, installedVMs[0])\n}", 
"func TestDeployRouterInvalidConfig(t *testing.T) {\n\t// Read existing router that MUST have already exists from previous create router e2e test\n\t// Router name is assumed to follow this format: e2e-experiment-{{.TestID}}\n\trouterName := \"e2e-experiment-\" + globalTestContext.TestID\n\tt.Log(fmt.Sprintf(\"Retrieving router with name '%s' created from previous test step\", routerName))\n\texistingRouter, err := getRouterByName(\n\t\tglobalTestContext.httpClient, globalTestContext.APIBasePath, globalTestContext.ProjectID, routerName)\n\trequire.NoError(t, err)\n\n\t// Deploy router version\n\turl := fmt.Sprintf(\n\t\t\"%s/projects/%d/routers/%d/versions/2/deploy\",\n\t\tglobalTestContext.APIBasePath,\n\t\tglobalTestContext.ProjectID, existingRouter.ID,\n\t)\n\tt.Log(\"Deploying router: POST \" + url)\n\treq, err := http.NewRequestWithContext(context.Background(), http.MethodPost, url, nil)\n\trequire.NoError(t, err)\n\tresponse, err := globalTestContext.httpClient.Do(req)\n\trequire.NoError(t, err)\n\tdefer response.Body.Close()\n\tassert.Equal(t, http.StatusAccepted, response.StatusCode)\n\n\t// Wait for the version status to to change to success/failed deployment\n\tt.Log(\"Waiting for router to deploy\")\n\terr = waitDeployVersion(\n\t\tglobalTestContext.httpClient,\n\t\tglobalTestContext.APIBasePath,\n\t\tglobalTestContext.ProjectID,\n\t\tint(existingRouter.ID),\n\t\t2,\n\t)\n\trequire.NoError(t, err)\n\n\t// Test router version configuration\n\tt.Log(\"Testing GET router version\")\n\trouterVersion, err := getRouterVersion(\n\t\tglobalTestContext.httpClient,\n\t\tglobalTestContext.APIBasePath,\n\t\tglobalTestContext.ProjectID,\n\t\tint(existingRouter.ID),\n\t\t2,\n\t)\n\trequire.NoError(t, err)\n\tassert.Equal(t, models.RouterVersionStatusFailed, routerVersion.Status)\n\n\t// Test router configuration\n\tt.Log(\"Testing GET router\")\n\trouter, err := getRouter(\n\t\tglobalTestContext.httpClient,\n\t\tglobalTestContext.APIBasePath,\n\t\tglobalTestContext.ProjectID,\n\t\tint(existingRouter.ID),\n\t)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, router.CurrRouterVersion)\n\t// the expected version 1 is the valid version that the deployment fallback to due to invalid config\n\tassert.Equal(t, uint(1), router.CurrRouterVersion.Version)\n\tassert.Equal(t, models.RouterVersionStatusUndeployed, router.CurrRouterVersion.Status)\n\tassert.Equal(t, models.RouterStatusUndeployed, router.Status)\n}", "func Conflict(msg string) Error {\n\te := err{msg: msg, code: conflictCode, group: generic, kind: conflict}\n\treturn &e\n}", "func TestAccKeycloakOpenIdFullNameProtocolMapper_clientDuplicateNameValidation(t *testing.T) {\n\trealmName := \"terraform-realm-\" + acctest.RandString(10)\n\tclientId := \"terraform-client-\" + acctest.RandString(10)\n\tmapperName := \"terraform-protocol-mapper-\" + acctest.RandString(5)\n\n\tgroupMembershipProtocolMapperResourceName := \"keycloak_openid_group_membership_protocol_mapper.group_membership_mapper_client\"\n\n\tresource.Test(t, resource.TestCase{\n\t\tProviderFactories: testAccProviderFactories,\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tCheckDestroy: testAccKeycloakOpenIdGroupMembershipProtocolMapperDestroy(),\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testGenericProtocolMapperValidation_clientGroupMembershipMapper(realmName, clientId, mapperName),\n\t\t\t\tCheck: testKeycloakOpenIdGroupMembershipProtocolMapperExists(groupMembershipProtocolMapperResourceName),\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: 
testGenericProtocolMapperValidation_clientFullNameAndGroupMembershipMapper(realmName, clientId, mapperName),\n\t\t\t\tExpectError: regexp.MustCompile(\"validation error: a protocol mapper with name .+ already exists for this client\"),\n\t\t\t},\n\t\t},\n\t})\n}", "func TestAzureDevOpsServiceEndpointDockerRegistry_Create_DoesNotSwallowError(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tr := resourceServiceEndpointDockerRegistry()\n\tresourceData := schema.TestResourceDataRaw(t, r.Schema, nil)\n\tflattenServiceEndpointDockerRegistry(resourceData, &dockerRegistryTestServiceEndpoint, dockerRegistryTestServiceEndpointProjectID)\n\n\tbuildClient := azdosdkmocks.NewMockServiceendpointClient(ctrl)\n\tclients := &config.AggregatedClient{ServiceEndpointClient: buildClient, Ctx: context.Background()}\n\n\texpectedArgs := serviceendpoint.CreateServiceEndpointArgs{Endpoint: &dockerRegistryTestServiceEndpoint, Project: dockerRegistryTestServiceEndpointProjectID}\n\tbuildClient.\n\t\tEXPECT().\n\t\tCreateServiceEndpoint(clients.Ctx, expectedArgs).\n\t\tReturn(nil, errors.New(\"CreateServiceEndpoint() Failed\")).\n\t\tTimes(1)\n\n\terr := r.Create(resourceData, clients)\n\trequire.Contains(t, err.Error(), \"CreateServiceEndpoint() Failed\")\n}", "func TestPutPresentationMergeInvalidname(t *testing.T) {\n request := createPutPresentationMergeRequest()\n request.name = invalidizeTestParamValue(request.name, \"name\", \"string\").(string)\n e := initializeTest(\"PutPresentationMerge\", \"name\", request.name)\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n r, _, e := getTestApiClient().MergeDocumentApi.PutPresentationMerge(request)\n assertError(t, \"PutPresentationMerge\", \"name\", r.Code, e)\n}", "func UpdateTemplateFailErrMocked(t *testing.T, templateIn *types.Template) *types.Template {\n\n\tassert := assert.New(t)\n\n\t// wire up\n\tcs := &utils.MockConcertoService{}\n\tds, err := NewTemplateService(cs)\n\tassert.Nil(err, \"Couldn't load template service\")\n\tassert.NotNil(ds, \"Template service not instanced\")\n\n\t// convertMap\n\tmapIn, err := utils.ItemConvertParams(*templateIn)\n\tassert.Nil(err, \"Template test data corrupted\")\n\n\t// to json\n\tdOut, err := json.Marshal(templateIn)\n\tassert.Nil(err, \"Template test data corrupted\")\n\n\t// call service\n\tcs.On(\"Put\", fmt.Sprintf(\"/blueprint/templates/%s\", templateIn.ID), mapIn).Return(dOut, 200, fmt.Errorf(\"mocked error\"))\n\ttemplateOut, err := ds.UpdateTemplate(mapIn, templateIn.ID)\n\tassert.NotNil(err, \"We are expecting an error\")\n\tassert.Nil(templateOut, \"Expecting nil output\")\n\tassert.Equal(err.Error(), \"mocked error\", \"Error should be 'mocked error'\")\n\n\treturn templateOut\n}", "func TestFailureReasonsCompatibilityMap(t *testing.T) {\n\tf := newFailureReasonMapper(nil)\n\trequire.Equal(t, failureReasonsCompatibilityMap, f.compatibilityMap)\n\n\tfor _, r := range allFailureReasons {\n\t\tt.Run(string(r), func(t *testing.T) {\n\t\t\tf.Map(r)\n\t\t\tassert.NoError(t, f.err)\n\t\t})\n\t}\n}", "func isRetryableConflict(err *genericarmclient.CloudError) bool {\n\tif err == nil {\n\t\treturn false\n\t}\n\n\t// We retry on this code as ADS may be in the process of being enabled (in the case of parallel deployment)\n\treturn err.Code() == \"VulnerabilityAssessmentADSIsDisabled\"\n}", "func TestInsertNewUserServiceAlreadyExists (t *testing.T){\n\terr := PostNewUserService(user_01)\n\tassert.Equal(t, 409, err.HTTPStatus)\n}", "func TestCreateOrUpdateResource(t *testing.T) 
{\n\tt.Run(\"ready status unknown\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\t\tg.Expect(err.Error()).To(ContainSubstring(\"ready status unknown\"))\n\t})\n\n\tt.Run(\"create resource that doesn't already exist\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.EXPECT().Parameters(gomockinternal.AContext(), gomock.Nil()).Return(&asoresourcesv1.ResourceGroup{\n\t\t\tSpec: asoresourcesv1.ResourceGroup_Spec{\n\t\t\t\tLocation: ptr.To(\"location\"),\n\t\t\t},\n\t\t}, nil)\n\n\t\tctx := context.Background()\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\t\tg.Expect(azure.IsOperationNotDoneError(err)).To(BeTrue())\n\t\tvar recerr azure.ReconcileError\n\t\tg.Expect(errors.As(err, &recerr)).To(BeTrue())\n\t\tg.Expect(recerr.IsTransient()).To(BeTrue())\n\n\t\tcreated := &asoresourcesv1.ResourceGroup{}\n\t\tg.Expect(c.Get(ctx, types.NamespacedName{Name: \"name\", Namespace: \"namespace\"}, created)).To(Succeed())\n\t\tg.Expect(created.Name).To(Equal(\"name\"))\n\t\tg.Expect(created.Namespace).To(Equal(\"namespace\"))\n\t\tg.Expect(created.Labels).To(Equal(map[string]string{\n\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t}))\n\t\tg.Expect(created.Annotations).To(Equal(map[string]string{\n\t\t\tReconcilePolicyAnnotation: ReconcilePolicySkip,\n\t\t\tSecretNameAnnotation: \"cluster-aso-secret\",\n\t\t}))\n\t\tg.Expect(created.Spec).To(Equal(asoresourcesv1.ResourceGroup_Spec{\n\t\t\tLocation: ptr.To(\"location\"),\n\t\t}))\n\t})\n\n\tt.Run(\"resource is not ready in non-terminal state\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := 
mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionFalse,\n\t\t\t\t\t\tSeverity: conditions.ConditionSeverityInfo,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\t\tg.Expect(err.Error()).To(ContainSubstring(\"resource is not Ready\"))\n\t\tvar recerr azure.ReconcileError\n\t\tg.Expect(errors.As(err, &recerr)).To(BeTrue())\n\t\tg.Expect(recerr.IsTransient()).To(BeTrue())\n\t})\n\n\tt.Run(\"resource is not ready in reconciling state\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionFalse,\n\t\t\t\t\t\tSeverity: conditions.ConditionSeverityInfo,\n\t\t\t\t\t\tReason: conditions.ReasonReconciling.Name,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(azure.IsOperationNotDoneError(err)).To(BeTrue())\n\t})\n\n\tt.Run(\"resource is not ready in terminal state\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: 
map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionFalse,\n\t\t\t\t\t\tSeverity: conditions.ConditionSeverityError,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\t\tg.Expect(err.Error()).To(ContainSubstring(\"resource is not Ready\"))\n\t\tvar recerr azure.ReconcileError\n\t\tg.Expect(errors.As(err, &recerr)).To(BeTrue())\n\t\tg.Expect(recerr.IsTerminal()).To(BeTrue())\n\t})\n\n\tt.Run(\"error getting existing resource\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(ErroringGetClient{Client: c, err: errors.New(\"an error\")}, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\n\t\tctx := context.Background()\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\t\tg.Expect(err.Error()).To(ContainSubstring(\"failed to get existing resource\"))\n\t})\n\n\tt.Run(\"begin an update\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.EXPECT().Parameters(gomockinternal.AContext(), gomock.Not(gomock.Nil())).DoAndReturn(func(_ context.Context, object genruntime.MetaObject) (genruntime.MetaObject, error) {\n\t\t\tgroup := object.DeepCopyObject().(*asoresourcesv1.ResourceGroup)\n\t\t\tgroup.Spec.Location = ptr.To(\"location\")\n\t\t\treturn group, nil\n\t\t})\n\t\tspecMock.EXPECT().WasManaged(gomock.Any()).Return(false)\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\t})\n\n\tt.Run(\"adopt managed resource in not found state\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := 
runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\tclusterName := \"cluster\"\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.EXPECT().Parameters(gomockinternal.AContext(), gomock.Not(gomock.Nil())).DoAndReturn(func(_ context.Context, object genruntime.MetaObject) (genruntime.MetaObject, error) {\n\t\t\treturn object, nil\n\t\t})\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\tReconcilePolicyAnnotation: ReconcilePolicySkip,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionFalse,\n\t\t\t\t\t\tReason: conditions.ReasonAzureResourceNotFound.Name,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\n\t\tupdated := &asoresourcesv1.ResourceGroup{}\n\t\tg.Expect(c.Get(ctx, types.NamespacedName{Name: \"name\", Namespace: \"namespace\"}, updated)).To(Succeed())\n\t\tg.Expect(updated.Annotations).To(Equal(map[string]string{\n\t\t\tReconcilePolicyAnnotation: ReconcilePolicyManage,\n\t\t\tSecretNameAnnotation: \"cluster-aso-secret\",\n\t\t}))\n\t})\n\n\tt.Run(\"adopt previously managed resource\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\tclusterName := \"cluster\"\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.EXPECT().Parameters(gomockinternal.AContext(), gomock.Not(gomock.Nil())).DoAndReturn(func(_ context.Context, object genruntime.MetaObject) (genruntime.MetaObject, error) {\n\t\t\treturn nil, nil\n\t\t})\n\t\tspecMock.EXPECT().WasManaged(gomock.Any()).Return(true)\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\tReconcilePolicyAnnotation: ReconcilePolicySkip,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: 
metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\n\t\tupdated := &asoresourcesv1.ResourceGroup{}\n\t\tg.Expect(c.Get(ctx, types.NamespacedName{Name: \"name\", Namespace: \"namespace\"}, updated)).To(Succeed())\n\t\tg.Expect(updated.Annotations).To(Equal(map[string]string{\n\t\t\tReconcilePolicyAnnotation: ReconcilePolicyManage,\n\t\t\tSecretNameAnnotation: \"cluster-aso-secret\",\n\t\t}))\n\t})\n\n\tt.Run(\"Parameters error\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.EXPECT().Parameters(gomockinternal.AContext(), gomock.Not(gomock.Nil())).Return(nil, errors.New(\"parameters error\"))\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\t\tg.Expect(err.Error()).To(ContainSubstring(\"parameters error\"))\n\t})\n\n\tt.Run(\"skip update for unmanaged resource\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).NotTo(BeNil())\n\t\tg.Expect(err).To(BeNil())\n\t})\n\n\tt.Run(\"resource up to date\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := 
fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.EXPECT().Parameters(gomockinternal.AContext(), gomock.Any()).DoAndReturn(func(_ context.Context, object genruntime.MetaObject) (genruntime.MetaObject, error) {\n\t\t\treturn nil, nil\n\t\t})\n\t\tspecMock.EXPECT().WasManaged(gomock.Any()).Return(false)\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\tReconcilePolicyAnnotation: ReconcilePolicyManage,\n\t\t\t\t\tSecretNameAnnotation: \"cluster-aso-secret\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: asoresourcesv1.ResourceGroup_Spec{\n\t\t\t\tLocation: ptr.To(\"location\"),\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).NotTo(BeNil())\n\t\tg.Expect(err).To(BeNil())\n\n\t\tg.Expect(result.GetName()).To(Equal(\"name\"))\n\t\tg.Expect(result.GetNamespace()).To(Equal(\"namespace\"))\n\t\tg.Expect(result.(*asoresourcesv1.ResourceGroup).Spec.Location).To(Equal(ptr.To(\"location\")))\n\t})\n\n\tt.Run(\"error updating\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(ErroringPatchClient{Client: c, err: errors.New(\"an error\")}, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.EXPECT().Parameters(gomockinternal.AContext(), gomock.Any()).DoAndReturn(func(_ context.Context, object genruntime.MetaObject) (genruntime.MetaObject, error) {\n\t\t\tgroup := object.DeepCopyObject().(*asoresourcesv1.ResourceGroup)\n\t\t\tgroup.Spec.Location = ptr.To(\"location\")\n\t\t\treturn group, nil\n\t\t})\n\t\tspecMock.EXPECT().WasManaged(gomock.Any()).Return(false)\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, 
\"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err).NotTo(BeNil())\n\t\tg.Expect(err.Error()).To(ContainSubstring(\"failed to update resource\"))\n\t})\n\n\tt.Run(\"with tags success\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := struct {\n\t\t\t*mock_azure.MockASOResourceSpecGetter\n\t\t\t*mock_aso.MockTagsGetterSetter\n\t\t}{\n\t\t\tMockASOResourceSpecGetter: mock_azure.NewMockASOResourceSpecGetter(mockCtrl),\n\t\t\tMockTagsGetterSetter: mock_aso.NewMockTagsGetterSetter(mockCtrl),\n\t\t}\n\t\tspecMock.MockASOResourceSpecGetter.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.MockASOResourceSpecGetter.EXPECT().Parameters(gomockinternal.AContext(), gomock.Any()).DoAndReturn(func(_ context.Context, object genruntime.MetaObject) (genruntime.MetaObject, error) {\n\t\t\treturn nil, nil\n\t\t})\n\t\tspecMock.MockASOResourceSpecGetter.EXPECT().WasManaged(gomock.Any()).Return(false)\n\n\t\tspecMock.MockTagsGetterSetter.EXPECT().GetActualTags(gomock.Any()).Return(nil)\n\t\tspecMock.MockTagsGetterSetter.EXPECT().GetAdditionalTags().Return(nil)\n\t\tspecMock.MockTagsGetterSetter.EXPECT().GetDesiredTags(gomock.Any()).Return(nil)\n\t\tspecMock.MockTagsGetterSetter.EXPECT().SetTags(gomock.Any(), gomock.Any())\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\tReconcilePolicyAnnotation: ReconcilePolicyManage,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(azure.IsOperationNotDoneError(err)).To(BeTrue())\n\n\t\tupdated := &asoresourcesv1.ResourceGroup{}\n\t\tg.Expect(c.Get(ctx, types.NamespacedName{Name: \"name\", Namespace: \"namespace\"}, updated)).To(Succeed())\n\t\tg.Expect(updated.Annotations).To(HaveKey(tagsLastAppliedAnnotation))\n\t})\n\n\tt.Run(\"with tags failure\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := struct {\n\t\t\t*mock_azure.MockASOResourceSpecGetter\n\t\t\t*mock_aso.MockTagsGetterSetter\n\t\t}{\n\t\t\tMockASOResourceSpecGetter: mock_azure.NewMockASOResourceSpecGetter(mockCtrl),\n\t\t\tMockTagsGetterSetter: mock_aso.NewMockTagsGetterSetter(mockCtrl),\n\t\t}\n\t\tspecMock.MockASOResourceSpecGetter.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: 
\"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.MockASOResourceSpecGetter.EXPECT().Parameters(gomockinternal.AContext(), gomock.Any()).DoAndReturn(func(_ context.Context, object genruntime.MetaObject) (genruntime.MetaObject, error) {\n\t\t\treturn nil, nil\n\t\t})\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\tReconcilePolicyAnnotation: ReconcilePolicyManage,\n\t\t\t\t\ttagsLastAppliedAnnotation: \"{\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(err.Error()).To(ContainSubstring(\"failed to reconcile tags\"))\n\t})\n\n\tt.Run(\"reconcile policy annotation resets after un-pause\", func(t *testing.T) {\n\t\tg := NewGomegaWithT(t)\n\n\t\tsch := runtime.NewScheme()\n\t\tg.Expect(asoresourcesv1.AddToScheme(sch)).To(Succeed())\n\t\tc := fakeclient.NewClientBuilder().\n\t\t\tWithScheme(sch).\n\t\t\tBuild()\n\t\ts := New(c, clusterName)\n\n\t\tmockCtrl := gomock.NewController(t)\n\t\tspecMock := mock_azure.NewMockASOResourceSpecGetter(mockCtrl)\n\t\tspecMock.EXPECT().ResourceRef().Return(&asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t},\n\t\t})\n\t\tspecMock.EXPECT().Parameters(gomockinternal.AContext(), gomock.Any()).DoAndReturn(func(_ context.Context, object genruntime.MetaObject) (genruntime.MetaObject, error) {\n\t\t\treturn nil, nil\n\t\t})\n\t\tspecMock.EXPECT().WasManaged(gomock.Any()).Return(false)\n\n\t\tctx := context.Background()\n\t\tg.Expect(c.Create(ctx, &asoresourcesv1.ResourceGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"name\",\n\t\t\t\tNamespace: \"namespace\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tinfrav1.OwnedByClusterLabelKey: clusterName,\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\tPrePauseReconcilePolicyAnnotation: ReconcilePolicyManage,\n\t\t\t\t\tReconcilePolicyAnnotation: ReconcilePolicySkip,\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: asoresourcesv1.ResourceGroup_Spec{\n\t\t\t\tLocation: ptr.To(\"location\"),\n\t\t\t},\n\t\t\tStatus: asoresourcesv1.ResourceGroup_STATUS{\n\t\t\t\tConditions: []conditions.Condition{\n\t\t\t\t\t{\n\t\t\t\t\t\tType: conditions.ConditionTypeReady,\n\t\t\t\t\t\tStatus: metav1.ConditionTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})).To(Succeed())\n\n\t\tresult, err := s.CreateOrUpdateResource(ctx, specMock, \"service\")\n\t\tg.Expect(result).To(BeNil())\n\t\tg.Expect(azure.IsOperationNotDoneError(err)).To(BeTrue())\n\n\t\tupdated := &asoresourcesv1.ResourceGroup{}\n\t\tg.Expect(c.Get(ctx, types.NamespacedName{Name: \"name\", Namespace: \"namespace\"}, updated)).To(Succeed())\n\t\tg.Expect(updated.Annotations).NotTo(HaveKey(PrePauseReconcilePolicyAnnotation))\n\t\tg.Expect(updated.Annotations).To(HaveKeyWithValue(ReconcilePolicyAnnotation, ReconcilePolicyManage))\n\t})\n}", "func TestLegacyExecuteFailOnAutocommit(t *testing.T) {\n\tctx := 
utils.LeakCheckContext(t)\n\n\tcreateSandbox(\"TestExecuteFailOnAutocommit\")\n\thc := discovery.NewFakeHealthCheck(nil)\n\tsc := newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{\"aa\"}), \"aa\")\n\tsbc0 := hc.AddTestTablet(\"aa\", \"0\", 1, \"TestExecuteFailOnAutocommit\", \"0\", topodatapb.TabletType_PRIMARY, true, 1, nil)\n\tsbc1 := hc.AddTestTablet(\"aa\", \"1\", 1, \"TestExecuteFailOnAutocommit\", \"1\", topodatapb.TabletType_PRIMARY, true, 1, nil)\n\n\trss := []*srvtopo.ResolvedShard{\n\t\t{\n\t\t\tTarget: &querypb.Target{\n\t\t\t\tKeyspace: \"TestExecuteFailOnAutocommit\",\n\t\t\t\tShard: \"0\",\n\t\t\t\tTabletType: topodatapb.TabletType_PRIMARY,\n\t\t\t},\n\t\t\tGateway: sbc0,\n\t\t},\n\t\t{\n\t\t\tTarget: &querypb.Target{\n\t\t\t\tKeyspace: \"TestExecuteFailOnAutocommit\",\n\t\t\t\tShard: \"1\",\n\t\t\t\tTabletType: topodatapb.TabletType_PRIMARY,\n\t\t\t},\n\t\t\tGateway: sbc1,\n\t\t},\n\t}\n\tqueries := []*querypb.BoundQuery{\n\t\t{\n\t\t\t// This will fail to go to shard. It will be rejected at vtgate.\n\t\t\tSql: \"query1\",\n\t\t\tBindVariables: map[string]*querypb.BindVariable{\n\t\t\t\t\"bv0\": sqltypes.Int64BindVariable(0),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t// This will go to shard.\n\t\t\tSql: \"query2\",\n\t\t\tBindVariables: map[string]*querypb.BindVariable{\n\t\t\t\t\"bv1\": sqltypes.Int64BindVariable(1),\n\t\t\t},\n\t\t},\n\t}\n\t// shard 0 - has transaction\n\t// shard 1 - does not have transaction.\n\tsession := &vtgatepb.Session{\n\t\tInTransaction: true,\n\t\tShardSessions: []*vtgatepb.Session_ShardSession{\n\t\t\t{\n\t\t\t\tTarget: &querypb.Target{Keyspace: \"TestExecuteFailOnAutocommit\", Shard: \"0\", TabletType: topodatapb.TabletType_PRIMARY, Cell: \"aa\"},\n\t\t\t\tTransactionId: 123,\n\t\t\t\tTabletAlias: nil,\n\t\t\t},\n\t\t},\n\t\tAutocommit: false,\n\t}\n\t_, errs := sc.ExecuteMultiShard(ctx, nil, rss, queries, NewSafeSession(session), true /*autocommit*/, false)\n\terr := vterrors.Aggregate(errs)\n\trequire.Error(t, err)\n\trequire.Contains(t, err.Error(), \"in autocommit mode, transactionID should be zero but was: 123\")\n\tutils.MustMatch(t, 0, len(sbc0.Queries), \"\")\n\tutils.MustMatch(t, []*querypb.BoundQuery{queries[1]}, sbc1.Queries, \"\")\n}", "func (r *ReconcileAerospikeCluster) recoverFailedCreate(aeroCluster *aerospikev1alpha1.AerospikeCluster) error {\n\tlogger := pkglog.New(log.Ctx{\"AerospikeCluster\": utils.ClusterNamespacedName(aeroCluster)})\n\tlogger.Info(\"Forcing a cluster recreate as status is nil. The cluster could be unreachable due to bad configuration.\")\n\n\t// Delete all statefulsets and everything related so that it can be properly created and updated in next run.\n\tstatefulSetList, err := r.getClusterStatefulSets(aeroCluster)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error getting statefulsets while forcing recreate of the cluster as status is nil: %v\", err)\n\t}\n\n\tlogger.Debug(\"Found statefulsets for cluster. Need to delete them\", log.Ctx{\"nSTS\": len(statefulSetList.Items)})\n\tfor _, statefulset := range statefulSetList.Items {\n\t\tif err := r.deleteStatefulSet(aeroCluster, &statefulset); err != nil {\n\t\t\treturn fmt.Errorf(\"Error deleting statefulset while forcing recreate of the cluster as status is nil: %v\", err)\n\t\t}\n\t}\n\n\t// Clear the pod status as well, since we want to be re-initializing or cascade deleting devices if any.\n\t// This is not necessary since scale-up would clean up dangling pod status. 
However, it is done here for general\n\t// cleanliness.\n\trackStateList := getNewRackStateList(aeroCluster)\n\tfor _, state := range rackStateList {\n\t\tpods, err := r.getRackPodList(aeroCluster, state.Rack.ID)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to recover failed cluster: %v\", err)\n\t\t}\n\n\t\tnewPodNames := []string{}\n\t\tfor i := 0; i < len(pods.Items); i++ {\n\t\t\tnewPodNames = append(newPodNames, pods.Items[i].Name)\n\t\t}\n\n\t\terr = r.cleanupPods(aeroCluster, newPodNames, state)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to recover failed cluster: %v\", err)\n\t\t}\n\t}\n\n\treturn fmt.Errorf(\"Forcing recreate of the cluster as status is nil\")\n}", "func testFailWithTweak(key *Key, data *metadata.WrappedKeyData, tweak []byte) error {\n\ttweak[0]++\n\tkey, err := Unwrap(key, data)\n\tif err == nil {\n\t\tkey.Wipe()\n\t}\n\ttweak[0]--\n\treturn err\n}", "func TestBackoffOnRangefeedFailure(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\n\tvar called int64\n\tconst timesToFail = 3\n\trpcKnobs := rpc.ContextTestingKnobs{\n\t\tStreamClientInterceptor: func(\n\t\t\ttarget string, class rpc.ConnectionClass,\n\t\t) grpc.StreamClientInterceptor {\n\t\t\treturn func(\n\t\t\t\tctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn,\n\t\t\t\tmethod string, streamer grpc.Streamer, opts ...grpc.CallOption,\n\t\t\t) (stream grpc.ClientStream, err error) {\n\t\t\t\tif strings.Contains(method, \"RangeFeed\") &&\n\t\t\t\t\tatomic.AddInt64(&called, 1) <= timesToFail {\n\t\t\t\t\treturn nil, errors.Errorf(\"boom\")\n\t\t\t\t}\n\t\t\t\treturn streamer(ctx, desc, cc, method, opts...)\n\t\t\t}\n\t\t},\n\t}\n\tctx := context.Background()\n\tvar seen int64\n\ttc := testcluster.StartTestCluster(t, 2, base.TestClusterArgs{\n\t\tServerArgs: base.TestServerArgs{\n\t\t\tKnobs: base.TestingKnobs{\n\t\t\t\tServer: &server.TestingKnobs{\n\t\t\t\t\tContextTestingKnobs: rpcKnobs,\n\t\t\t\t},\n\t\t\t\tRangeFeed: &rangefeed.TestingKnobs{\n\t\t\t\t\tOnRangefeedRestart: func() {\n\t\t\t\t\t\tatomic.AddInt64(&seen, 1)\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n\tdefer tc.Stopper().Stop(ctx)\n\ttestutils.SucceedsSoon(t, func() error {\n\t\tif n := atomic.LoadInt64(&seen); n < timesToFail {\n\t\t\treturn errors.Errorf(\"seen %d, waiting for %d\", n, timesToFail)\n\t\t}\n\t\treturn nil\n\t})\n}", "func TestPutNewPresentationInvalidtemplatePath(t *testing.T) {\n request := createPutNewPresentationRequest()\n request.templatePath = invalidizeTestParamValue(request.templatePath, \"templatePath\", \"string\").(string)\n e := initializeTest(\"PutNewPresentation\", \"templatePath\", request.templatePath)\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n r, _, e := getTestApiClient().DocumentApi.PutNewPresentation(request)\n assertError(t, \"PutNewPresentation\", \"templatePath\", r.Code, e)\n}", "func TestEmployeeManagerMapCreate_BadRequest(t *testing.T) {\n\tdb, _, err := sqlmock.New()\n\tif err != nil {\n\t\tt.Fatalf(\"an error '%s' was not expected when opening a stub database connection\", err)\n\t}\n\tdefer db.Close()\n\n\templyManagerMap := NewEmployeeManagerMapHandler(db)\n\n\tw := httptest.NewRecorder()\n\tvar jsonStr = []byte(`{\"invalidjson\":}`)\n\tr := httptest.NewRequest(\"POST\", \"http://localhost:9090/api/v1/emplymgrmap\", bytes.NewBuffer(jsonStr))\n\tr = r.WithContext(context.Background())\n\templyManagerMap.Create(w, r)\n\n\texpectedResponse := `{\"error_message\":\"Error:: Invalid Request\"}`\n\tassert.Equal(t, gohttp.StatusBadRequest, 
w.Code)\n\tassert.Equal(t, expectedResponse, w.Body.String())\n}", "func IsConflict(err error) bool {\n\t// TODO(horwitz): This is supposed to be fixed soon. It's a bug in the OCI API that causes a 409 to\n\t// be returned instead of a 412.\n\treturn IsError(err, \"409\", \"Conflict\") || IsError(err, \"412\", \"NoEtagMatch\")\n}", "func TestStepsFailedRetries(t *testing.T) {\n\twf := test.LoadTestWorkflow(\"testdata/steps-failed-retries.yaml\")\n\twoc := newWoc(*wf)\n\twoc.operate()\n\tassert.Equal(t, string(wfv1.NodeFailed), string(woc.wf.Status.Phase))\n}", "func Retries400Test() Test {\n\tvar (\n\t\tmtx sync.Mutex\n\t\taccept bool\n\t\tts int64\n\t)\n\n\treturn Test{\n\t\tName: \"Retries400\",\n\t\tMetrics: metricHandler(prometheus.NewGaugeFunc(prometheus.GaugeOpts{\n\t\t\tName: \"now\",\n\t\t}, func() float64 {\n\t\t\treturn float64(time.Now().Unix() * 1000)\n\t\t})),\n\t\tWrites: func(next http.Handler) http.Handler {\n\t\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tmtx.Lock()\n\t\t\t\tdefer mtx.Unlock()\n\n\t\t\t\tif accept {\n\t\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// We're going to pick a timestamp from this batch, and then make sure\n\t\t\t\t// it gets resent. First we need to decode this batch.\n\t\t\t\tts = getFirstTimestamp(w, r)\n\t\t\t\taccept = true\n\t\t\t\thttp.Error(w, \"bad request\", http.StatusBadRequest)\n\t\t\t})\n\n\t\t},\n\t\tExpected: func(t *testing.T, bs []Batch) {\n\t\t\tfound := false\n\t\t\tforAllSamples(bs, func(s sample) {\n\t\t\t\tif labelsContain(s.l, labels.FromStrings(\"__name__\", \"now\")) && s.t == ts {\n\t\t\t\t\tfound = true\n\t\t\t\t}\n\t\t\t})\n\t\t\trequire.False(t, found, `found sample that should not have been retried`)\n\t\t},\n\t}\n}", "func TestTrigger_ErrorYaml(t *testing.T) {\n\tcontroller := gomock.NewController(t)\n\tdefer controller.Finish()\n\n\tmockUsers := mock.NewMockUserStore(controller)\n\tmockUsers.EXPECT().Find(noContext, dummyRepo.UserID).Return(dummyUser, nil)\n\n\tmockConfigService := mock.NewMockConfigService(controller)\n\tmockConfigService.EXPECT().Find(gomock.Any(), gomock.Any()).Return(dummyYamlInvalid, nil)\n\n\tmockConvertService := mock.NewMockConvertService(controller)\n\tmockConvertService.EXPECT().Convert(gomock.Any(), gomock.Any()).Return(dummyYamlInvalid, nil)\n\n\tmockRepos := mock.NewMockRepositoryStore(controller)\n\tmockRepos.EXPECT().Increment(gomock.Any(), dummyRepo).Return(dummyRepo, nil)\n\n\tmockBuilds := mock.NewMockBuildStore(controller)\n\tmockBuilds.EXPECT().Create(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) // .Do(checkBuild).Return(nil)\n\n\ttriggerer := New(\n\t\tnil,\n\t\tmockConfigService,\n\t\tmockConvertService,\n\t\tnil,\n\t\tnil,\n\t\tmockBuilds,\n\t\tnil,\n\t\tmockRepos,\n\t\tmockUsers,\n\t\tnil,\n\t\tnil,\n\t)\n\n\tbuild, err := triggerer.Trigger(noContext, dummyRepo, dummyHook)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif got, want := build.Status, core.StatusError; got != want {\n\t\tt.Errorf(\"Want status %s, got %s\", want, got)\n\t}\n\tif got, want := build.Error, \"yaml: found unknown directive name\"; got != want {\n\t\tt.Errorf(\"Want error %s, got %s\", want, got)\n\t}\n\tif build.Finished == 0 {\n\t\tt.Errorf(\"Want non-zero finished time\")\n\t}\n}", "func (c *SeaterController) TraceConflictf(err error, format string, args ...interface{}) {\n\tmsg := fmt.Sprintf(format, args...)\n\tc.traceJSONAbort(err, 409, msg)\n}", "func testReconcileDeploymentReadyRestartRequired(t *testing.T) {\n\treaper 
:= createReaper()\n\tdeployment := createReadyDeployment(reaper)\n\n\tobjs := []runtime.Object{reaper, deployment}\n\n\tSetConfigurationUpdatedCondition(&reaper.Status)\n\n\tr := createDeploymentReconciler(objs...)\n\tresult, err := r.ReconcileDeployment(context.TODO(), reaper)\n\n\tif result == nil {\n\t\tt.Errorf(\"expected non-nil result\")\n\t} else if !result.Requeue {\n\t\tt.Errorf(\"expected requeue\")\n\t}\n\n\tif err != nil {\n\t\tt.Errorf(\"expected err (nil), got (%s)\", err)\n\t}\n\n\tcond := GetCondition(&reaper.Status, v1alpha1.ConfigurationUpdated)\n\tif cond == nil {\n\t\tt.Errorf(\"expected to find condition (%s)\", v1alpha1.ConfigurationUpdated)\n\t} else if cond.Reason != RestartRequiredReason {\n\t\tt.Errorf(\"condition %s reason is wrong: expected (%s), got (%s)\", v1alpha1.ConfigurationUpdated, RestartRequiredReason, cond.Reason)\n\t}\n\n\tdeployment = &appsv1.Deployment{}\n\tif err := r.client.Get(context.TODO(), namespaceName, deployment); err != nil {\n\t\tt.Errorf(\"failed to get deployment: (%s)\", err)\n\t} else if _, found := deployment.Spec.Template.Annotations[reaperRestartedAt]; !found {\n\t\tt.Errorf(\"expected to find deployment annotation: (%s)\", reaperRestartedAt)\n\t}\n}", "func TestCreatesAllowedDuringNamespaceDeletion(t *testing.T) {\n\tconfig := &origin.MasterConfig{\n\t\tKubeletClientConfig: &kclient.KubeletConfig{},\n\t\tEtcdHelper: etcdstorage.NewEtcdStorage(nil, nil, \"\"),\n\t}\n\tstorageMap := config.GetRestStorage()\n\tresources := sets.String{}\n\n\tfor resource := range storageMap {\n\t\tresources.Insert(strings.ToLower(resource))\n\t}\n\n\tfor resource := range recommendedCreatableResources {\n\t\tif !resources.Has(resource) {\n\t\t\tt.Errorf(\"recommendedCreatableResources has resource %v, but that resource isn't registered.\", resource)\n\t\t}\n\t}\n}", "func TestAfPacketModifyRecreateNotFound(t *testing.T) {\n\tctx, plugin, _ := afPacketTestSetup(t)\n\tdefer afPacketTestTeardown(ctx)\n\n\t// Data\n\toldData := getTestAfPacketData(\"if1\", []string{\"10.0.0.1/24\"}, \"host1\")\n\tnewData := getTestAfPacketData(\"if1\", []string{\"10.0.0.1/24\"}, \"host2\")\n\n\t// Test af packet modify\n\trecreate, err := plugin.ModifyAfPacketInterface(newData, oldData)\n\tExpect(err).To(BeNil())\n\tExpect(recreate).To(BeTrue())\n}", "func (suite *TaskFailRetryTestSuite) TestTaskFailRetry() {\n\ttaskConfig := pbtask.TaskConfig{\n\t\tRestartPolicy: &pbtask.RestartPolicy{\n\t\t\tMaxFailures: 3,\n\t\t},\n\t}\n\n\tsuite.cachedTask.EXPECT().\n\t\tID().\n\t\tReturn(uint32(0)).\n\t\tAnyTimes()\n\n\tsuite.jobFactory.EXPECT().\n\t\tGetJob(suite.jobID).Return(suite.cachedJob)\n\n\tsuite.cachedJob.EXPECT().\n\t\tGetTask(suite.instanceID).Return(suite.cachedTask)\n\n\tsuite.cachedJob.EXPECT().\n\t\tID().Return(suite.jobID)\n\n\tsuite.cachedTask.EXPECT().\n\t\tGetRuntime(gomock.Any()).Return(suite.taskRuntime, nil)\n\n\tsuite.taskConfigV2Ops.EXPECT().\n\t\tGetTaskConfig(gomock.Any(), suite.jobID, suite.instanceID, gomock.Any()).\n\t\tReturn(&taskConfig, &models.ConfigAddOn{}, nil)\n\n\tsuite.cachedJob.EXPECT().\n\t\tPatchTasks(gomock.Any(), gomock.Any(), false).\n\t\tDo(func(ctx context.Context,\n\t\t\truntimeDiffs map[uint32]jobmgrcommon.RuntimeDiff,\n\t\t\t_ bool) {\n\t\t\truntimeDiff := runtimeDiffs[suite.instanceID]\n\t\t\tsuite.True(\n\t\t\t\truntimeDiff[jobmgrcommon.MesosTaskIDField].(*mesosv1.TaskID).GetValue() != suite.mesosTaskID)\n\t\t\tsuite.True(\n\t\t\t\truntimeDiff[jobmgrcommon.PrevMesosTaskIDField].(*mesosv1.TaskID).GetValue() == 
suite.mesosTaskID)\n\t\t\tsuite.True(\n\t\t\t\truntimeDiff[jobmgrcommon.StateField].(pbtask.TaskState) == pbtask.TaskState_INITIALIZED)\n\t\t}).Return(nil, nil, nil)\n\n\tsuite.cachedJob.EXPECT().\n\t\tGetJobType().Return(pbjob.JobType_BATCH)\n\n\tsuite.taskGoalStateEngine.EXPECT().\n\t\tEnqueue(gomock.Any(), gomock.Any()).\n\t\tReturn()\n\n\tsuite.jobGoalStateEngine.EXPECT().\n\t\tEnqueue(gomock.Any(), gomock.Any()).\n\t\tReturn()\n\n\terr := TaskFailRetry(context.Background(), suite.taskEnt)\n\tsuite.NoError(err)\n}", "func TestIssue351MultipleJobRun(t *testing.T) {\n\tctx, err := NewContext(t)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer ctx.Cleanup()\n\n\t// Create initial CR to generate an initial job and create the initial k8s resources\n\n\tgitops := &gitopsv1alpha1.GitOpsConfig{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"GitOpsConfig\",\n\t\t\tAPIVersion: \"eunomia.kohls.io/v1alpha1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"gitops-issue351\",\n\t\t\tNamespace: ctx.namespace,\n\t\t},\n\t\tSpec: gitopsv1alpha1.GitOpsConfigSpec{\n\t\t\tTemplateSource: gitopsv1alpha1.GitConfig{\n\t\t\t\tURI: ctx.eunomiaURI,\n\t\t\t\tRef: ctx.eunomiaRef,\n\t\t\t\tContextDir: \"test/e2e/testdata/hello-a\",\n\t\t\t},\n\t\t\tParameterSource: gitopsv1alpha1.GitConfig{\n\t\t\t\tURI: ctx.eunomiaURI,\n\t\t\t\tRef: ctx.eunomiaRef,\n\t\t\t\tContextDir: \"test/e2e/testdata/empty-yaml\",\n\t\t\t},\n\t\t\tTriggers: []gitopsv1alpha1.GitOpsTrigger{\n\t\t\t\t{Type: \"Change\"},\n\t\t\t},\n\t\t\tTemplateProcessorImage: \"quay.io/kohlstechnology/eunomia-base:dev\",\n\t\t\tResourceHandlingMode: \"Apply\",\n\t\t\tResourceDeletionMode: \"Delete\",\n\t\t\tServiceAccountRef: \"eunomia-operator\",\n\t\t},\n\t}\n\n\terr = framework.Global.Client.Create(ctx, gitops, &framework.CleanupOptions{TestContext: ctx.TestCtx, Timeout: timeout, RetryInterval: retryInterval})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// When the initial job is created, we will use it as a template to create two additional jobs at the same time\n\terr = WaitForJobCreation(ctx.namespace, \"gitopsconfig-gitops-issue351-\", framework.Global.KubeClient)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tgitopsJob, err := GetJob(ctx.namespace, \"gitopsconfig-gitops-issue351-\", framework.Global.KubeClient)\n\n\tfirstJob := &batchv1.Job{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"GitOpsConfig\",\n\t\t\tAPIVersion: \"eunomia.kohls.io/v1alpha1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"first-job\",\n\t\t\tNamespace: ctx.namespace,\n\t\t},\n\t\tSpec: gitopsJob.Spec,\n\t}\n\t// The deep copy of the job keeps the selector and selector label that has to be generated by k8s.\n\t// Trying to create a job with those set will fail.\n\tfirstJob.Spec.Template.SetLabels(map[string]string{})\n\tfirstJob.Spec.Selector.Reset()\n\n\terr = framework.Global.Client.Create(ctx, firstJob, &framework.CleanupOptions{TestContext: ctx.TestCtx, Timeout: timeout, RetryInterval: retryInterval})\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tsecondJob := &batchv1.Job{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"GitOpsConfig\",\n\t\t\tAPIVersion: \"eunomia.kohls.io/v1alpha1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"second-job\",\n\t\t\tNamespace: ctx.namespace,\n\t\t},\n\t\tSpec: gitopsJob.Spec,\n\t}\n\tsecondJob.Spec.Template.SetLabels(map[string]string{})\n\tsecondJob.Spec.Selector.Reset()\n\n\terr = framework.Global.Client.Create(ctx, secondJob, &framework.CleanupOptions{TestContext: ctx.TestCtx, Timeout: timeout, 
RetryInterval: retryInterval})\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// Wait to make sure both of the jobs finish running\n\terr = wait.Poll(retryInterval, 60*time.Second, func() (done bool, err error) {\n\t\tjobOne, _ := GetJob(ctx.namespace, \"first-job\", framework.Global.KubeClient)\n\t\tjobTwo, _ := GetJob(ctx.namespace, \"second-job\", framework.Global.KubeClient)\n\n\t\tswitch {\n\t\tcase jobOne.Status.Succeeded == 1 && jobTwo.Status.Succeeded == 1:\n\t\t\tt.Logf(\"Both jobs are done\")\n\t\t\treturn true, nil\n\t\tdefault:\n\t\t\tt.Logf(\"Both jobs are not done\")\n\t\t\treturn false, nil\n\t\t}\n\t})\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tdeploymentList, err := framework.Global.KubeClient.AppsV1().Deployments(ctx.namespace).List(metav1.ListOptions{})\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif len(deploymentList.Items) != 1 {\n\t\tt.Errorf(\"There was only %d deployments when we were expecting 1\", len(deploymentList.Items))\n\t}\n\tif deploymentList.Items[0].GetDeletionTimestamp() != nil {\n\t\tt.Errorf(\"The deployment has been marked for deletion\")\n\t}\n}", "func TestInvalidFilesInOverlay(t *testing.T) { packagestest.TestAll(t, testInvalidFilesInOverlay) }", "func TestFailbackRetryFailed10Times(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tinvoker := mock.NewMockInvoker(ctrl)\n\tclusterInvoker := registerFailback(invoker).(*failbackClusterInvoker)\n\tclusterInvoker.maxRetries = 10\n\n\tinvoker.EXPECT().IsAvailable().Return(true).AnyTimes()\n\tinvoker.EXPECT().GetURL().Return(failbackUrl).AnyTimes()\n\n\t// 10 task should failed firstly.\n\tmockFailedResult := &protocol.RPCResult{Err: perrors.New(\"error\")}\n\tinvoker.EXPECT().Invoke(gomock.Any(), gomock.Any()).Return(mockFailedResult).Times(10)\n\n\t// 10 task should retry and failed.\n\tvar wg sync.WaitGroup\n\twg.Add(10)\n\tnow := time.Now()\n\tinvoker.EXPECT().Invoke(gomock.Any(), gomock.Any()).DoAndReturn(func(context.Context, protocol.Invocation) protocol.Result {\n\t\tdelta := time.Since(now).Nanoseconds() / int64(time.Second)\n\t\tassert.True(t, delta >= 5)\n\t\twg.Done()\n\t\treturn mockFailedResult\n\t}).Times(10)\n\n\tfor i := 0; i < 10; i++ {\n\t\tresult := clusterInvoker.Invoke(context.Background(), &invocation.RPCInvocation{})\n\t\tassert.Nil(t, result.Error())\n\t\tassert.Nil(t, result.Result())\n\t\tassert.Equal(t, 0, len(result.Attachments()))\n\t}\n\n\twg.Wait()\n\ttime.Sleep(time.Second) // in order to ensure checkRetry have done\n\tassert.Equal(t, int64(10), clusterInvoker.taskList.Len())\n\n\tinvoker.EXPECT().Destroy().Return()\n\tclusterInvoker.Destroy()\n\n\tassert.Equal(t, int64(0), clusterInvoker.taskList.Len())\n}", "func TestCmdDeploy_retryRejectNonFailed(t *testing.T) {\n\tvar existingDeployment *kapi.ReplicationController\n\n\tcommandClient := &deployCommandClientImpl{\n\t\tGetDeploymentFn: func(namespace, name string) (*kapi.ReplicationController, error) {\n\t\t\treturn existingDeployment, nil\n\t\t},\n\t\tUpdateDeploymentConfigFn: func(config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) {\n\t\t\tt.Fatalf(\"unexpected call to UpdateDeploymentConfig\")\n\t\t\treturn nil, nil\n\t\t},\n\t\tUpdateDeploymentFn: func(deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {\n\t\t\tt.Fatalf(\"unexpected call to UpdateDeployment\")\n\t\t\treturn nil, nil\n\t\t},\n\t\tListDeployerPodsForFn: func(namespace, deploymentName string) (*kapi.PodList, error) {\n\t\t\tt.Fatalf(\"unexpected call to 
ListDeployerPodsFor\")\n\t\t\treturn nil, nil\n\t\t},\n\t\tDeletePodFn: func(pod *kapi.Pod) error {\n\t\t\tt.Fatalf(\"unexpected call to DeletePod\")\n\t\t\treturn nil\n\t\t},\n\t}\n\n\tc := &retryDeploymentCommand{client: commandClient}\n\n\tinvalidStatusList := []deployapi.DeploymentStatus{\n\t\tdeployapi.DeploymentStatusNew,\n\t\tdeployapi.DeploymentStatusPending,\n\t\tdeployapi.DeploymentStatusRunning,\n\t\tdeployapi.DeploymentStatusComplete,\n\t}\n\n\tfor _, status := range invalidStatusList {\n\t\tconfig := deploytest.OkDeploymentConfig(1)\n\t\texistingDeployment = deploymentFor(config, status)\n\t\terr := c.retry(config, ioutil.Discard)\n\t\tif err == nil {\n\t\t\tt.Errorf(\"expected an error retrying deployment with status %s\", status)\n\t\t}\n\t}\n}", "func TestProjectsService_CreateTag(t *testing.T) {\n\tsetup()\n\tdefer teardown()\n\n\tinput := &gerrit.TagInput{\n\t\tRef: \"v1.0.0\",\n\t\tRevision: \"master\",\n\t\tMessage: \"v1.0.0 release\",\n\t}\n\n\ttestMux.HandleFunc(\"/projects/go/tags/v1.0.0\", func(w http.ResponseWriter, r *http.Request) {\n\t\ttestMethod(t, r, \"PUT\")\n\n\t\tv := new(gerrit.TagInput)\n\t\tif err := json.NewDecoder(r.Body).Decode(v); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\n\t\tif !reflect.DeepEqual(v, input) {\n\t\t\tt.Errorf(\"Request body = %+v, want %+v\", v, input)\n\t\t}\n\n\t\tfmt.Fprint(w, `)]}'`+\"\\n\"+`{\"ref\":\"v1.0.0\",\"revision\":\"master\",\"message\":\"v1.0.0 release\"}`)\n\t})\n\n\ttag, _, err := testClient.Projects.CreateTag(\"go\", \"v1.0.0\", input)\n\tif err != nil {\n\t\tt.Errorf(\"Projects.CreateTag returned error: %v\", err)\n\t}\n\n\twant := &gerrit.TagInfo{\n\t\tRef: \"v1.0.0\",\n\t\tRevision: \"master\",\n\t\tMessage: \"v1.0.0 release\",\n\t}\n\n\tif !reflect.DeepEqual(tag, want) {\n\t\tt.Errorf(\"Projects.CreateTag returned %+v, want %+v\", tag, want)\n\t}\n}", "func TestDeltaCreateDestinationLock(t *testing.T) {\n\tvar (\n\t\tbase = \"debian:8\"\n\t\ttarget = \"debian:10\"\n\t\tdelta = \"debian:delta\"\n\t)\n\n\tvar (\n\t\terr error\n\t\trc io.ReadCloser\n\t\tctx = context.Background()\n\t\tclient = testEnv.APIClient()\n\t)\n\n\tpullBaseAndTargetImages(t, client, base, target)\n\n\trc, err = client.ImageDelta(ctx,\n\t\tbase,\n\t\ttarget,\n\t\ttypes.ImageDeltaOptions{\n\t\t\tTag: delta,\n\t\t})\n\tassert.NilError(t, err)\n\tdefer rc.Close()\n\n\tvar (\n\t\twaitFingerprinting = make(chan struct{})\n\t\twaitDelta = make(chan struct{})\n\t)\n\tgo func() {\n\t\tdefer close(waitFingerprinting)\n\t\tdefer close(waitDelta)\n\t\tfor br := bufio.NewReader(rc); ; {\n\t\t\tline, err := br.ReadString('\\n')\n\t\t\tif err != nil {\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif strings.Contains(line, \"Fingerprint complete\") {\n\t\t\t\twaitFingerprinting <- struct{}{}\n\t\t\t}\n\t\t\tif strings.Contains(line, \"layer does not exist\") {\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t}\n\t}()\n\n\tinspectTarget, _, err := client.ImageInspectWithRaw(ctx, target)\n\n\t<-waitFingerprinting\n\tdeleted, err := client.ImageRemove(ctx, target, types.ImageRemoveOptions{})\n\tassert.NilError(t, err)\n\tfor _, item := range deleted {\n\t\tfor i := 0; i < len(inspectTarget.RootFS.Layers); i++ {\n\t\t\tassert.Assert(t, item.Deleted != inspectTarget.RootFS.Layers[i], \"deleted target image layer\")\n\t\t}\n\t}\n\n\t<-waitDelta\n\tinspectDelta, _, err := client.ImageInspectWithRaw(ctx, delta)\n\tassert.NilError(t, err)\n\n\tinspectBase, _, err := client.ImageInspectWithRaw(ctx, base)\n\tassert.NilError(t, 
err)\n\tassert.Assert(t, inspectDelta.Config.Labels[\"io.resin.delta.base\"] == inspectBase.ID)\n}", "func Test_jsonpatch_Add_WithNonExistentParent_is_error(t *testing.T) {\n\tg := NewWithT(t)\n\n\tpatch1, _ := DecodePatch([]byte(`\n\t\t[{\"op\":\"add\", \"path\":\"/level1/level2/test_key\", \"value\":\"qwe\"}]\n\t`))\n\n\torigDoc := []byte(`{\"bar\":\"foo\"}`)\n\n\t_, err := patch1.Apply(origDoc)\n\tg.Expect(err).Should(HaveOccurred(), \"patch apply\")\n}" ]
[ "0.7377411", "0.6283647", "0.6053458", "0.58340085", "0.57508224", "0.5725261", "0.5700338", "0.56593096", "0.5609627", "0.5538565", "0.55150545", "0.5466872", "0.54553527", "0.5412734", "0.5387116", "0.53148997", "0.5268616", "0.52679753", "0.52667946", "0.5256825", "0.5214256", "0.52132535", "0.5191079", "0.51722527", "0.515398", "0.5147439", "0.5132419", "0.5127984", "0.5114139", "0.5093225", "0.5087033", "0.5079732", "0.50762725", "0.5049804", "0.5046739", "0.50403833", "0.5011119", "0.5005303", "0.50019526", "0.49942088", "0.49910322", "0.49857786", "0.49797502", "0.49645782", "0.49630836", "0.49619606", "0.49483612", "0.49400058", "0.49326205", "0.49286816", "0.4919028", "0.49147913", "0.49075094", "0.49055037", "0.49010593", "0.48869547", "0.48785833", "0.486872", "0.48677102", "0.48630694", "0.48468605", "0.4842078", "0.48211232", "0.48193243", "0.48097983", "0.48057967", "0.4803962", "0.47998914", "0.47960025", "0.47855744", "0.47797284", "0.47785172", "0.47742605", "0.4768312", "0.47664377", "0.47628248", "0.47596535", "0.47570553", "0.47570127", "0.47436017", "0.4741869", "0.4731545", "0.47286287", "0.47233924", "0.47207046", "0.47108394", "0.47105542", "0.47082433", "0.47065178", "0.47063786", "0.47017115", "0.47008753", "0.46904755", "0.46797088", "0.4678837", "0.46714503", "0.46655124", "0.46609437", "0.4658428", "0.46528414" ]
0.7310727
1
TODO: this could be improved; here we are creating a new router. We cannot use router.CreateRouter() because it generates cyclic references
func testRouter() *gin.Engine {
	gin.SetMode(gin.TestMode)
	router := gin.Default()
	router.HandleMethodNotAllowed = true
	return router
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewRouter() *Router { return &Router{mux.NewRouter()} }", "func newRouter() *router {\n\treturn &router{\n\t\troots: make(map[string]*node),\n\t}\n}", "func newRouter() *Router {\n\tr := new(Router)\n\tr.routeMap = make(map[string]*Route)\n\tfor m := range METHODS {\n\t\tr.routeMap[m] = newRoute(\"/\", nil, nil)\n\t}\n\tr.routeNamedMap = make(map[string]string)\n\tr.group = newGroup()\n\treturn r\n}", "func NewRouter(api *API) *Router {\n\trouter := &Router{\n\t\tAPI: api,\n\t}\n\n\thostname, err := os.Hostname()\n\tif hostname == \"\" || err != nil {\n\t\thostname = \"localhost\"\n\t}\n\tvar buf [12]byte\n\tvar b64 string\n\tfor len(b64) < 10 {\n\t\trand.Read(buf[:])\n\t\tb64 = base64.StdEncoding.EncodeToString(buf[:])\n\t\tb64 = strings.NewReplacer(\"+\", \"\", \"/\", \"\").Replace(b64)\n\t}\n\n\tprefix = fmt.Sprintf(\"%s/%s\", hostname, b64[0:10])\n\n\tr := phi.NewRouter()\n\n\tr.Use(router.requestID)\n\tr.Use(router.recover)\n\tr.Use(router.logger)\n\tr.Use(router.cors)\n\n\tr.NotFound(router.notFound)\n\tr.MethodNotAllowed(router.methodNotAllowed)\n\n\tr.Get(\"/\", HomeController{API: api}.Index)\n\n\tr.Route(\"/api/v1\", func(r phi.Router) {\n\t\tr.Route(\"/user\", func(r phi.Router) {\n\t\t\tr.Post(\"/sign_in\", LoginController{API: api}.Create)\n\t\t\tr.Post(\"/token\", TokenController{API: api}.Create)\n\t\t})\n\n\t\tr.Group(func(r phi.Router) {\n\t\t\tr.Use(api.JWTAuth.Verify)\n\t\t\t// Job Routes\n\t\t\tr.Group(func(r phi.Router) {\n\t\t\t\tr.Get(\"/job\", JobController{API: api}.Index)\n\t\t\t\tr.Post(\"/job\", JobController{API: api}.Create)\n\t\t\t\tr.Route(\"/job/{jobID}\", func(r phi.Router) {\n\t\t\t\t\tr.Get(\"/\", JobController{API: api}.Show)\n\t\t\t\t\tr.Delete(\"/\", JobController{API: api}.Delete)\n\n\t\t\t\t\t// Detail Routes\n\t\t\t\t\tr.Route(\"/detail\", func(r phi.Router) {\n\t\t\t\t\t\tr.Post(\"/\", JobDetailController{API: api}.Create)\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\t// File routes\n\t\t\tr.Group(func(r phi.Router) {\n\t\t\t\tr.Get(\"/file/log\", FileLogController{API: api}.Index)\n\t\t\t\tr.Get(\"/file\", FileController{API: api}.Index)\n\t\t\t\tr.Post(\"/file\", FileController{API: api}.Create)\n\t\t\t\tr.Route(\"/file/{fileID}\", func(r phi.Router) {\n\t\t\t\t\tr.Get(\"/log\", FileLogController{API: api}.Index)\n\t\t\t\t\tr.Get(\"/\", FileController{API: api}.Show)\n\t\t\t\t\tr.Put(\"/\", FileController{API: api}.Update)\n\t\t\t\t\tr.Delete(\"/\", FileController{API: api}.Delete)\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tr.Get(\"/upload/dir\", UploadController{API: api}.DirIndex)\n\t\t\tr.Post(\"/upload\", UploadController{API: api}.Create)\n\t\t})\n\t})\n\n\trouter.Server = &fasthttp.Server{\n\t\tHandler: r.ServeFastHTTP,\n\t\tReadTimeout: 10 * time.Second,\n\t\tMaxRequestBodySize: 1 * 1024 * 1024 * 1024,\n\t\tLogger: api.App.Logger,\n\t}\n\trouter.Addr = \":\" + strconv.Itoa(api.App.Config.Port)\n\trouter.Handler = r\n\n\treturn router\n}", "func newRouter() *Router {\n\treturn &Router{routes: make([]*Route, 0)}\n}", "func NewRouter(configuration config.Configuration) *Router {\n\tcfg, _ := config.New(\"../../shared/config/\")\n\n\tdbInstance, err := data.NewDbFactory(cfg)\n\tdbase, err := dbInstance.DBConnection()\n\n\tfService, errS := friend.NewService(dbase)\n\tif errS != nil {\n\t\tglog.Fatalf(\"Fatal Error on create friend Service : %s\", errS.Error())\n\t}\n\n\tfController, errC := friend.NewController(fService)\n\tif errC != nil {\n\t\tglog.Fatalf(\"Fatal Error on create friend Controller : %s\", errC.Error())\n\t}\n\n\tnService, errS := 
notification.NewService(dbase)\n\tif errS != nil {\n\t\tglog.Fatalf(\"Fatal Error on create friend Service : %s\", errS.Error())\n\t}\n\n\tnController, errC := notification.NewController(nService)\n\tif err != nil {\n\t\tglog.Fatalf(\"Fatal Error on create notification Controller : %s\", errC.Error())\n\t}\n\n\treturn &Router{\n\t\tfriendController: *fController,\n\t\tfriendService: *fService,\n\t\tnotificationController: *nController,\n\t\tnotificationService: *nService,\n\t}\n}", "func CreateRouter() {\n\trouter = mux.NewRouter()\n}", "func NewRouter() *Router {\n\treturn &Router{connCache: map[connKey]*grpc.ClientConn{}}\n}", "func newRouter() *router {\n\treturn &router{\n\t\troots: make(map[string]*node),\n\t\thandlers: make(map[string]HandlerFunc),\n\t}\n}", "func (routeObj *Routes)NewRouter() *mux.Router {\n log := logger.GetLoggerInstance()\n router := mux.NewRouter().StrictSlash(true)\n routeObj.CreateAllRoutes()\n for _, route := range routeObj.entries {\n var handler http.Handler\n handler = route.HandlerFunc\n router.\n Methods(route.Method).\n Path(route.Pattern).\n Name(route.Name).\n Handler(handler)\n log.Trace(\"Created route for %s\", route.Name)\n }\n routeObj.controller = new(controller)\n return router\n}", "func CreateManagedRouter(router *httprouter.Router) *ManagedRouter {\n return &ManagedRouter {\n router: router,\n }\n}", "func newRouter() martini.Router {\n\tr := martini.NewRouter()\n\tr.Get(\"/images/latest\", getLastImage)\n\tr.Get(\"/users/top\", getUsersTop)\n\tr.Get(\"/users/:user_id\", getUser)\n\tr.Post(\"/messages/slack\", addMessage)\n\tr.Get(\"/messages\", getMessages)\n\tr.Get(\"/questions/current\", getCurrentQuestion)\n\tr.Post(\"/slack/commands/tv\", slackCommandTV)\n\treturn r\n}", "func NewRouter() *router {\n\treturn newRpcRouter()\n}", "func CreateRouter() *mux.Router {\n\n\trouter := mux.NewRouter().StrictSlash(true)\n\n\trouter.MethodNotAllowedHandler = http.HandlerFunc(handlers.MethodNotAllowedHandler)\n\trouter.NotFoundHandler = http.HandlerFunc(handlers.NotFoundHandler)\n\n\tfor _, route := range LambdaRoutes {\n\t\trouter.\n\t\t\tMethods(route.Method).\n\t\t\tPath(route.Pattern).\n\t\t\tName(route.Name).\n\t\t\tHandler(auth.LambdaRouterAuthenticationWrapper(route.Pattern, route.Method, util.SentryWrapper(route.HandlerFunc)))\n\t}\n\n\tfor scope, route := range ImplicitRoutes {\n\t\trouter.\n\t\t\tMethods(route.Method).\n\t\t\tPath(route.Pattern).\n\t\t\tName(route.Name).\n\t\t\tHandler(auth.ImplicitRouterAuthenticationWrapper(scope, util.SentryWrapper(route.HandlerFunc)))\n\t}\n\n\tfor _, route := range ExplicitRoutes {\n\t\trouter.\n\t\t\tMethods(route.Method).\n\t\t\tPath(route.Pattern).\n\t\t\tName(route.Name).\n\t\t\tHandler(auth.ExplicitRouterAuthenticationWrapper(route.Pattern, route.Method, util.SentryWrapper(route.HandlerFunc)))\n\t}\n\n\tfor provider, routes := range VoiceCommandRoutes {\n\t\tfor _, route := range routes {\n\t\t\trouter.\n\t\t\t\tMethods(route.Method).\n\t\t\t\tPath(route.Pattern).\n\t\t\t\tName(route.Name).\n\t\t\t\tHandler(auth.AuthenticateVoiceRequest(provider, util.SentryWrapper(route.HandlerFunc)))\n\t\t}\n\t}\n\n\trouter.Methods(\"GET\").Path(\"/\").Name(\"Status Check\").HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(200); w.Write([]byte(\"ok\")) })\n\n\treturn router\n}", "func NewRouter(log Logger) *Router {\n\tr := &Router{\n\t\tch: make(chan *MsgDiagMsgIndDst),\n\t\tm: make(map[string]chan *MsgDiagMsgInd),\n\t\tdone: make(chan struct{}),\n\t\tlog: log,\n\t}\n\n\tgo 
r.run(r.done)\n\treturn r\n}", "func NewRouter() Router {\n\treturn &router{chi.NewRouter()}\n}", "func MakeRouter(externIP [4]byte, port uint16) Router {\n\tmyPeer := MakePeer(externIP, port)\n\treturn Router{\n\t\ttree: makeTree(myPeer),\n\t\tmyPeerUDPAddr: myPeer.getUDPAddr(),\n\t\tMyPeerInfo: myPeer,\n\t\tmyPeerNonce: myPeer.computePeerNonce(),\n\t}\n}", "func NewRouter(db *sql.DB) *Router {\n\treturn &Router{\n\t\tmux: chi.NewRouter(),\n\t\tdb: db,\n\t\tc: &realClock{},\n\t}\n}", "func CreateRouter(request *restful.Request, response *restful.Response) {\n\n\tnamespace := request.PathParameter(\"namespace\")\n\n\tnewRouter := Router{}\n\terr := request.ReadEntity(&newRouter)\n\n\tif err != nil {\n\t\tresponse.WriteAsJson(err)\n\t\treturn\n\t}\n\n\tvar router *v1.Service\n\n\tserviceType, annotationMap, err := parseParameter(newRouter)\n\n\tif err != nil {\n\t\tresponse.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf(\"wrong annotations, missing key or value\")))\n\t\treturn\n\t}\n\n\trouter, err = routers.CreateRouter(namespace, serviceType, annotationMap)\n\n\tif err != nil {\n\t\tresponse.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err))\n\t\treturn\n\t}\n\n\tresponse.WriteAsJson(router)\n}", "func newRouter(hdl *handler.AppHandler) *mux.Router {\n\n\t// I should take this as a dependency, but need to do some work with wire\n\trtr := mux.NewRouter()\n\n\t// I should take this as a dependency, but need to do some work with wire\n\tc := alice.New()\n\n\t// add Standard Handler chain and zerolog logger to Context\n\tc = hdl.AddStandardHandlerChain(c)\n\n\t// send Router through PathPrefix method to validate any standard\n\t// subroutes you may want for your APIs. e.g. I always want to be\n\t// sure that every request has \"/api\" as part of it's path prefix\n\t// without having to put it into every handle path in my various\n\t// routing functions\n\trtr = rtr.PathPrefix(\"/api\").Subrouter()\n\n\t// Match only POST requests at /api/v1/movies\n\t// with Content-Type header = application/json\n\trtr.Handle(\"/v1/movies\",\n\t\tc.Append(hdl.AccessTokenHandler).\n\t\t\tThen(http.HandlerFunc(hdl.CreateMovie))).\n\t\tMethods(\"POST\").\n\t\tHeaders(\"Content-Type\", \"application/json\")\n\n\t// Match only PUT requests having an ID at /api/v1/movies/{id}\n\t// with the Content-Type header = application/json\n\trtr.Handle(\"/v1/movies/{id}\",\n\t\tc.Append(hdl.AccessTokenHandler).\n\t\t\tThen(http.HandlerFunc(hdl.UpdateMovie))).\n\t\tMethods(\"PUT\").\n\t\tHeaders(\"Content-Type\", \"application/json\")\n\n\t// Match only DELETE requests having an ID at /api/v1/movies/{id}\n\trtr.Handle(\"/v1/movies/{id}\",\n\t\tc.Append(hdl.AccessTokenHandler).\n\t\t\tThen(http.HandlerFunc(hdl.DeleteMovie))).\n\t\tMethods(\"DELETE\")\n\n\t// Match only GET requests having an ID at /api/v1/movies/{id}\n\trtr.Handle(\"/v1/movies/{id}\",\n\t\tc.Append(hdl.AccessTokenHandler).\n\t\t\tThen(http.HandlerFunc(hdl.FindByID))).\n\t\tMethods(\"GET\")\n\n\t// Match only GET requests /api/v1/movies\n\trtr.Handle(\"/v1/movies\",\n\t\tc.Append(hdl.AccessTokenHandler).\n\t\t\tThen(http.HandlerFunc(hdl.FindAll))).\n\t\tMethods(\"GET\")\n\n\t// Match only GET requests at /api/v1/ping\n\trtr.Handle(\"/v1/ping\",\n\t\tc.Then(http.HandlerFunc(hdl.Ping))).\n\t\tMethods(\"GET\")\n\n\treturn rtr\n}", "func CreateRouter() *gin.Engine {\n\t// create the router\n\trouter := gin.Default()\n\tvar nrMiddleware gin.HandlerFunc\n\tif os.Getenv(\"GO_ENVIRONMENT\") == \"production\" 
{\n\t\tgin.SetMode(gin.ReleaseMode)\n\t\trouter = gin.New()\n\t\t// nrMiddleware = metric.NewRelic(true)\n\t} else {\n\t\t// nrMiddleware = metric.NewRelic(false)\n\t}\n\t// router.Use(context.PropagateHeaders)\n\trouter.Use(gin.Recovery())\n\t//router.Use(datadog.Handler)\n\n\trouter.NoRoute(noRouteHandler)\n\trouter.NoMethod(methodNotAllowedHandler)\n\n\trouter.HandleMethodNotAllowed = true\n\trouter.RedirectFixedPath = false\n\trouter.RedirectTrailingSlash = false\n\n\t// ping\n\trouter.GET(\"/ping\", pingHandler)\n\n\t//router\n\tparentGroup := router.Group(\"/\")\n\tif nrMiddleware != nil {\n\t\tparentGroup.Use(nrMiddleware)\n\t}\n\t// configure the whole set of URL/Handlers mappings\n\tconfigureMappings(parentGroup)\n\n\treturn router\n}", "func NewRouter() *Router {\n\treturn &Router{}\n}", "func NewRouter(conf config.Config) Router {\n\tvar r Router\n\tr.services = make([]*services.Service, len(conf.Services))\n\tr.serviceNameMap = make(map[string]*services.Service)\n\tfor i, sconf := range conf.Services {\n\t\ts := services.NewService(sconf)\n\t\tr.services[i] = s\n\t\tr.serviceNameMap[s.Name] = s\n\t}\n\tr.RESTPort = conf.Router.Port\n\tlogstashHost = conf.LogstashHost\n\tconfig.ProxyFor = conf.Router.Name\n\treturn r\n}", "func NewRouter(opt *Options) *Router {\n\tengine := gin.New()\n\tengine.Use(gin.Recovery())\n\t// engine := gin.Default()\n\t// engine.Use(limits.RequestSizeLimiter(int64(opt.MaxUploadSize)))\n\tif !opt.GinLogEnabled {\n\t\tgin.SetMode(gin.ReleaseMode)\n\t} else {\n\t\tconf := gin.LoggerConfig{\n\t\t\tSkipPaths: opt.GinLogSkipPath,\n\t\t}\n\t\tengine.Use(gin.LoggerWithConfig(conf))\n\t\t// engine.Use(ginlog.Middleware())\n\t}\n\n\tr := &Router{\n\t\tEngine: engine,\n\t\tRoutes: make(map[string][]*Route, 0),\n\t\tProfileDescriptions: make([]*Profile, 0),\n\t}\n\n\tif opt.MetricsEnabled {\n\t\tklog.Infof(\"start load router path:%s \", opt.MetricsPath)\n\t\tp, err := metrics.NewOcPrometheus()\n\t\tif err != nil {\n\t\t\tklog.Fatalf(\"NewOcPrometheus err: %#v\", err)\n\t\t}\n\n\t\tmetrics.RegisterGinView()\n\t\tr.Engine.GET(\"/metrics\", gin.HandlerFunc(func(c *gin.Context) {\n\t\t\tp.Exporter.ServeHTTP(c.Writer, c.Request)\n\t\t}))\n\n\t\tr.AddProfile(\"GET\", MetricsPath, \"Prometheus format metrics\")\n\t}\n\n\tif opt.PprofEnabled {\n\t\t// automatically add routers for net/http/pprof e.g. 
/debug/pprof, /debug/pprof/heap, etc.\n\t\tginpprof.Wrap(r.Engine)\n\t\tr.AddProfile(\"GET\", PprofPath, `PProf related things:<br/>\n\t\t\t<a href=\"/debug/pprof/goroutine?debug=2\">full goroutine stack dump</a>`)\n\t}\n\n\tr.Opt = opt\n\tr.NoRoute(r.masterHandler)\n\treturn r\n}", "func New() *Router {\n\treturn &Router{\n\t\tdestinations: destinationList{},\n\t\tcommands: map[string]commandList{},\n\t\tregexpCommands: []regexpCommand{},\n\t\tdone: make(chan struct{}),\n\t}\n}", "func New(m Matcher) *Router {\n\tr := &Router{\n\t\tmatcher: m,\n\t\tRoutes: map[*Route]string{},\n\t\tNamedRoutes: map[string]*Route{},\n\t}\n\tr.Router = r\n\treturn r\n}", "func NewRouter(paramSpace params.Subspace) *Router {\n\treturn &Router{\n\t\tparamSpace: paramSpace,\n\t\troutes: make(map[string]sdk.Handler),\n\t}\n}", "func New() *Router {\n\treturn &Router{}\n}", "func New() *Router {\n\treturn &Router{}\n}", "func newRouter(log tools.Logger, statsd tools.StatsD, healthcheckHandlerFunc http.HandlerFunc, apiRouteHandler ChiRouteHandler, uiRouteHandler ChiRouteHandler) http.Handler {\n\trouter := chi.NewRouter()\n\n\trouter.Use(middleware.Timeout(60 * time.Second))\n\n\trouter.Get(\"/healthcheck\", healthcheckHandlerFunc)\n\n\trouter.Route(\"/api\", apiRouteHandler)\n\n\trouter.Route(\"/\", uiRouteHandler) // mount to the root of this route\n\n\treturn router\n}", "func (c *container) Router() *http.Router {\n\tif nil == c.router {\n\t\tc.router = http.NewRouter(c.HttpService(), c.Config().GetCors(), c.LoggerService(), c.HandlerPushToken())\n\t}\n\treturn c.router\n}", "func NewRouter(database *db.DB) http.Handler {\n\trouter := chi.NewRouter()\n\n\t// Set up our middleware with sane defaults\n\trouter.Use(middleware.RealIP)\n\trouter.Use(middleware.Logger)\n\trouter.Use(middleware.Recoverer)\n\trouter.Use(middleware.DefaultCompress)\n\trouter.Use(middleware.Timeout(60 * time.Second))\n\n\t// Set up our API\n\trouter.Mount(\"/api/v1/\", v1.NewRouter(database))\n\n\t// serve web app\n\trouter.Handle(\"/*\", http.FileServer(http.Dir(\"./web/exchange/dist\")))\n\n\treturn router\n}", "func (b *Baa) Router() Router {\n\tif b.router == nil {\n\t\tb.router = b.GetDI(\"router\").(Router)\n\t}\n\treturn b.router\n}", "func NewRouter() Router {\n\treturn &router{\n\t\tlogger: logging.NewLogger(),\n\t\thealth: health.NewHealth(),\n\t\trecruiter: recruiterservice.NewRecruiterService(),\n\t\tauth: authservice.NewAuthService(),\n\t\tjobs: jobsservice.NewJobService(),\n\t\ttokenAuth: jwt.NewTokenAuth(),\n\t\tmiddlewares: middlewares.NewMiddlewares(),\n\t}\n}", "func (r *Router) Create() error {\n\tfmt.Printf(emoji.Sprintf(\":gear:\")+\" Generating \"+aurora.Yellow(\"%s\").String()+\" router\\n\\n\", r.Name)\n\n\tisNewRouter := true\n\toverwriteRouter := true\n\toverwriteController := true\n\n\t// check if router exists and if user wants to overwrite it\n\tif _, err := os.Stat(fmt.Sprintf(\"%s/src/routes/%s.ts\", r.Project.AbsolutePath, r.Name)); err == nil {\n\t\toverwriteRouter = util.AskForConfirmation(fmt.Sprintf(aurora.Yellow(\" src/routes/%s.ts already exists. 
Would you like to overwrite it?\").String(), r.Name))\n\t\tisNewRouter = false\n\t}\n\tif overwriteRouter {\n\t\tfmt.Print(\" src/routes/\")\n\t\terr := util.CreateFile(r, r.Name+\".ts\", r.Project.AbsolutePath+\"/src/routes\", string(tpl.RouterTemplate()), 0)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// check if controller exists and if user wants to overwrite it\n\tif _, err := os.Stat(fmt.Sprintf(\"%s/%s.ts\", r.Project.AbsolutePath+\"/src/controllers\", r.Name)); err == nil {\n\t\toverwriteController = util.AskForConfirmation(fmt.Sprintf(aurora.Yellow(\" src/controllers/%s.ts already exists. Would you like to overwrite it?\").String(), r.Name))\n\t}\n\tif overwriteController {\n\t\tfmt.Print(\" src/controllers/\")\n\t\terr := util.CreateFile(r, r.Name+\".ts\", r.Project.AbsolutePath+\"/src/controllers\", string(tpl.ControllerTemplate()), 0)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Update the app if it's a new router\n\tif isNewRouter {\n\t\tfmt.Printf(\" \" + aurora.Cyan(\"Updating \").String() + \"src/app.ts\\n\")\n\t\tappFile, err := ioutil.ReadFile(fmt.Sprintf(\"%s/src/app.ts\", r.Project.AbsolutePath))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tappFileLines := strings.Split(string(appFile), \"\\n\")\n\t\tuseStr := fmt.Sprintf(\"app.use(%sRouter);\", r.Name)\n\t\timportStr := fmt.Sprintf(\"import { %sRouter } from \\\"./routes/%s\\\";\", r.Name, r.Name)\n\t\tlinesToAdd := []string{useStr, importStr}\n\t\tfor i, line := range appFileLines {\n\t\t\tif strings.Contains(line, \"import cors from\") {\n\t\t\t\tappFileLines = append(appFileLines, \"\")\n\t\t\t\tcopy(appFileLines[i+2:], appFileLines[i+1:])\n\t\t\t\tappFileLines[i+1] = linesToAdd[1]\n\t\t\t}\n\t\t\tif strings.Contains(line, \"app.use((req: Request\") {\n\t\t\t\tappFileLines = append(appFileLines, \"\")\n\t\t\t\tcopy(appFileLines[i+2:], appFileLines[i+1:])\n\t\t\t\tappFileLines[i+1] = linesToAdd[0]\n\t\t\t}\n\t\t}\n\n\t\toutput := strings.Join(appFileLines, \"\\n\")\n\t\terr = ioutil.WriteFile(fmt.Sprintf(\"%s/src/app.ts\", r.Project.AbsolutePath), []byte(output), 0644)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfmt.Println(\"\\n\" + emoji.Sprintf(\":party_popper:\") + \"Done\")\n\n\treturn nil\n}", "func NewRouter(a, b, c phi.Sender) Router {\n\treturn Router{\n\t\tdestA: a,\n\t\tdestB: b,\n\t\tdestC: c,\n\t}\n}", "func NewRouter(defClient rb.DefinitionManager,\n\tprofileClient rb.ProfileManager,\n\tinstClient app.InstanceManager,\n\tqueryClient app.QueryManager,\n\tconfigClient app.ConfigManager,\n\tconnectionClient connection.ConnectionManager,\n\ttemplateClient rb.ConfigTemplateManager,\n\tsubscriptionClient app.InstanceStatusSubManager,\n\thealthcheckClient healthcheck.InstanceHCManager) *mux.Router {\n\n\trouter := mux.NewRouter()\n\n\t// Setup Instance handler routes\n\tif instClient == nil {\n\t\tinstClient = app.NewInstanceClient()\n\t}\n\tinstHandler := instanceHandler{client: instClient}\n\tinstRouter := router.PathPrefix(\"/v1\").Subrouter()\n\tinstRouter.HandleFunc(\"/instance\", instHandler.createHandler).Methods(\"POST\")\n\tinstRouter.HandleFunc(\"/instance\", instHandler.listHandler).Methods(\"GET\")\n\t// Match rb-names, versions or profiles\n\tinstRouter.HandleFunc(\"/instance\", instHandler.listHandler).\n\t\tQueries(\"rb-name\", \"{rb-name}\",\n\t\t\t\"rb-version\", \"{rb-version}\",\n\t\t\t\"profile-name\", \"{profile-name}\").Methods(\"GET\")\n\t//Want to get full Data -> add query param: 
/install/{instID}?full=true\n\tinstRouter.HandleFunc(\"/instance/{instID}\", instHandler.getHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/upgrade\", instHandler.upgradeHandler).Methods(\"POST\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/query\", instHandler.queryHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/query\", instHandler.queryHandler).\n\t\tQueries(\"ApiVersion\", \"{ApiVersion}\",\n\t\t\t\"Kind\", \"{Kind}\",\n\t\t\t\"Name\", \"{Name}\",\n\t\t\t\"Labels\", \"{Labels}\").Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}\", instHandler.deleteHandler).Methods(\"DELETE\")\n\n\t// Status handler routes\n\tif subscriptionClient == nil {\n\t\tsubscriptionClient = app.NewInstanceStatusSubClient()\n\t\tsubscriptionClient.RestoreWatchers()\n\t}\n\tinstanceStatusSubHandler := instanceStatusSubHandler{client: subscriptionClient}\n\tinstRouter.HandleFunc(\"/instance/{instID}/status\", instHandler.statusHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/status/subscription\", instanceStatusSubHandler.listHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/status/subscription\", instanceStatusSubHandler.createHandler).Methods(\"POST\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/status/subscription/{subID}\", instanceStatusSubHandler.getHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/status/subscription/{subID}\", instanceStatusSubHandler.updateHandler).Methods(\"PUT\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/status/subscription/{subID}\", instanceStatusSubHandler.deleteHandler).Methods(\"DELETE\")\n\n\t// Query handler routes\n\tif queryClient == nil {\n\t\tqueryClient = app.NewQueryClient()\n\t}\n\tqueryHandler := queryHandler{client: queryClient}\n\tqueryRouter := router.PathPrefix(\"/v1\").Subrouter()\n\tqueryRouter.HandleFunc(\"/query\", queryHandler.queryHandler).Methods(\"GET\")\n\tqueryRouter.HandleFunc(\"/query\", queryHandler.queryHandler).\n\t\tQueries(\"Namespace\", \"{Namespace}\",\n\t\t\t\"CloudRegion\", \"{CloudRegion}\",\n\t\t\t\"ApiVersion\", \"{ApiVersion}\",\n\t\t\t\"Kind\", \"{Kind}\",\n\t\t\t\"Name\", \"{Name}\",\n\t\t\t\"Labels\", \"{Labels}\").Methods(\"GET\")\n\n\t//Setup the broker handler here\n\t//Use the base router without any path prefixes\n\tbrokerHandler := brokerInstanceHandler{client: instClient}\n\trouter.HandleFunc(\"/{cloud-owner}/{cloud-region}/infra_workload\", brokerHandler.createHandler).Methods(\"POST\")\n\trouter.HandleFunc(\"/{cloud-owner}/{cloud-region}/infra_workload/{instID}\", brokerHandler.getHandler).Methods(\"GET\")\n\trouter.HandleFunc(\"/{cloud-owner}/{cloud-region}/infra_workload\", brokerHandler.findHandler).Queries(\"name\", \"{name}\").Methods(\"GET\")\n\trouter.HandleFunc(\"/{cloud-owner}/{cloud-region}/infra_workload/{instID}\", brokerHandler.deleteHandler).Methods(\"DELETE\")\n\n\t//Setup the connectivity api handler here\n\tif connectionClient == nil {\n\t\tconnectionClient = connection.NewConnectionClient()\n\t}\n\tconnectionHandler := connectionHandler{client: connectionClient}\n\tinstRouter.HandleFunc(\"/connectivity-info\", connectionHandler.createHandler).Methods(\"POST\")\n\tinstRouter.HandleFunc(\"/connectivity-info/{connname}\", connectionHandler.getHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/connectivity-info/{connname}\", connectionHandler.deleteHandler).Methods(\"DELETE\")\n\n\t//Setup resource bundle definition routes\n\tif defClient == nil {\n\t\tdefClient = 
rb.NewDefinitionClient()\n\t}\n\tdefHandler := rbDefinitionHandler{client: defClient}\n\tresRouter := router.PathPrefix(\"/v1/rb\").Subrouter()\n\tresRouter.HandleFunc(\"/definition\", defHandler.createHandler).Methods(\"POST\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}/content\", defHandler.uploadHandler).Methods(\"POST\")\n\tresRouter.HandleFunc(\"/definition/{rbname}\", defHandler.listVersionsHandler).Methods(\"GET\")\n\tresRouter.HandleFunc(\"/definition\", defHandler.listAllHandler).Methods(\"GET\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}\", defHandler.getHandler).Methods(\"GET\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}\", defHandler.updateHandler).Methods(\"PUT\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}\", defHandler.deleteHandler).Methods(\"DELETE\")\n\n\t//Setup resource bundle profile routes\n\tif profileClient == nil {\n\t\tprofileClient = rb.NewProfileClient()\n\t}\n\tprofileHandler := rbProfileHandler{client: profileClient}\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}/profile\", profileHandler.createHandler).Methods(\"POST\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}/profile\", profileHandler.listHandler).Methods(\"GET\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}/profile/{prname}/content\", profileHandler.uploadHandler).Methods(\"POST\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}/profile/{prname}\", profileHandler.getHandler).Methods(\"GET\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}/profile/{prname}\", profileHandler.updateHandler).Methods(\"PUT\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}/profile/{prname}\", profileHandler.deleteHandler).Methods(\"DELETE\")\n\n\t// Config Template\n\tif templateClient == nil {\n\t\ttemplateClient = rb.NewConfigTemplateClient()\n\t}\n\ttemplateHandler := rbTemplateHandler{client: templateClient}\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}/config-template\", templateHandler.createHandler).Methods(\"POST\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}/config-template\", templateHandler.listHandler).Methods(\"GET\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}/config-template/{tname}/content\", templateHandler.uploadHandler).Methods(\"POST\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}/config-template/{tname}\", templateHandler.getHandler).Methods(\"GET\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}/config-template/{tname}\", templateHandler.updateHandler).Methods(\"PUT\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}/config-template/{tname}\", templateHandler.deleteHandler).Methods(\"DELETE\")\n\n\t// Config value\n\tif configClient == nil {\n\t\tconfigClient = app.NewConfigClient()\n\t}\n\tconfigHandler := rbConfigHandler{client: configClient}\n\tinstRouter.HandleFunc(\"/instance/{instID}/config\", configHandler.createHandler).Methods(\"POST\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/config\", configHandler.listHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/config/{cfgname}\", configHandler.getHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/config/{cfgname}\", configHandler.updateHandler).Methods(\"PUT\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/config/{cfgname}\", configHandler.deleteAllHandler).Methods(\"DELETE\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/config/{cfgname}/delete\", 
configHandler.deleteHandler).Methods(\"POST\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/config/{cfgname}/rollback\", configHandler.rollbackHandler).Methods(\"POST\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/config/{cfgname}/tag\", configHandler.tagListHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/config/{cfgname}/tag/{tagname}\", configHandler.getTagHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/config/{cfgname}/version\", configHandler.versionListHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/config/{cfgname}/version/{cfgversion}\", configHandler.getVersionHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/config/{cfgname}/tagit\", configHandler.tagitHandler).Methods(\"POST\")\n\n\t// Instance Healthcheck API\n\tif healthcheckClient == nil {\n\t\thealthcheckClient = healthcheck.NewHCClient()\n\t}\n\thealthcheckHandler := instanceHCHandler{client: healthcheckClient}\n\tinstRouter.HandleFunc(\"/instance/{instID}/healthcheck\", healthcheckHandler.listHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/healthcheck\", healthcheckHandler.createHandler).Methods(\"POST\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/healthcheck/{hcID}\", healthcheckHandler.getHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/healthcheck/{hcID}\", healthcheckHandler.deleteHandler).Methods(\"DELETE\")\n\n\t// Add healthcheck path\n\tinstRouter.HandleFunc(\"/healthcheck\", healthCheckHandler).Methods(\"GET\")\n\n\treturn router\n}", "func NewRouter(e *env.Environment, s service) *Router {\n\treturn &Router{\n\t\tenv: e,\n\t\tservice: s,\n\t}\n}", "func newRouter() *mux.Router {\n\troutes := []route{\n\t\tnewPage(\"/\", indexTmpls, getIndexData),\n\t\tsimpleRoute{\"/connect\", \"GET\", googleauth.ConnectHandler},\n\t}\n\n\trouter := mux.NewRouter().StrictSlash(true)\n\tfor _, r := range routes {\n\t\tglog.V(1).Infof(\"Registering route for %q on %q\\n\", r.Method(), r.Pattern())\n\t\trouter.\n\t\t\tMethods(r.Method()).\n\t\t\tPath(r.Pattern()).\n\t\t\tHandlerFunc(r.HandlerFunc())\n\t}\n\treturn router\n}", "func NewRouter(root *OpenAPI) Router {\n\treturn newRouter(root)\n}", "func NewRouter(serviceTimeout time.Duration, logger *log.Logger) (r *Router) {\n\n\treturn &Router{\n\t\tMuxer: vestigo.NewRouter(),\n\t\tServiceTimeout: serviceTimeout,\n\t\tServices: make(map[string]*proxy.Service),\n\t\tLogger: logger,\n\t}\n}", "func NewRouter() *gin.Engine {\n\tpostServiceClient = post.NewAPIClient(post.NewConfiguration())\n\tuserServiceClient = user.NewAPIClient(user.NewConfiguration())\n\tratingServiceClient = rating.NewAPIClient(rating.NewConfiguration())\n\n\trouter := gin.Default()\n\tfor _, route := range routes {\n\t\tswitch route.Method {\n\t\tcase \"GET\":\n\t\t\trouter.GET(route.Pattern, route.HandlerFunc)\n\t\tcase \"POST\":\n\t\t\trouter.POST(route.Pattern, route.HandlerFunc)\n\t\tcase \"PUT\":\n\t\t\trouter.PUT(route.Pattern, route.HandlerFunc)\n\t\tcase \"DELETE\":\n\t\t\trouter.DELETE(route.Pattern, route.HandlerFunc)\n\t\t}\n\t}\n\n\treturn router\n}", "func (a *Api) Router() http.Handler {\n\trouter := mux.NewRouter()\n\n\t// /links post request to create link <link to source>, returns <short link>\n\trouter.HandleFunc(\"/links\", a.postCreateLink).Methods(http.MethodPost)\n\n\t// /{link} get redirect\n\trouter.HandleFunc(\"/link/{link_id}\", a.getPage).Methods(http.MethodGet)\n\n\trouter.HandleFunc(\"/signup\", 
a.postSignup).Methods(http.MethodPost)\n\trouter.HandleFunc(\"/signin\", a.postSignin).Methods(http.MethodPost)\n\n\t// lookup all my links\n\trouter.HandleFunc(\"/accounts/{id}\", a.authenticate(a.getAccount)).Methods(http.MethodGet)\n\n\t// create link with account\n\trouter.HandleFunc(\"/accounts/{id}/\", a.authenticate(a.postCreateUserLink)).Methods(http.MethodPost)\n\n\t// /accounts/{id}/delete/{link_id}\n\trouter.HandleFunc(\"/accounts/{id}/delete/{link_id}\", a.authenticate(a.getDeleteLink)).Methods(http.MethodGet)\n\n\trouter.Handle(\"/metrics\", promhttp.Handler())\n\n\trouter.Use(prom.Measurer())\n\trouter.Use(a.logger)\n\n\treturn router\n}", "func createRouter() *httprouter.Router {\n\n\trouter := httprouter.New()\n\trouter.GET(\"/\", index)\n\trouter.GET(\"/block/:block\", showBlock)\n\trouter.GET(\"/api/\"+network+\"/getdifficulty\", getDifficulty)\n\trouter.GET(\"/api/\"+network+\"/blocks\", getLatestBlocks)\n\trouter.GET(\"/api/\"+network+\"/block/:hash\", getBlock)\n\trouter.GET(\"/api/\"+network+\"/block-index/:height\", getBlockIndex)\n\trouter.GET(\"/api/\"+network+\"/tx/:txid\", getTransaction)\n\trouter.GET(\"/api/\"+network+\"/addr/:addr\", getAddressInfo)\n\trouter.GET(\"/api/\"+network+\"/addr/:addr/utxo\", getAddressUtxo)\n\n\tfileServer := http.FileServer(http.Dir(\"static\"))\n\n\trouter.GET(\"/static/*filepath\", func(w http.ResponseWriter, r *http.Request, p httprouter.Params) {\n\t\tw.Header().Set(\"Vary\", \"Accept-Encoding\")\n\t\tw.Header().Set(\"Cache-Control\", \"public, max-age=7776000\")\n\t\tr.URL.Path = p.ByName(\"filepath\")\n\t\tfileServer.ServeHTTP(w, r)\n\t})\n\n\treturn router\n}", "func (s *ServerState) NewRouter() *gin.Engine {\n\tr := gin.Default()\n\n\tUsers := r.Group(\"/user/\")\n\t{\n\t\t//placeholder handler functions demonstrating the grouping of the API\n\t\tUsers.POST(\"/\", s.addUser) //localhost:8080\n\t\tUsers.GET(\"/\", s.getUsers)\n\t\tUsers.GET(\"/:email\", s.getUser) //localhost:8080/user/sdakjfbdshfbsdihvb\n\t\t//Users.PUT(\"/:id\", s.placeholder)\n\t\t//Users.DELETE(\"/:id\", s.placeholder)\n\t}\n\n\tPosts := r.Group(\"/posts/\")\n\t{\n\t\tPosts.POST(\"/\", s.addPost)\n\t\tPosts.GET(\"/\", s.getPosts)\n\t\tPosts.GET(\"/:id\", s.getPost)\n\t\tPosts.PUT(\"/:id\", s.updatePost)\n\t\tPosts.DELETE(\"/:id\", s.deletePost)\n\t}\n\n\tCampaigns := r.Group(\"/campaigns\")\n\t{\n\t\tCampaigns.POST(\"/\", s.addCampaign)\n\t\tCampaigns.GET(\"/\", s.getCampaigns)\n\t\tCampaigns.GET(\"/:id\", s.getCampaign)\n\t\tCampaigns.PUT(\"/:id\", s.updateCampaign)\n\t\tCampaigns.PUT(\"/:id/funding\", s.updateCampaignFunding)\n\t\tCampaigns.PUT(\"/:id/approve\", s.approveCampaign)\n\t}\n\n\treturn r\n}", "func InitializeRouter(container *dependencies.Container) *gin.Engine {\n\tr := router.NewRouter()\n\n\tctrls := buildControllers(container)\n\n\tfor i := range ctrls {\n\t\tctrls[i].DefineRoutes(r)\n\t}\n\n\treturn r\n}", "func NewRouter(settings config.Config) (Router, error) {\n\tdb, err := database.NewDatabase(settings)\n\tif err != nil {\n\t\treturn Router{}, err\n\t}\n\n\treturn Router{\n\t\tSettings: settings,\n\t\tDatabase: db,\n\t}, nil\n}", "func NewRouter(host, username, password string) *Netgear {\n router := &Netgear{\n host: host,\n username: username,\n password: password,\n regex : regexp.MustCompile(\"<NewAttachDevice>(.*)</NewAttachDevice>\"),\n }\n return router\n}", "func createRouter() *mux.Router {\n\tlog.WithFields(log.Fields{\n\t\t\"environment\": environment,\n\t\t\"address\": addr,\n\t\t\"TLS\": (tlsCertFile != \"\" && 
tlsKeyFile != \"\"),\n\t}).Info(\"Create server\")\n\trenderer := render.NewRenderer()\n\trouter := mux.NewRouter()\n\trouter.HandleFunc(\"/render\", renderHandler(renderer)).Methods(http.MethodGet)\n\trouter.HandleFunc(\"/statistics\", statisticsHandler(renderer)).Methods(http.MethodGet)\n\trouter.Use(loggingMiddleware)\n\treturn router\n}", "func (s *Service) Router() *mux.Router {\n return s.router\n}", "func NewRouter(c Config) (Router, error) {\n\tc.Environment.Merge(DefaultEnvironment())\n\tr := Router{\n\t\tInterfaces: make(map[string]InterfaceConfig, len(c.Interfaces)),\n\t\tResolvers: make(map[string]ResolverConfig, len(c.Resolvers)),\n\t\tScalars: make(map[string]ScalarConfig, len(c.Scalars)),\n\t\tUnions: make(map[string]UnionConfig, len(c.Unions)),\n\t}\n\terr := r.load(c)\n\treturn r, err\n}", "func NewRouter(db *sql.DB, sub *mux.Router) (ur *Router) {\n\tur = new(Router)\n\tur.Subrouter = sub\n\tur.DB = db\n\treturn\n}", "func NewRouter(bot *tbot.Server) *tbot.Server {\n\n\tfor _, route := range routes {\n\n\t\tif route.Reply != \"\" {\n\t\t\tbot.Handle(route.Path, route.Reply)\n\t\t\tcontinue\n\t\t}\n\t\tbot.HandleFunc(route.Path, route.HandlerFunc)\n\t}\n\n\tbot.HandleDefault(handlers.EchoHandler)\n\treturn bot\n}", "func NewRouter(doc *openapi3.T) (routers.Router, error) {\n\tservers, err := makeServers(doc.Servers)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmuxRouter := mux.NewRouter().UseEncodedPath()\n\tr := &Router{}\n\tfor _, path := range doc.Paths.InMatchingOrder() {\n\t\tpathItem := doc.Paths[path]\n\t\tif len(pathItem.Servers) > 0 {\n\t\t\tif servers, err = makeServers(pathItem.Servers); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\toperations := pathItem.Operations()\n\t\tmethods := make([]string, 0, len(operations))\n\t\tfor method := range operations {\n\t\t\tmethods = append(methods, method)\n\t\t}\n\t\tsort.Strings(methods)\n\n\t\tfor _, s := range servers {\n\t\t\tmuxRoute := muxRouter.Path(s.base + path).Methods(methods...)\n\t\t\tif schemes := s.schemes; len(schemes) != 0 {\n\t\t\t\tmuxRoute.Schemes(schemes...)\n\t\t\t}\n\t\t\tif host := s.host; host != \"\" {\n\t\t\t\tmuxRoute.Host(host)\n\t\t\t}\n\t\t\tif err := muxRoute.GetError(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tr.muxes = append(r.muxes, routeMux{\n\t\t\t\tmuxRoute: muxRoute,\n\t\t\t\tvarsUpdater: s.varsUpdater,\n\t\t\t})\n\t\t\tr.routes = append(r.routes, &routers.Route{\n\t\t\t\tSpec: doc,\n\t\t\t\tServer: s.server,\n\t\t\t\tPath: path,\n\t\t\t\tPathItem: pathItem,\n\t\t\t\tMethod: \"\",\n\t\t\t\tOperation: nil,\n\t\t\t})\n\t\t}\n\t}\n\treturn r, nil\n}", "func (b *base) initRouter(services []Service) {\n\trouter := &router{b, services}\n\trouter.init()\n\tb.router = router\n}", "func (h Handler) NewRouter() *echo.Echo {\n\te := echo.New()\n\n\th.defineMiddlewares(e)\n\th.defineRoutes(e)\n\n\treturn e\n}", "func NewRouter(configuration *config.Configuration, log *utility.Log) *Router {\n\treturn &Router{config: configuration, log: log}\n}", "func CreateRouter(ctx api.Session) http.Handler {\n\tdoLookup := func(w http.ResponseWriter, r *http.Request) {\n\t\tks := keyPattern.FindStringSubmatch(r.URL.Path)\n\t\tif ks == nil {\n\t\t\thttp.NotFound(w, r)\n\t\t\treturn\n\t\t}\n\t\tkey := ks[1]\n\n\t\tdefer func() {\n\t\t\tif r := recover(); r != nil {\n\t\t\t\tvar err error\n\t\t\t\tif er, ok := r.(error); ok {\n\t\t\t\t\terr = er\n\t\t\t\t} else if es, ok := r.(string); ok {\n\t\t\t\t\terr = errors.New(es)\n\t\t\t\t} else 
{\n\t\t\t\t\tpanic(r)\n\t\t\t\t}\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t}\n\t\t}()\n\n\t\topts := cmdOpts\n\t\tparams := r.URL.Query()\n\t\tif dflt, ok := params[`default`]; ok && len(dflt) > 0 {\n\t\t\topts.Default = &dflt[0]\n\t\t}\n\t\topts.Merge = params.Get(`merge`)\n\t\topts.Type = params.Get(`type`)\n\t\topts.Variables = append(opts.Variables, params[`var`]...)\n\t\topts.RenderAs = `json`\n\t\tout := bytes.Buffer{}\n\t\tif hiera.LookupAndRender(ctx, &opts, []string{key}, &out) {\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t_, _ = w.Write(out.Bytes())\n\t\t} else {\n\t\t\thttp.Error(w, `404 value not found`, http.StatusNotFound)\n\t\t}\n\t}\n\n\trouter := http.NewServeMux()\n\trouter.HandleFunc(\"/lookup/\", doLookup)\n\treturn router\n}", "func newRouter() *mux.Router {\n r := mux.NewRouter()\n r.HandleFunc(\"/api/v1/hello\", handlerHello).Methods(\"GET\")\n r.HandleFunc(\"/ws-echo\", handlerWS)\n return r\n}", "func instantiateRouter(id string, r router, resolvers map[string]rdns.Resolver) error {\n\trouter := rdns.NewRouter(id)\n\tfor _, route := range r.Routes {\n\t\tresolver, ok := resolvers[route.Resolver]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"router '%s' references non-existent resolver or group '%s'\", id, route.Resolver)\n\t\t}\n\t\ttypes := route.Types\n\t\tif route.Type != \"\" { // Support the deprecated \"Type\" by just adding it to \"Types\" if defined\n\t\t\ttypes = append(types, route.Type)\n\t\t}\n\t\tr, err := rdns.NewRoute(route.Name, route.Class, types, route.Weekdays, route.Before, route.After, route.Source, route.DoHPath, route.Listener, route.TLSServerName, resolver)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failure parsing routes for router '%s' : %s\", id, err.Error())\n\t\t}\n\t\tr.Invert(route.Invert)\n\t\trouter.Add(r)\n\t}\n\tresolvers[id] = router\n\treturn nil\n}", "func (s *Server) Router() http.Handler {\n\tr := mux.NewRouter()\n\n\tapirouter := jsonapi.New(s.output, s.auth, s.sessionStore, nil)\n\tr.PathPrefix(apiPath).Handler(http.StripPrefix(apiPath, apirouter))\n\n\tapirouter.Handle(\"GET\", \"/sync/stats\",\n\t\ts.syncStats, true)\n\tapirouter.Handle(\"POST\", \"/sync/trigger\",\n\t\ts.syncTrigger, true)\n\n\tapirouter.HandleTX(\"POST\", \"/buildings\",\n\t\ts.db, crud.CreateBuilding, true)\n\tapirouter.HandleTX(\"GET\", fmt.Sprintf(\"/buildings/{id:%s}\", uuidRegexp),\n\t\ts.db, crud.ReadBuilding, true)\n\tapirouter.HandleTX(\"PUT\", fmt.Sprintf(\"/buildings/{id:%s}\", uuidRegexp),\n\t\ts.db, crud.UpdateBuilding, true)\n\tapirouter.HandleTX(\"DELETE\", fmt.Sprintf(\"/buildings/{id:%s}\", uuidRegexp),\n\t\ts.db, crud.DeleteBuilding, true)\n\tapirouter.HandleTX(\"GET\", \"/buildings\",\n\t\ts.db, crud.QueryBuildings, true)\n\n\tapirouter.HandleTX(\"POST\", fmt.Sprintf(\n\t\t\"/buildings/{building_id:%s}/locations\", uuidRegexp),\n\t\ts.db, crud.CreateLocation, true)\n\tapirouter.HandleTX(\"GET\", fmt.Sprintf(\n\t\t\"/buildings/{building_id:%s}/locations/{id:%s}\", uuidRegexp, uuidRegexp),\n\t\ts.db, crud.ReadLocation, true)\n\tapirouter.HandleTX(\"PUT\", fmt.Sprintf(\n\t\t\"/buildings/{building_id:%s}/locations/{id:%s}\", uuidRegexp, uuidRegexp),\n\t\ts.db, crud.UpdateLocation, true)\n\tapirouter.HandleTX(\"DELETE\", fmt.Sprintf(\n\t\t\"/buildings/{building_id:%s}/locations/{id:%s}\", uuidRegexp, uuidRegexp),\n\t\ts.db, crud.DeleteLocation, true)\n\tapirouter.HandleTX(\"GET\", fmt.Sprintf(\n\t\t\"/buildings/{building_id:%s}/locations\", uuidRegexp),\n\t\ts.db, 
crud.QueryLocations, true)\n\tapirouter.HandleTX(\"GET\", \"/locations\",\n\t\ts.db, crud.QueryLocations, true)\n\n\tapirouter.HandleTX(\"POST\", fmt.Sprintf(\n\t\t\"/buildings/{building_id:%s}/locations/{location_id:%s}/printers\",\n\t\tuuidRegexp, uuidRegexp),\n\t\ts.db, crud.CreatePrinter, true)\n\tapirouter.HandleTX(\"GET\", fmt.Sprintf(\n\t\t\"/buildings/{building_id:%s}/locations/{location_id:%s}/printers/{id:%s}\",\n\t\tuuidRegexp, uuidRegexp, uuidRegexp),\n\t\ts.db, crud.ReadPrinter, true)\n\tapirouter.HandleTX(\"PUT\", fmt.Sprintf(\n\t\t\"/buildings/{building_id:%s}/locations/{location_id:%s}/printers/{id:%s}\",\n\t\tuuidRegexp, uuidRegexp, uuidRegexp),\n\t\ts.db, crud.UpdatePrinter, true)\n\tapirouter.HandleTX(\"DELETE\", fmt.Sprintf(\n\t\t\"/buildings/{building_id:%s}/locations/{location_id:%s}/printers/{id:%s}\",\n\t\tuuidRegexp, uuidRegexp, uuidRegexp),\n\t\ts.db, crud.DeletePrinter, true)\n\tapirouter.HandleTX(\"GET\", fmt.Sprintf(\n\t\t\"/buildings/{building_id:%s}/locations/{location_id:%s}/printers\",\n\t\tuuidRegexp, uuidRegexp),\n\t\ts.db, crud.QueryPrinters, true)\n\tapirouter.HandleTX(\"GET\", \"/printers\",\n\t\ts.db, crud.QueryPrinters, true)\n\n\tapirouter.HandleTX(\"POST\", \"/manufacturers\",\n\t\ts.db, crud.CreateManufacturer, true)\n\tapirouter.HandleTX(\"GET\", fmt.Sprintf(\"/manufacturers/{id:%s}\", uuidRegexp),\n\t\ts.db, crud.ReadManufacturer, true)\n\tapirouter.HandleTX(\"PUT\", fmt.Sprintf(\"/manufacturers/{id:%s}\", uuidRegexp),\n\t\ts.db, crud.UpdateManufacturer, true)\n\tapirouter.HandleTX(\"DELETE\", fmt.Sprintf(\"/manufacturers/{id:%s}\", uuidRegexp),\n\t\ts.db, crud.DeleteManufacturer, true)\n\tapirouter.HandleTX(\"GET\", \"/manufacturers\",\n\t\ts.db, crud.QueryManufacturers, true)\n\n\tapirouter.HandleTX(\"POST\", fmt.Sprintf(\n\t\t\"/manufacturers/{manufacturer_id:%s}/models\", uuidRegexp),\n\t\ts.db, crud.CreateModel, true)\n\tapirouter.HandleTX(\"GET\", fmt.Sprintf(\n\t\t\"/manufacturers/{manufacturer_id:%s}/models/{id:%s}\", uuidRegexp, uuidRegexp),\n\t\ts.db, crud.ReadModel, true)\n\tapirouter.HandleTX(\"PUT\", fmt.Sprintf(\n\t\t\"/manufacturers/{manufacturer_id:%s}/models/{id:%s}\", uuidRegexp, uuidRegexp),\n\t\ts.db, crud.UpdateModel, true)\n\tapirouter.HandleTX(\"DELETE\", fmt.Sprintf(\n\t\t\"/manufacturers/{manufacturer_id:%s}/models/{id:%s}\", uuidRegexp, uuidRegexp),\n\t\ts.db, crud.DeleteModel, true)\n\tapirouter.HandleTX(\"GET\", fmt.Sprintf(\n\t\t\"/manufacturers/{manufacturer_id:%s}/models\", uuidRegexp),\n\t\ts.db, crud.QueryModels, true)\n\tapirouter.HandleTX(\"GET\", \"/models\",\n\t\ts.db, crud.QueryModels, true)\n\n\tapirouter.HandleTX(\"POST\", \"/users\",\n\t\ts.db, crud.CreateUser, true)\n\tapirouter.HandleTX(\"GET\", fmt.Sprintf(\"/users/{id:%s}\", uuidRegexp),\n\t\ts.db, crud.ReadUser, true)\n\tapirouter.HandleTX(\"PUT\", fmt.Sprintf(\"/users/{id:%s}\", uuidRegexp),\n\t\ts.db, crud.UpdateUser, true)\n\tapirouter.HandleTX(\"DELETE\", fmt.Sprintf(\"/users/{id:%s}\", uuidRegexp),\n\t\ts.db, crud.DeleteUser, true)\n\tapirouter.HandleTX(\"GET\", \"/users\",\n\t\ts.db, crud.QueryUsers, true)\n\n\tapirouter.HandleTX(\"POST\", \"/groups\",\n\t\ts.db, crud.CreateGroup, true)\n\tapirouter.HandleTX(\"GET\", fmt.Sprintf(\"/groups/{id:%s}\", uuidRegexp),\n\t\ts.db, crud.ReadGroup, true)\n\tapirouter.HandleTX(\"PUT\", fmt.Sprintf(\"/groups/{id:%s}\", uuidRegexp),\n\t\ts.db, crud.UpdateGroup, true)\n\tapirouter.HandleTX(\"DELETE\", fmt.Sprintf(\"/groups/{id:%s}\", uuidRegexp),\n\t\ts.db, crud.DeleteGroup, 
true)\n\tapirouter.HandleTX(\"GET\", \"/groups\",\n\t\ts.db, crud.QueryGroups, true)\n\n\tapirouter.HandleTX(\"GET\", fmt.Sprintf(\n\t\t\"/groups/{group_id:%s}/users\",\n\t\tuuidRegexp),\n\t\ts.db, crud.ReadGroupUsers, true)\n\tapirouter.HandleTX(\"GET\", fmt.Sprintf(\n\t\t\"/users/{user_id:%s}/groups\",\n\t\tuuidRegexp),\n\t\ts.db, crud.ReadUserGroups, true)\n\tapirouter.HandleTX(\"PUT\", fmt.Sprintf(\n\t\t\"/groups/{group_id:%s}/users/{user_id:%s}/assign\",\n\t\tuuidRegexp, uuidRegexp),\n\t\ts.db, crud.RelateGroupUser, true)\n\tapirouter.HandleTX(\"DELETE\", fmt.Sprintf(\n\t\t\"/groups/{group_id:%s}/users/{user_id:%s}/assign\",\n\t\tuuidRegexp, uuidRegexp),\n\t\ts.db, crud.UnrelateGroupUser, true)\n\n\tapirouter.HandleTX(\"GET\", fmt.Sprintf(\n\t\t\"/buildings/{building_id:%s}/locations/{location_id:%s}/users\",\n\t\tuuidRegexp, uuidRegexp),\n\t\ts.db, crud.ReadLocationUsers, true)\n\tapirouter.HandleTX(\"GET\", fmt.Sprintf(\n\t\t\"/users/{user_id:%s}/locations\",\n\t\tuuidRegexp),\n\t\ts.db, crud.ReadUserLocations, true)\n\tapirouter.HandleTX(\"PUT\", fmt.Sprintf(\n\t\t\"/buildings/{building_id:%s}/locations/{location_id:%s}/users/{user_id:%s}/assign\",\n\t\tuuidRegexp, uuidRegexp, uuidRegexp),\n\t\ts.db, crud.RelateLocationUser, true)\n\tapirouter.HandleTX(\"DELETE\", fmt.Sprintf(\n\t\t\"/buildings/{building_id:%s}/locations/{location_id:%s}/users/{user_id:%s}/assign\",\n\t\tuuidRegexp, uuidRegexp, uuidRegexp),\n\t\ts.db, crud.UnrelateLocationUser, true)\n\n\tapirouter.HandleTX(\"GET\", fmt.Sprintf(\n\t\t\"/buildings/{building_id:%s}/locations/{location_id:%s}/groups\",\n\t\tuuidRegexp, uuidRegexp),\n\t\ts.db, crud.ReadLocationGroups, true)\n\tapirouter.HandleTX(\"GET\", fmt.Sprintf(\n\t\t\"/groups/{group_id:%s}/locations\",\n\t\tuuidRegexp),\n\t\ts.db, crud.ReadGroupLocations, true)\n\tapirouter.HandleTX(\"PUT\", fmt.Sprintf(\n\t\t\"/buildings/{building_id:%s}/locations/{location_id:%s}/groups/{group_id:%s}/assign\",\n\t\tuuidRegexp, uuidRegexp, uuidRegexp),\n\t\ts.db, crud.RelateLocationGroup, true)\n\tapirouter.HandleTX(\"DELETE\", fmt.Sprintf(\n\t\t\"/buildings/{building_id:%s}/locations/{location_id:%s}/groups/{group_id:%s}/assign\",\n\t\tuuidRegexp, uuidRegexp, uuidRegexp),\n\t\ts.db, crud.UnrelateLocationGroup, true)\n\n\tapirouter.HandleTX(\"GET\", \"/users/{username}/printers\",\n\t\ts.db, readUserPrinters, false)\n\n\treturn r\n}", "func New() *Router {\n\tnode := node{component: \"/\", isNamedParam: false}\n\treturn &Router{tree: &node}\n}", "func NewRouter() *gin.Engine {\n\trouter := logger_util.NewGinWithLogrus(logger.GinLog)\n\tAddService(router)\n\treturn router\n}", "func NewRouter() *gin.Engine {\n\trouter := logger_util.NewGinWithLogrus(logger.GinLog)\n\tAddService(router)\n\treturn router\n}", "func New() Router {\n\trouter := &router{\n\t\tdefaultMessageHandler: msghandlers.HandleNotFound,\n\t}\n\treturn router\n}", "func NewRouter() *gin.Engine {\n\tr := gin.Default()\n\n\tapi := r.Group(\"/api\")\n\t/*api.Use(handler.AuthRequired)*/\n\t{\n\t\tcontacts := api.Group(\"/contacts\")\n\t\t/*persons.Use(AccountRequired)*/\n\t\t{\n\t\t\tcontacts.GET(\"/\", GetAllContacts)\n\t\t\tcontacts.GET(\"/:id\", GetContact)\n\n\t\t\t// @todo Replace the post requests with a message queue\n\n\t\t\tcontacts.POST(\"/\", PostContact)\n\t\t\tcontacts.PUT(\"/:id\", PutContact)\n\n\t\t\tnotes := contacts.Group(\"/:id/notes\")\n\t\t\t{\n\t\t\t\tnotes.GET(\"/\", GetAllNotes)\n\n\t\t\t\tnotes.POST(\"/\", PostNote)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn r\n}", "func NewRouter(doc *openapi3.T, opts 
...openapi3.ValidationOption) (routers.Router, error) {\n\tif err := doc.Validate(context.Background(), opts...); err != nil {\n\t\treturn nil, fmt.Errorf(\"validating OpenAPI failed: %w\", err)\n\t}\n\trouter := &Router{doc: doc}\n\troot := router.node()\n\tfor path, pathItem := range doc.Paths {\n\t\tfor method, operation := range pathItem.Operations() {\n\t\t\tmethod = strings.ToUpper(method)\n\t\t\tif err := root.Add(method+\" \"+path, &routers.Route{\n\t\t\t\tSpec: doc,\n\t\t\t\tPath: path,\n\t\t\t\tPathItem: pathItem,\n\t\t\t\tMethod: method,\n\t\t\t\tOperation: operation,\n\t\t\t}, nil); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\treturn router, nil\n}", "func NewRouter(a, c restful.FilterFunction, profiling bool) *Router {\n\t// Activate etcd, cpu, memory and timer profiling\n\tif profiling {\n\t\tsetupProfiling()\n\t}\n\t// Hooks processed before any endpoint\n\tif a == nil {\n\t\ta = IdentityFilter\n\t}\n\tif c == nil {\n\t\tc = IdentityFilter\n\t}\n\n version := StableVersion()\n\n\treturn &Router{\n prefix: fmt.Sprintf(\"/v%d/methods/\", version.major),\n\t\tauthentification: a,\n\t\tcontrol: c,\n\t\tprofiling: profiling,\n\t}\n}", "func New(eventEnv EventEnv) *Router {\n\t// FIXME: how do we account for NavSkipRender? Is it even needed? The render loop is controlled outside of\n\t// this so maybe just leave rendering outside of the router altogether.\n\n\t// TODO: WE NEED TO THINK ABOUT SYNCHRONIZATION FOR ALL THIS STUFF - WHAT HAPPENS IF A\n\t// GOROUTINE TRIES TO NAVIGATE WHILE SOMETHING ELSE IS HAPPENING?\n\n\treturn &Router{\n\t\teventEnv: eventEnv,\n\t\tbindParamMap: make(map[string]BindParam),\n\t}\n}", "func NewRouter(client *at.AirtableClient) *mux.Router {\n\n\t// Create new gorilla/mux router with with strict slash\n\trouter := mux.NewRouter().StrictSlash(true)\n\tfor _, route := range routes {\n\n\t\t// Associate each route with an HTTP endpoint\n\t\trouter.\n\t\t\tMethods(route.Method).\n\t\t\tPath(route.Pattern).\n\t\t\tName(route.Name).\n\t\t\tHandler(Handler{client, route.Function})\n\n\t}\n\n\t// Return router to be used by server\n\treturn router\n}", "func NewRouter(s *discordgo.Session, owners, prefixes []string) *Router {\n\tcache := ttlcache.NewCache()\n\tcache.SkipTTLExtensionOnHit(true)\n\n\th := ttlcache.NewCache()\n\th.SetCacheSizeLimit(10000)\n\th.SetTTL(15 * time.Minute)\n\th.SetExpirationCallback(func(key string, value interface{}) {\n\t\tvalue.(func())()\n\t})\n\n\trouter := &Router{\n\t\tBotOwners: owners,\n\t\tSession: s,\n\t\tCooldowns: cache,\n\t\tHandlers: h,\n\t\tPrefixes: prefixes,\n\t\tPermCache: NewPermCache(s),\n\t}\n\n\trouter.AddCommand(&Command{\n\t\tName: \"Commands\",\n\t\tDescription: \"Show a list of commands\",\n\t\tUsage: \"[command]\",\n\t\tCommand: router.dummy,\n\t})\n\n\treturn router\n}", "func New() *Router {\n\treturn NewWithServiceName(\"httprouter.router\", tracer.DefaultTracer)\n}", "func (r *Router) Router(path string) *Router {\n\tsteps := splitPath(path)\n\treturn r.registerRouter(steps)\n}", "func NewRouter() *gin.Engine {\n\trouter := gin.Default()\n\tAddService(router)\n\treturn router\n}", "func MakeRouter() *Router {\n\tr := &Router{notFound: defaultFallback, notAllowedMethod: defaultNotAllowedMethod, prefix: \"\", panicHandler: defaultPanicHandler}\n\treturn r\n}", "func NewRouter(generationPath string) *http.ServeMux {\n\tif generationPath == \"\" {\n\t\tgenerationPath = \"/generate\"\n\t}\n\t// Create router and define routes and return that router\n\trouter := 
http.NewServeMux()\n\n\trouter.HandleFunc(\"/version\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprintf(w, \"Pilot Light version: %s\\n\", plVersion)\n\t})\n\n\trouter.HandleFunc(\"/healthz\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprintf(w, \"OK\")\n\t})\n\n\trouter.HandleFunc(\"/assets/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprintf(w, \"OK\")\n\t})\n\n\trouter.HandleFunc(generationPath, func(w http.ResponseWriter, r *http.Request) {\n\t\tclientIPAddress := ReadUserIPNoPort(r)\n\t\taddress, err := NslookupIP(clientIPAddress)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error in DNS resolution!\\n\\n%s\", err)\n\t\t}\n\t\tif err == nil {\n\t\t\tlog.Printf(\"Address %s Resolved To Hostname: %s\", clientIPAddress, address[0])\n\t\t}\n\n\t\t// Loop through ignition sets to do matching\n\t\tmatchedHostname := false\n\t\tfor k, v := range readConfig.PilotLight.IgnitionSets {\n\t\t\tif strings.Contains(address[0], v.HostnameFormat) {\n\t\t\t\tmatchedHostname = true\n\t\t\t\tlog.Printf(\"Matched hostname %s to IgnitionSet #%d %s\", address[0], k, v.Name)\n\t\t\t\tdat, err := ioutil.ReadFile(readConfig.PilotLight.AssetDirectory + \"/conf/\" + v.Type + \".ign\")\n\t\t\t\tcheck(err)\n\t\t\t\tfmt.Fprintf(w, string(dat))\n\t\t\t}\n\t\t}\n\t\tif !matchedHostname {\n\t\t\tif readConfig.PilotLight.DefaultIgnitionFile != \"none\" {\n\t\t\t\tlog.Printf(\"No match for hostname %s to any IgnitionSets, serving %s.ign\", address[0], readConfig.PilotLight.DefaultIgnitionFile)\n\t\t\t\tdat, err := ioutil.ReadFile(readConfig.PilotLight.AssetDirectory + \"/conf/\" + readConfig.PilotLight.DefaultIgnitionFile + \".ign\")\n\t\t\t\tcheck(err)\n\t\t\t\tfmt.Fprintf(w, string(dat))\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"No match for hostname %s to any IgnitionSets\", address[0])\n\t\t\t}\n\t\t}\n\t})\n\n\treturn router\n}", "func NewRouter(settings RouterSetting) (model.Router, error) {\n\tr := Router{}\n\tvar err error\n\tauthorizer := authorization.NewAuthorizer()\n\n\t// API router setup\n\tapiCorsSettings := model.DefaultCors\n\tif settings.AppOriginChecker != nil {\n\t\tapiCorsSettings.AllowOriginRequestFunc = settings.AppOriginChecker.CheckOrigin\n\t}\n\tapiCors := cors.New(apiCorsSettings)\n\n\tapiSettings := api.RouterSettings{\n\t\tServer: settings.Server,\n\t\tLogger: settings.Logger,\n\t\tLoggerSettings: settings.LoggerSettings,\n\t\tAuthorizer: authorizer,\n\t\tHost: settings.HostName,\n\t\tLoginAppPath: loginAppPath,\n\t\tLoginWith: settings.Server.Settings().Login.LoginWith,\n\t\tTFAType: settings.Server.Settings().Login.TFAType,\n\t\tTFAResendTimeout: settings.Server.Settings().Login.TFAResendTimeout,\n\t\tCors: apiCors,\n\t}\n\n\tapiRouter, err := api.NewRouter(apiSettings)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr.APIRouter = apiRouter\n\n\tif settings.Server.Settings().LoginWebApp.Type == model.FileStorageTypeNone {\n\t\tr.LoginAppRouter = nil\n\t} else {\n\t\t// Web login app setup\n\t\tloginAppSettings := spa.SPASettings{\n\t\t\tName: \"LOGIN_APP\",\n\t\t\tRoot: \"/\",\n\t\t\tFileSystem: http.FS(settings.Server.Storages().LoginAppFS),\n\t\t}\n\t\tr.LoginAppRouter, err = spa.NewRouter(loginAppSettings, nil, settings.Logger)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Admin panel\n\tif settings.ServeAdminPanel {\n\t\trouterSettings := admin.RouterSettings{\n\t\t\tServer: settings.Server,\n\t\t\tLogger: settings.Logger,\n\t\t\tHost: settings.HostName,\n\t\t\tPrefix: adminpanelAPIPath,\n\t\t\tRestart: 
settings.RestartChan,\n\t\t\tLoginAppPath: loginAppPath,\n\t\t\tOriginUpdate: func() error {\n\t\t\t\treturn settings.AppOriginChecker.Update()\n\t\t\t},\n\t\t}\n\n\t\t// init admin panel api router\n\t\tr.AdminRouter, err = admin.NewRouter(routerSettings)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// init admin panel web app\n\t\tadminPanelAppSettings := spa.SPASettings{\n\t\t\tName: \"ADMIN_PANEL\",\n\t\t\tRoot: \"/\",\n\t\t\tFileSystem: http.FS(fsWithConfig(settings.Server.Storages().AdminPanelFS)),\n\t\t}\n\t\tr.AdminPanelRouter, err = spa.NewRouter(adminPanelAppSettings, nil, settings.Logger)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tr.setupRoutes()\n\treturn &r, nil\n}", "func Create() *mux.Router {\n\tr := mux.NewRouter()\n\treturn routes.SetRouter(r)\n}", "func NewRouter(name string) (*Router, error) {\n\tproject, err := prj.Current()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Router{\n\t\tName: name,\n\t\tProject: project,\n\t}, nil\n}", "func NewRouter(index app.Handler) *Router {\n\thandlers := make(methodHandlers)\n\thandlers[http.MethodGet] = index\n\n\t// Initialize trie\n\troutes := &node{\n\t\tsplit: \"\",\n\t\tchildren: []*node{},\n\t\thandlers: handlers,\n\t}\n\n\treturn &Router{routes}\n}", "func (as *ActorSystem) BuildRouter(name string) *routeBuilder {\n\treturn &routeBuilder{\n\t\tas,\n\t\tname,\n\t\t[]RouteFunc{},\n\t}\n}", "func NewRouter(routerIP net.IP, destinationIP net.IP) Router {\n\treturn Router{IP: routerIP, Nonce: nonce(destinationIP)}\n}", "func NewRouter(s *app.Services) http.Handler {\n\tr := chi.NewRouter()\n\tr.Mount(\"/user\", NewUserHandler(s.User).routes())\n\tr.Mount(\"/auth\", NewAuthHandler(s.Auth).routes())\n\treturn r\n}", "func initializeRouter(service *shortener.Service, database *db.DB) *mux.Router {\n\tr := mux.NewRouter()\n\t// add shortenURL POST endpoint to the router\n\tr.HandleFunc(shortenURLEndpoint, func(w http.ResponseWriter, r *http.Request) {\n\t\thandler.ShortenHandler(w, r, service)\n\t}).Methods(http.MethodPost)\n\n\tr.HandleFunc(originalURLEndpoint, func(w http.ResponseWriter, r *http.Request) {\n\t\thandler.OriginalURLHandler(w, r, database)\n\t}).Methods(http.MethodGet)\n\n\treturn r\n}", "func (sh *Handler) Router(ctx *fasthttp.RequestCtx) {\n\tif ctx.IsPost() {\n\t\tsh.doPost(ctx)\n\t\treturn\n\t}\n\tif ctx.IsGet() {\n\t\tsh.doGet(ctx)\n\t\treturn\n\t}\n\tsh.doDefault(ctx)\n\treturn\n}", "func New(d Dispatcher) *Router {\n\treturn &Router{\n\t\tdispatcher: d,\n\t}\n}", "func SetupRouter() *gin.Engine {\n\trouter := gin.Default() // Create router\n\trouter.GET(\"/\", Heartbeat)\n\trouter.GET(\"/helloget/:arg\", Helloget)\n\n\treturn router\n}", "func NewRouter() *mux.Router {\n\troutes := []util.Route{\n\t\tutil.Route{\n\t\t\tName: \"Hosts\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/hosts\",\n\t\t\tHandlerFunc: getHosts,\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"Hosts\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/hosts/{id}\",\n\t\t\tHandlerFunc: getHostInfo,\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"Devices\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/hosts/{id}/devices\",\n\t\t\tHandlerFunc: getDevices,\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"Hostname\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/hosts/{id}/hostname\",\n\t\t\tHandlerFunc: getHostNameAndDomain,\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"CreateDevice\",\n\t\t\tMethod: \"POST\",\n\t\t\tPattern: \"/hosts/{id}/devices\",\n\t\t\tHandlerFunc: createDevices,\n\t\t},\n\t\tutil.Route{\n\t\t\tName: 
\"CreateFileSystemOnDevice\",\n\t\t\tMethod: \"PUT\",\n\t\t\tPattern: \"/hosts/{id}/devices/{serialnumber}/{filesystem}\",\n\t\t\tHandlerFunc: createFileSystemOnDevice,\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"OfflineDevice\",\n\t\t\tMethod: \"PUT\",\n\t\t\tPattern: \"/hosts/{id}/devices/{serialnumber}/actions/offline\",\n\t\t\tHandlerFunc: offlineDevice,\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"DeleteDevice\",\n\t\t\tMethod: \"DELETE\",\n\t\t\tPattern: \"/hosts/{id}/devices/{serialnumber}\",\n\t\t\tHandlerFunc: deleteDevice,\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"DeviceWithSerialNumber\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/hosts/{id}/devices/{serialnumber}\",\n\t\t\tHandlerFunc: getDeviceForSerialNumber,\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"PartitionsForDevice\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/hosts/{id}/devices/{serialnumber}/partitions\",\n\t\t\tHandlerFunc: getPartitionsForDevice,\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"MountsOnHost\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/hosts/{id}/mounts/{serialNumber}\",\n\t\t\tHandlerFunc: getMountsOnHostForSerialNumber,\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"MountDevice\",\n\t\t\tMethod: \"POST\",\n\t\t\tPattern: \"/hosts/{id}/mounts/{serialNumber}\",\n\t\t\tHandlerFunc: mountDevice,\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"UnmountDevice\",\n\t\t\tMethod: \"DELETE\",\n\t\t\tPattern: \"/hosts/{id}/mounts/{mountID}\",\n\t\t\tHandlerFunc: unmountDevice,\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"MountForDevice\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/hosts/{id}/mounts/{mountid}/{serialNumber}\",\n\t\t\tHandlerFunc: getMountForDevice,\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"HostInitiators\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/hosts/{id}/initiators\",\n\t\t\tHandlerFunc: getHostInitiators,\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"HostNetworks\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/hosts/{id}/networks\",\n\t\t\tHandlerFunc: getHostNetworks,\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"Recommendations\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/hosts/{id}/recommendations\",\n\t\t\tHandlerFunc: getHostRecommendations,\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"DeletingDevices\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/hosts/{id}/deletingdevices\",\n\t\t\tHandlerFunc: getDeletingDevices,\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"ChapInfo\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/hosts/{id}/chapinfo\",\n\t\t\tHandlerFunc: getChapInfo,\n\t\t},\n\t}\n\trouter := mux.NewRouter().StrictSlash(true)\n\tutil.InitializeRouter(router, routes)\n\treturn router\n}", "func NewRouter(\n\tvalidator utilities.Validator,\n\tredisClient utilities.RedisClient,\n) *httprouter.Router {\n\trouter := httprouter.New()\n\n\tfor _, route := range RouterRoutes {\n\t\trouter.Handle(\n\t\t\troute.Method,\n\t\t\troute.Path,\n\t\t\troute.Handler(validator, redisClient),\n\t\t)\n\t}\n\n\trouter.NotFound = http.HandlerFunc(handlers.HandleNotFound)\n\trouter.MethodNotAllowed = http.HandlerFunc(handlers.HandleMethodNotAllowed)\n\trouter.PanicHandler = handlers.HandlePanic\n\n\treturn router\n}", "func CreateRouter(handlerFunc http.HandlerFunc) *mux.Router {\n router := mux.NewRouter()\n\n config := dots_config.GetSystemConfig()\n prefixPath := config.ClientRestfulApiConfiguration.RestfulApiPath\n\n restfulHandlerFunc := createRestfulHandlerFunc(handlerFunc)\n\n // router.HandleFunc(\"/test\", restfulHandlerFunc).Methods(\"GET\")\n router.HandleFunc(prefixPath + MITIGATION_PATH + \"/cuid={cuid}\", restfulHandlerFunc).Methods(\"GET\")\n 
router.HandleFunc(prefixPath + MITIGATION_PATH + \"/cuid={cuid}/mid={mid}\", restfulHandlerFunc).Methods(\"GET\")\n router.HandleFunc(prefixPath + MITIGATION_PATH + \"/cuid={cuid}/mid={mid}\", restfulHandlerFunc).Methods(\"PUT\")\n router.HandleFunc(prefixPath + MITIGATION_PATH + \"/cuid={cuid}/mid={mid}\", restfulHandlerFunc).Methods(\"DELETE\")\n\n return router\n}", "func NewRouter(config RouterConfig, logger watermill.LoggerAdapter) (*Router, error) {\n\tconfig.setDefaults()\n\tif err := config.Validate(); err != nil {\n\t\treturn nil, errors.Wrap(err, \"invalid config\")\n\t}\n\n\tif logger == nil {\n\t\tlogger = watermill.NopLogger{}\n\t}\n\n\treturn &Router{\n\t\tconfig: config,\n\n\t\thandlers: map[string]*handler{},\n\n\t\thandlersWg: &sync.WaitGroup{},\n\n\t\trunningHandlersWg: &sync.WaitGroup{},\n\t\trunningHandlersWgLock: &sync.Mutex{},\n\n\t\thandlerAdded: make(chan struct{}),\n\n\t\thandlersLock: &sync.RWMutex{},\n\n\t\tclosingInProgressCh: make(chan struct{}),\n\t\tclosedCh: make(chan struct{}),\n\n\t\tlogger: logger,\n\n\t\trunning: make(chan struct{}),\n\t}, nil\n}", "func (r *Router) SetupRouter() *gin.Engine {\n\trouter := gin.New()\n\n\t//middleware setup\n\trouter.Use(ginglog.Logger(5), gin.Recovery())\n\n\t//diagnostic endpoint\n\tdiagnostic := router.Group(\"api/v1\")\n\t{\n\t\tdiagnostic.GET(\"/ping\", func(c *gin.Context) {\n\t\t\tc.JSON(http.StatusOK, gin.H{\n\t\t\t\t\"Name\": \"Friend Management\",\n\t\t\t\t\"message\": \"OK\",\n\t\t\t\t\"serverTime\": time.Now().UTC(),\n\t\t\t\t\"version\": \"0.1\",\n\t\t\t})\n\t\t})\n\t}\n\n\t//friend endpoint\n\tfriend := router.Group(\"api/v1/friend\")\n\t{\n\t\tfriend.POST(\"/connect\", r.friendController.CreateConnection)\n\t\tfriend.POST(\"/list\", r.friendController.List)\n\t\tfriend.POST(\"/common\", r.friendController.Common)\n\t}\n\n\t//notification endpoint\n\tnotification := router.Group(\"api/v1/notification\")\n\t{\n\t\tnotification.POST(\"/subscribe\", r.notificationController.Subscribe)\n\t\tnotification.POST(\"/update\", r.notificationController.Update)\n\t\tnotification.POST(\"/block\", r.notificationController.Block)\n\t}\n\n\treturn router\n}", "func outerRouter(e *bm.Engine) {\n\te.GET(\"/monitor/ping\", ping)\n\tg := e.Group(\"/x/web\", authSvc.UserWeb)\n\t{\n\t\twebDanmuRouter(g)\n\t\tacademyRouter(g)\n\t\tstaffRouter(g)\n\t\tswitchRouter(g)\n\t\twebElecRouter(g)\n\t\twebAssistRouter(g)\n\t\tnewcomerRouter(g)\n\t\tg.GET(\"/ugcpay/protocol\", webUgcPayProtocol)\n\t\t// mission\n\t\tg.GET(\"/mission/protocol\", webMissionProtocol)\n\t\t// netsafe\n\t\tg.POST(\"/ns/md5\", webNsMd5)\n\t\t//white\n\t\tg.GET(\"/white\", webWhite)\n\t\t// archive.\n\t\tg.GET(\"/archive/parts\", webArchVideos)\n\t\tg.GET(\"/archive/view\", webViewArc)\n\t\tg.GET(\"/archives\", webArchives)\n\t\tg.GET(\"/archive/staff/applies\", webStaffApplies)\n\t\tg.GET(\"/archive/pre\", webViewPre)\n\t\tg.GET(\"/archive/videos\", webVideos)\n\t\tg.POST(\"/archive/delete\", webDelArc)\n\t\tg.GET(\"/archive/tags\", webTags)\n\t\tg.GET(\"/archive/desc/format\", webDescFormat)\n\t\t// history\n\t\tg.GET(\"/archive/history/list\", webHistoryList)\n\t\tg.GET(\"/archive/history/view\", webHistoryView)\n\t\t// ad\n\t\tg.GET(\"/ad/game/list\", webAdGameList)\n\t\t// appeal.\n\t\tg.GET(\"/appeal/list\", webAppealList)\n\t\tg.GET(\"/appeal/detail\", webAppealDetail)\n\t\tg.GET(\"/appeal/contact\", webAppealContact)\n\t\tg.POST(\"/appeal/add\", webAppealAdd)\n\t\tg.POST(\"/appeal/reply\", antispamSvc.ServeHTTP, webAppealReply)\n\t\tg.POST(\"/appeal/down\", 
webAppealDown)\n\t\tg.POST(\"/appeal/star\", webAppealStar)\n\t\t// cover list.\n\t\tg.GET(\"/archive/covers\", coverList)\n\t\tg.GET(\"/archive/recovers\", webRecommandCover)\n\t\t// index.\n\t\tg.GET(\"/index/stat\", webIndexStat)\n\t\tg.GET(\"/index/tool\", webIndexTool)\n\t\tg.GET(\"/index/full\", webIndexFull) //collect_arc\n\t\tg.GET(\"/index/notify\", webIndexNotify)\n\t\tg.GET(\"/index/operation\", webIndexOper)\n\t\tg.GET(\"/index/version\", webIndexVersion)\n\t\tg.GET(\"/index/newcomer\", webIndexNewcomer)\n\t\t// data\n\t\tg.GET(\"/data/videoquit\", webVideoQuitPoints)\n\t\tg.GET(\"/data/archive\", webArchive)\n\t\tg.GET(\"/data/article\", webArticleData)\n\t\tg.GET(\"/data/base\", base)\n\t\tg.GET(\"/data/trend\", trend)\n\t\tg.GET(\"/data/action\", action)\n\t\tg.GET(\"/data/survey\", survey)\n\t\tg.GET(\"/data/pandect\", pandect)\n\t\tg.GET(\"/data/fan\", webFan)\n\t\tg.GET(\"/data/playsource\", webPlaySource)\n\t\tg.GET(\"/data/playanalysis\", webArcPlayAnalysis)\n\t\tg.GET(\"/data/article/thirty\", webArtThirtyDay)\n\t\tg.GET(\"/data/article/rank\", webArtRank)\n\t\tg.GET(\"/data/article/source\", webArtReadAnalysis)\n\t\t// water mark\n\t\tg.GET(\"/watermark\", waterMark)\n\t\tg.POST(\"/watermark/set\", waterMarkSet)\n\t\t// feedback\n\t\tg.GET(\"/feedbacks\", webFeedbacks)\n\t\tg.GET(\"/feedback/detail\", webFeedbackDetail)\n\t\tg.GET(\"/feedback/tags\", webFeedbackTags)\n\t\tg.GET(\"/feedback/newtags\", webFeedbackNewTags)\n\t\tg.POST(\"/feedback/add\", webFeedbackAdd)\n\t\tg.POST(\"/feedback/close\", webFeedbackClose)\n\t\t// reply\n\t\tg.GET(\"/replies\", replyList)\n\t\t// template.\n\t\tg.GET(\"/tpls\", webTemplates)\n\t\tg.POST(\"/tpl/add\", webAddTpl)\n\t\tg.POST(\"/tpl/update\", webUpdateTpl)\n\t\tg.POST(\"/tpl/delete\", webDelTpl)\n\t\t// fans medal\n\t\tg.GET(\"/medal/status\", webMedalStatus)\n\t\tg.GET(\"/medal/recent\", webRecentFans)\n\t\tg.POST(\"/medal/open\", webMedalOpen)\n\t\tg.POST(\"/medal/check\", webMedalCheck)\n\t\tg.GET(\"/medal/rank\", webMedalRank)\n\t\tg.POST(\"/medal/rename\", webMedalRename)\n\t\tg.GET(\"/medal/fans\", webFansMedal)\n\t\t// article.\n\t\tg.GET(\"/article/author\", webAuthor)\n\t\tg.GET(\"/article/view\", webArticle)\n\t\tg.GET(\"/article/list\", webArticleList)\n\t\tg.GET(\"/article/pre\", webArticlePre)\n\t\tg.POST(\"/article/submit\", webSubArticle)\n\t\tg.POST(\"/article/update\", webUpdateArticle)\n\t\tg.POST(\"/article/delete\", webDelArticle)\n\t\tg.POST(\"/article/withdraw\", webWithDrawArticle)\n\t\tg.POST(\"/article/upcover\", antispamSvc.ServeHTTP, webArticleUpCover)\n\t\tg.GET(\"/draft/view\", webDraft)\n\t\tg.GET(\"/draft/list\", webDraftList)\n\t\tg.POST(\"/draft/addupdate\", webSubmitDraft)\n\t\tg.POST(\"/draft/delete\", webDeleteDraft)\n\t\tg.POST(\"/article/capture\", antispamSvc.ServeHTTP, webArticleCapture)\n\t\t// cm\n\t\tg.GET(\"/cm/oasis/stat\", webCmOasisStat)\n\t\t// common\n\t\tg.GET(\"/user/mid\", webUserMid)\n\t\tg.GET(\"/user/search\", webUserSearch)\n\t\t//viewpoint\n\t\tg.GET(\"/viewpoints\", webViewPoints)\n\t\t//g.POST(\"/viewpoints/edit\", webViewPointsEdit)\n\t}\n\th5 := e.Group(\"/x/h5\")\n\t{\n\t\t// app h5 cooperate pager\n\t\th5.GET(\"/cooperate/pre\", authSvc.User, appCooperatePre)\n\t\t// bgm\n\t\th5.GET(\"/bgm/ext\", authSvc.User, appBgmExt)\n\t\t// faq\n\t\th5.GET(\"/faq/editor\", authSvc.User, appH5FaqEditor)\n\t\th5.POST(\"/bgm/feedback\", authSvc.User, appH5BgmFeedback)\n\t\th5.GET(\"/elec/bill\", authSvc.User, appElecBill)\n\t\th5.GET(\"/elec/rank/recent\", authSvc.User, 
appElecRecentRank)\n\t\th5.GET(\"/medal/status\", authSvc.User, appMedalStatus)\n\t\th5.POST(\"/medal/check\", authSvc.User, appMedalCheck)\n\t\th5.POST(\"/medal/open\", authSvc.User, appMedalOpen)\n\t\th5.POST(\"/medal/rename\", authSvc.User, appMedalRename)\n\t\t//academy\n\t\th5.POST(\"/academy/play/add\", authSvc.Guest, h5AddPlay) //添加播放\n\t\th5.POST(\"/academy/play/del\", authSvc.Guest, h5DelPlay) //删除播放\n\t\th5.GET(\"/academy/play/list\", authSvc.User, h5PlayList) //我的课程\n\t\th5.GET(\"/academy/play/view\", authSvc.User, h5ViewPlay) //查看我的课程\n\t\th5.GET(\"/academy/theme/dir\", h5ThemeDir) //主题课程目录 对应职业列表\n\t\th5.GET(\"/academy/newb/course\", h5NewbCourse) //新人课程\n\t\th5.GET(\"/academy/tag\", h5Tags) //标签目录\n\t\th5.GET(\"/academy/archive\", h5Archive) //课程列表\n\t\th5.GET(\"/academy/feature\", h5Feature) //精选课程\n\t\th5.GET(\"/academy/recommend/v2\", authSvc.Guest, h5RecommendV2) //推荐课程v2\n\t\th5.GET(\"/academy/theme/course/v2\", h5ThemeCousreV2) //技能树(主题课程)v2\n\t\th5.GET(\"/academy/keywords\", h5Keywords) //搜索关键词提示\n\t\t// data center\n\t\th5.GET(\"/data/archive\", authSvc.User, appDataArc)\n\t\th5.GET(\"/data/videoquit\", authSvc.User, appDataVideoQuit)\n\t\th5.GET(\"/data/fan\", authSvc.User, appFan) //粉丝用户信息分析总览\n\t\th5.GET(\"/data/fan/rank\", authSvc.User, appFanRank) //新粉丝排行榜\n\t\th5.GET(\"/data/overview\", authSvc.User, appOverView) //新数据概览\n\t\th5.GET(\"/data/archive/analyze\", authSvc.User, appArchiveAnalyze) //新稿件数据分析\n\t\th5.GET(\"/data/video/retention\", authSvc.User, appVideoRetention) //新视频播放完成度\n\t\th5.GET(\"/data/article\", authSvc.User, appDataArticle)\n\t\th5.GET(\"/archives/simple\", authSvc.User, appSimpleArcVideos)\n\t\t// watermark\n\t\th5.GET(\"/watermark\", authSvc.User, waterMark)\n\t\th5.POST(\"/watermark/set\", authSvc.User, waterMarkSet)\n\t\t// up weekly honor\n\t\th5.GET(\"/weeklyhonor\", authSvc.Guest, weeklyHonor)\n\t\t// switch weekly honor subscribe\n\t\th5.POST(\"/weeklyhonor/subscribe\", authSvc.User, weeklyHonorSubSwitch)\n\t\t// task system\n\t\th5.POST(\"/task/bind\", authSvc.User, h5TaskBind)\n\t\th5.GET(\"/task/list\", authSvc.User, h5TaskList)\n\t\th5.POST(\"/task/reward/receive\", authSvc.User, h5RewardReceive)\n\t\th5.POST(\"/task/reward/activate\", authSvc.User, h5RewardActivate)\n\t\th5.GET(\"/task/reward/list\", authSvc.User, h5RewardReceiveList)\n\t\th5.GET(\"/task/pub/list\", authSvc.User, taskPubList) //其他业务方查看任务列表\n\t}\n\tapp := e.Group(\"/x/app\")\n\t{\n\t\tappDanmuRouter(app)\n\t\t// h5\n\t\tapp.GET(\"/h5/pre\", authSvc.User, appH5Pre)\n\t\tapp.GET(\"/h5/mission/type\", authSvc.User, appH5MissionByType)\n\t\tapp.GET(\"/h5/archive/tags\", authSvc.User, appH5ArcTags)\n\t\tapp.GET(\"/h5/archive/tag/info\", authSvc.User, appH5ArcTagInfo)\n\t\tapp.GET(\"/banner\", authSvc.User, appBanner)\n\t\t// archive\n\t\tapp.GET(\"/mission/type\", authSvc.UserMobile, appMissionByType)\n\t\tapp.GET(\"/index\", authSvc.User, appIndex)\n\t\tapp.GET(\"/archives\", authSvc.UserMobile, appArchives)\n\t\tapp.GET(\"/archives/simple\", authSvc.UserMobile, appSimpleArcVideos)\n\t\tapp.GET(\"/up/info\", authSvc.UserMobile, appUpInfo)\n\t\t// main app features\n\t\tapp.GET(\"/pre\", authSvc.User, appPre)\n\t\tapp.GET(\"/archive/pre\", authSvc.User, appArchivePre)\n\t\tapp.GET(\"/archive/desc/format\", authSvc.UserMobile, appArcDescFormat)\n\t\tapp.GET(\"/archive/view\", authSvc.UserMobile, appArcView)\n\t\tapp.POST(\"/archive/delete\", authSvc.UserMobile, appArcDel)\n\t\t// reply.\n\t\tapp.GET(\"/replies\", authSvc.UserMobile, appReplyList)\n\t\t// 
data\n\t\tapp.GET(\"/data/archive\", authSvc.UserMobile, appDataArc)\n\t\tapp.GET(\"/data/videoquit\", authSvc.UserMobile, appDataVideoQuit)\n\t\tapp.GET(\"/data/fan\", authSvc.UserMobile, appFan)\n\t\tapp.GET(\"/data/fan/rank\", authSvc.UserMobile, appFanRank) //新粉丝排行榜\n\t\tapp.GET(\"/data/overview\", authSvc.UserMobile, appOverView) //新数据概览\n\t\tapp.GET(\"/data/archive/analyze\", authSvc.UserMobile, appArchiveAnalyze) //新稿件数据分析\n\t\tapp.GET(\"/data/video/retention\", authSvc.UserMobile, appVideoRetention) //新视频播放完成度\n\t\tapp.GET(\"/data/article\", authSvc.UserMobile, appDataArticle)\n\t\t// elec\n\t\tapp.GET(\"/elec/bill\", authSvc.UserMobile, appElecBill)\n\t\tapp.GET(\"/elec/rank/recent\", authSvc.UserMobile, appElecRecentRank)\n\t\t// fans medal\n\t\tapp.GET(\"/medal/status\", authSvc.UserMobile, appMedalStatus)\n\t\tapp.POST(\"/medal/check\", authSvc.UserMobile, appMedalCheck)\n\t\tapp.POST(\"/medal/open\", authSvc.UserMobile, appMedalOpen)\n\t\tapp.POST(\"/medal/rename\", authSvc.UserMobile, appMedalRename)\n\t\t// article\n\t\tapp.GET(\"/article/list\", authSvc.UserMobile, appArticleList)\n\t\t// material\n\t\tapp.GET(\"/material/pre\", authSvc.UserMobile, appMaterialPre)\n\t\tapp.GET(\"/material/view\", authSvc.UserMobile, appMaterial)\n\t\t// bgm\n\t\tapp.GET(\"/bgm/pre\", authSvc.UserMobile, appBgmPre)\n\t\tapp.GET(\"/bgm/list\", authSvc.UserMobile, appBgmList)\n\t\tapp.GET(\"/bgm/view\", authSvc.UserMobile, appBgmView)\n\t\tapp.GET(\"/bgm/search\", authSvc.UserMobile, appBgmSearch)\n\t\tapp.GET(\"/cooperate/view\", authSvc.User, appCooperate)\n\t\t// task\n\t\tapp.POST(\"/newcomer/task/bind\", authSvc.UserMobile, appTaskBind)\n\t}\n\tcli := e.Group(\"/x/client\", authSvc.User)\n\t{\n\t\t// archive.\n\t\tcli.GET(\"/archives\", clientArchives)\n\t\tcli.GET(\"/archive/search\", clientArchiveSearch)\n\t\tcli.GET(\"/archive/view\", clientViewArc)\n\t\tcli.POST(\"/archive/delete\", clientDelArc)\n\t\tcli.GET(\"/archive/pre\", clientPre)\n\t\tcli.GET(\"/archive/tags\", clientTags)\n\t\t// template.\n\t\tcli.GET(\"/tpls\", clientTemplates)\n\t\tcli.POST(\"/tpl/add\", clientAddTpl)\n\t\tcli.POST(\"/tpl/update\", clientUpdateTpl)\n\t\tcli.POST(\"/tpl/delete\", clientDelTpl)\n\t\t// cover list.\n\t\tcli.GET(\"/archive/covers\", coverList)\n\t}\n\tgeeg := e.Group(\"/x/geetest\", authSvc.UserWeb)\n\t{\n\t\t// geetest.\n\t\tgeeg.GET(\"/pre\", gtPreProcess)\n\t\tgeeg.POST(\"/validate\", gtValidate)\n\t\tgeeg.GET(\"/pre/add\", gtPreProcessAdd)\n\t}\n\tcreator := e.Group(\"/x/creator\", authSvc.UserMobile)\n\t{\n\t\t// index\n\t\tcreator.GET(\"/my\", creatorMy)\n\t\tcreator.GET(\"/index\", creatorIndex)\n\t\tcreator.GET(\"/earnings\", creatorEarnings)\n\t\tcreator.GET(\"/banner\", creatorBanner)\n\t\tcreator.GET(\"/replies\", creatorReplyList)\n\t\t//archive\n\t\tcreator.GET(\"/archives\", creatorArchives)\n\t\tcreator.GET(\"/archive/tag/info\", creatorArcTagInfo)\n\t\tcreator.GET(\"/archive/view\", creatorViewArc)\n\t\tcreator.GET(\"/archive/videoquit\", creatorVideoQuit)\n\t\tcreator.GET(\"/archive/data\", creatorArchiveData)\n\t\tcreator.POST(\"/archive/delete\", creatorDelArc)\n\t\tcreator.GET(\"/archive/pre\", creatorPre)\n\t\tcreator.GET(\"/archive/tags\", creatorPredictTag)\n\t\tcreator.GET(\"/archive/desc/format\", creatorDescFormat)\n\t\t// article\n\t\tcreator.GET(\"/article/pre\", creatorArticlePre)\n\t\tcreator.GET(\"/article/list\", creatorArticleList)\n\t\tcreator.GET(\"/article/view\", creatorArticle)\n\t\tcreator.POST(\"/article/delete\", 
creatorDelArticle)\n\t\tcreator.POST(\"/article/withdraw\", creatorWithDrawArticle)\n\t\tcreator.GET(\"/draft/list\", creatorDraftList)\n\t\t// danmu\n\t\tcreator.GET(\"/danmu/list\", creatorDmList)\n\t\tcreator.GET(\"/danmu/recent\", creatorDmRecent)\n\t\tcreator.POST(\"/danmu/edit\", creatorDmEdit)\n\t\tcreator.POST(\"/danmu/edit/batch\", creatorDmEditBatch)\n\t\t//data\n\t\tcreator.GET(\"/data/archive\", creatorDataArchive)\n\t\tcreator.GET(\"/data/article\", creatorDataArticle)\n\t}\n\n\ti := e.Group(\"/x/internal/creative\", verifySvc.Verify)\n\t{\n\t\t// TODO deprecated\n\t\ti.GET(\"/porder\", upPorder)\n\t\t// for main app\n\t\ti.GET(\"/app/pre\", appNewPre)\n\t\t// get order game info for app\n\t\ti.GET(\"/arc/commercial\", arcCommercial)\n\t\ti.POST(\"/watermark/set\", waterMarkSetInternal)\n\t\ti.GET(\"/order/game\", arcOrderGameInfo)\n\t\ti.POST(\"/upload/material\", uploadMaterial)\n\t\ti.POST(\"/join/growup/account\", growAccountStateInternal)\n\t\ti.GET(\"/video/viewpoints\", videoViewPoints)\n\t\ti.GET(\"/archive/bgm\", arcBgmList)\n\t\ti.GET(\"/archive/staff\", arcStaff)\n\t\ti.GET(\"/archive/vote\", voteAcsByTime)\n\n\t\t//联合投稿配置\n\t\ti.GET(\"/staff/config\", staffConfig)\n\n\t\t// data\n\t\ti.GET(\"/data/videoquit\", setContextMid, webVideoQuitPoints)\n\t\ti.GET(\"/data/archive\", setContextMid, webArchive)\n\t\ti.GET(\"/data/article\", setContextMid, webArticleData)\n\t\ti.GET(\"/data/base\", setContextMid, base)\n\t\ti.GET(\"/data/trend\", setContextMid, trend)\n\t\ti.GET(\"/data/action\", setContextMid, action)\n\t\ti.GET(\"/data/survey\", setContextMid, survey)\n\t\ti.GET(\"/data/pandect\", setContextMid, pandect)\n\t\ti.GET(\"/data/fan\", setContextMid, webFan)\n\t\ti.GET(\"/data/playsource\", setContextMid, webPlaySource)\n\t\ti.GET(\"/data/playanalysis\", setContextMid, webArcPlayAnalysis)\n\t\ti.GET(\"/data/article/thirty\", setContextMid, webArtThirtyDay)\n\t\ti.GET(\"/data/article/rank\", setContextMid, webArtRank)\n\t\ti.GET(\"/data/article/source\", setContextMid, webArtReadAnalysis)\n\n\t\t// archive\n\t\ti.GET(\"/archives\", setContextMid, webArchives)\n\t\t// videos\n\t\ti.GET(\"/archive/videos\", setContextMid, webVideos)\n\n\t\t// history\n\t\ti.GET(\"/archive/history/list\", setContextMid, webHistoryList)\n\n\t\t// danmu\n\t\ti.GET(\"/danmu/distri\", setContextMid, webDmDistri)\n\n\t\t// up weekly honor\n\t\ti.GET(\"/task/pub/list\", setContextMid, taskPubList) //其他业务方查看任务列表\n\t}\n}", "func New(staff repository.IStaffRepo, security security.ISecurity) *router {\n\treturn &router{\n\t\tstaff: staff,\n\t\tsecurity: security,\n\t}\n}", "func NewRouter(app *app.App) http.Handler {\n\tr := chi.NewRouter()\n\th := &handler{app: app}\n\tr.Get(\"/\", h.Get)\t\n\tr.With(auth.Middleware(app)).Post(\"/\", h.Create)\n\tr.With(auth.Middleware(app)).Delete(\"/{id}\", h.Delete)\n\treturn r\n}", "func New(h handlers.Handler, l *zap.SugaredLogger) Router {\n\tr := chi.NewRouter()\n\n\tr.Mount(\"/\", newIndexRouter(h))\n\n\tr.NotFound(h.NotFound)\n\tr.MethodNotAllowed(h.MethodNotAllowed)\n\n\trouter := &router{\n\t\tHandler: r,\n\t\tlogger: l,\n\t}\n\n\trouter.Handler = middleware.Log(router.Handler, router.logger)\n\n\treturn router\n}", "func newRouter() *mux.Router {\n\tr := mux.NewRouter()\n\tstaticFileDirectory := http.Dir(\"./assets/\")\n\tstaticFileHandler := http.StripPrefix(\"/assets/\", http.FileServer(staticFileDirectory))\n\tr.PathPrefix(\"/assets/\").Handler(staticFileHandler).Methods(\"GET\")\n\n\t// r.HandleFunc(\"/bird\", 
getBirdHandler).Methods(\"GET\")\n\tr.HandleFunc(\"/register\", apis.CreateUser).Methods(\"POST\")\n\tr.HandleFunc(\"/login\", apis.LoginUser).Methods(\"POST\")\n\tr.HandleFunc(\"/logout\", apis.LogoutUser).Methods(\"GET\")\n\tr.HandleFunc(\"/\", middlewares.UserLogged(homePage)).Methods(\"GET\")\n\tr.HandleFunc(\"/blog\", middlewares.UserLogged(apis.CreateBlog)).Methods(\"POST\")\n\tr.HandleFunc(\"/blogs\", middlewares.UserLogged(apis.GetBlogs)).Methods(\"GET\")\n\tr.HandleFunc(\"/blog/{id:[0-9]+}\", middlewares.UserLogged(apis.GetBlog)).Methods(\"GET\")\n\tr.HandleFunc(\"/tag/\", middlewares.UserLogged(apis.GetBlogsWithTag)).Methods(\"GET\")\n\treturn r\n}", "func CreateRouter(handler controllers.RequestHandler) *httprouter.Router {\n\trouter := httprouter.New()\n\n\trouter.PanicHandler = controllers.PanicHandler\n\n\t//user routes\n\trouter.POST(\"/user\", handler.PostUser)\n\trouter.DELETE(\"/user/:id\", handler.DeleteUser)\n\trouter.PATCH(\"/user/password\", handler.PatchUserPassword)\n\n\treturn router\n}", "func NewRouter(\n\tapp app.App,\n\tnatsClient *nats.Conn,\n) (*gin.Engine, error) {\n\tgin.SetMode(gin.ReleaseMode)\n\tgin.DisableConsoleColor()\n\n\trouter := gin.New()\n\trouter.Use(accesslog.Middleware())\n\trouter.Use(gin.Recovery())\n\trouter.Use(identity.Middleware(\n\t\tidentity.NewMiddlewareOptions().\n\t\t\tSetPathRegex(`^/api/(devices|management)/v[0-9]/`),\n\t))\n\trouter.Use(requestid.Middleware())\n\trouter.Use(cors.New(cors.Config{\n\t\tAllowAllOrigins: true,\n\t\tAllowCredentials: true,\n\t\tAllowHeaders: []string{\n\t\t\t\"Accept\",\n\t\t\t\"Allow\",\n\t\t\t\"Content-Type\",\n\t\t\t\"Origin\",\n\t\t\t\"Authorization\",\n\t\t\t\"Accept-Encoding\",\n\t\t\t\"Access-Control-Request-Headers\",\n\t\t\t\"Header-Access-Control-Request\",\n\t\t},\n\t\tAllowMethods: []string{\n\t\t\thttp.MethodGet,\n\t\t\thttp.MethodPost,\n\t\t\thttp.MethodPut,\n\t\t\thttp.MethodDelete,\n\t\t\thttp.MethodOptions,\n\t\t},\n\t\tAllowWebSockets: true,\n\t\tExposeHeaders: []string{\n\t\t\t\"Location\",\n\t\t\t\"Link\",\n\t\t},\n\t\tMaxAge: time.Hour * 12,\n\t}))\n\n\tstatus := NewStatusController(app)\n\trouter.GET(APIURLInternalAlive, status.Alive)\n\trouter.GET(APIURLInternalHealth, status.Health)\n\n\ttenants := NewTenantsController(app)\n\trouter.POST(APIURLInternalTenants, tenants.Provision)\n\n\tdevice := NewDeviceController(app, natsClient)\n\trouter.GET(APIURLDevicesConnect, device.Connect)\n\trouter.POST(APIURLInternalDevices, device.Provision)\n\trouter.DELETE(APIURLInternalDevicesID, device.Delete)\n\n\tmanagement := NewManagementController(app, natsClient)\n\trouter.GET(APIURLManagementDevice, management.GetDevice)\n\trouter.GET(APIURLManagementDeviceConnect, management.Connect)\n\n\treturn router, nil\n}", "func (s *Server) Router(w http.ResponseWriter, r *http.Request) {\n\tswitch {\n\n\tcase r.Method == http.MethodGet && r.URL.Path == getProductsPath:\n\t\thandler(s.handleGetProducts).ServeHTTP(s.Log, w, r)\n\n\tcase r.Method == http.MethodGet && productPath.MatchString(r.URL.Path):\n\t\thandler(s.handleGetProduct).ServeHTTP(s.Log, w, r)\n\n\tcase r.Method == http.MethodPost && removeProductsPath.MatchString(r.URL.Path):\n\t\thandler(s.handleRemoveProduct).ServeHTTP(s.Log, w, r)\n\n\tcase r.Method == http.MethodPost && r.URL.Path == importProductsPath:\n\t\thandler(s.handleImportProducts).ServeHTTP(s.Log, w, r)\n\n\tcase r.Method == http.MethodPost && r.URL.Path == importArticlesPath:\n\t\thandler(s.handleImportArticles).ServeHTTP(s.Log, w, r)\n\n\tcase r.Method == http.MethodGet 
&& r.URL.Path == getArticlesPath:\n\t\thandler(s.handleGetArticles).ServeHTTP(s.Log, w, r)\n\n\t}\n}", "func NewRouter() *Router {\n\treturn &Router{httprouter.New()}\n}" ]
[ "0.7551313", "0.72328985", "0.7204123", "0.7153823", "0.7105062", "0.7093772", "0.70665324", "0.7046973", "0.69841635", "0.69757175", "0.69553894", "0.69019806", "0.6884239", "0.68499106", "0.6813694", "0.6808235", "0.67856115", "0.67693615", "0.6755253", "0.6675599", "0.6664318", "0.66385216", "0.6631877", "0.66309303", "0.6629629", "0.6626813", "0.6621066", "0.6619364", "0.6619364", "0.661029", "0.66064936", "0.6593072", "0.65929073", "0.6588176", "0.6578569", "0.6564754", "0.65617704", "0.65442955", "0.6542758", "0.6537032", "0.6530594", "0.6529939", "0.65243816", "0.65092397", "0.6509099", "0.64955837", "0.6484661", "0.64818335", "0.6481253", "0.6478604", "0.6473778", "0.64670295", "0.6456559", "0.64530987", "0.64511824", "0.6449344", "0.6444627", "0.644029", "0.6439985", "0.643724", "0.64284456", "0.6427046", "0.641551", "0.641551", "0.64096034", "0.6407329", "0.64031035", "0.64005786", "0.63936734", "0.6389559", "0.6386463", "0.6386256", "0.6382386", "0.6382047", "0.6369848", "0.6349531", "0.6345354", "0.6342435", "0.63406503", "0.6339909", "0.6335871", "0.63304836", "0.63292927", "0.6327819", "0.63084304", "0.63072795", "0.63050616", "0.6293785", "0.6285955", "0.62821496", "0.6278577", "0.6273072", "0.6271448", "0.62622374", "0.625664", "0.6255772", "0.62508285", "0.6248707", "0.62462497", "0.6246111", "0.6243242" ]
0.0
-1
Validate checks if user info is valid
func (user *User) Validate() *errors.RestError { user.Email = strings.TrimSpace(strings.ToLower(user.Email)) if user.Email == "" { return errors.NewBadRequestError("Invalid email address") } if user.Password == "" { return errors.NewBadRequestError("Invalid password") } return nil }
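A minimal usage sketch for the Validate method above — hypothetical caller code, assuming the User struct and the errors.NewBadRequestError helper from the snippet are in scope:

// exampleValidate is a hypothetical caller; User and errors.RestError
// are assumed to come from the snippet above, fmt from the standard library.
func exampleValidate() {
	user := &User{Email: "  Jane@Example.COM  ", Password: "secret"}
	if restErr := user.Validate(); restErr != nil {
		// Taken only when the email or password is empty.
		fmt.Println(restErr)
		return
	}
	// Validate mutates the receiver: the email is lowercased and trimmed.
	fmt.Println(user.Email) // prints "jane@example.com"
}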
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func ValidBaseInfo(ctx *gin.Context) {\n\tres := helper.Res{}\n\n\tvar baseInfo BaseInfo\n\tif err := ctx.Bind(&baseInfo); err != nil {\n\t\tres.Status(http.StatusBadRequest).Error(err).Send(ctx)\n\t\treturn\n\t}\n\n\t// user does exist\n\tif _, err := models.FindOneByUsername(baseInfo.Username); err != nil {\n\t\tres.Status(http.StatusBadRequest).Error(err).Send(ctx)\n\t\treturn\n\t}\n\n\t// the email of user does exist\n\tif _, err := models.FindOneByEmail(baseInfo.Email); err != nil {\n\t\tres.Status(http.StatusBadRequest).Error(err).Send(ctx)\n\t\treturn\n\t}\n\n\tres.Success(gin.H{\n\t\t\"isValid\": true,\n\t}).Send(ctx)\n}", "func validate(user *customer_api.DbUser, allowEmpty bool) error {\n\tconst minNameLength, maxNameLength = 3, 20\n\tconst emailRegexString = \"^[a-zA-Z0-9+_.-]+@[a-zA-Z0-9.-]+$\"\n\tvar emailRegex = regexp.MustCompile(emailRegexString)\n\n\tif !(allowEmpty && user.Email == \"\") {\n\t\tif len(user.Email) < 5 || !emailRegex.MatchString(user.Email) {\n\t\t\treturn errors.New(\"invalid email\")\n\t\t}\n\t}\n\n\tif !(allowEmpty && user.FirstName == \"\") {\n\t\tif len(user.FirstName) < minNameLength || len(user.FirstName) > maxNameLength {\n\t\t\treturn errors.New(\"first_name should be between 3 and 20 characters\")\n\t\t}\n\t}\n\n\tif !(allowEmpty && user.LastName == \"\") {\n\t\tif len(user.LastName) < minNameLength || len(user.LastName) > maxNameLength {\n\t\t\treturn errors.New(\"last_name should be between 3 and 20 characters\")\n\t\t}\n\t}\n\n\tif !(allowEmpty && user.Phone == 0) {\n\t\tif user.Phone < 1000000000 || user.Phone > 9999999999 {\n\t\t\treturn errors.New(\"invalid phone no\")\n\t\t}\n\t}\n\n\tif !(allowEmpty && user.Id == \"\") {\n\t\tif user.Id == \"\" {\n\t\t\treturn errors.New(\"id cannot be empty\")\n\t\t}\n\t}\n\treturn nil\n}", "func (u *User) Valid() error {\n\tif u.BearerToken != \"\" {\n\t\treturn nil\n\t}\n\tif u.Username == \"\" {\n\t\treturn errors.New(\"Empty username\")\n\t}\n\tif u.ApiToken == \"\" && u.Password == \"\" {\n\t\treturn errors.New(\"Empty API token and password\")\n\t}\n\treturn nil\n\n}", "func (m *UserDetails) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func CheckDetails(details *userpb.User) error {\n\tif details.GetFname() == \"\" {\n\t\treturn errors.New(\"first name cannot be empty\")\n\t} else if details.GetCity() == \"\" {\n\t\treturn errors.New(\"city cannot be empty\")\n\t} else if math.Ceil(math.Log10(float64(details.GetPhone()))) < 10 { //phone number should be 10 digit long\n\t\treturn errors.New(\"phone number should be atleast 10 digit long\")\n\t} else if details.GetHeight() == 0 {\n\t\treturn errors.New(\"height cannot be zero\")\n\t}\n\treturn nil\n}", "func (u User) IsValid() []error{\n\tvar errs []error\n\tfirstname := strings.Trim(u.FirstName, \" \")\n\tlastname := strings.Trim(u.LastName, \" \")\n\n\tif firstname != \"\" {\n\t\tif strings.Contains(firstname, \" \"){\n\t\t\terrs = append(errs, errors.New(\"FirstName can't have spaces\"))\n\t\t}\n\t\tif len(firstname) < 2 {\n\t\t\terrs = append(errs, errors.New(\"FirstName must be at least 2 characters\"))\n\t\t}\n\t\tif !helper.IsLetter(firstname) {\n\t\t\terrs = append(errs, errors.New(\"Firstname contains a number\"))\n\t\t}\n\t}\n\n\tif lastname != \"\"{\n\t\tif strings.Contains(lastname, \" \"){\n\t\t\terrs = append(errs, errors.New(\"LastName can't have spaces\"))\n\t\t}\n\n\t\tif len(lastname) < 2 {\n\t\t\terrs = append(errs, errors.New(\"LastName must be at least 2 characters\"))\n\t\t}\n\n\t\tif 
!helper.IsLetter(lastname) {\n\t\t\terrs = append(errs, errors.New(\"Lastname contains a number\"))\n\t\t}\n\t}\n\n\tif u.Email != \"\" {\n\t\tre := regexp.MustCompile(\"^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$\")\n\n\t\tif !re.MatchString(u.Email) {\n\t\t\terrs = append(errs, errors.New(\"Email address is not valid\"))\n\t\t}\n\t}\n\n\n\tyear, _, _, _, _, _ := helper.DateDiff(u.DateOfBirth, time.Now())\n\tif year < 18 {\n\t\terrs = append(errs, errors.New(\"You must be 18 or more\"))\n\t}\n\tif len(errs) > 0 {\n\t\treturn errs\n\t}\n\treturn nil\n}", "func checkUserInfo(info userInfo) responseMsg {\n\tr := responseMsg{}\n\tif info.password != info.password2 {\n\t\tr.success = false\n\t\tr.Message = \"Passwords do not match\"\n\t\treturn r\n\t}\n\n\tif checkPassword(info.password) == false {\n\t\tr.success = false\n\t\tr.Message = \"Password is not strong enough.It should contain at least one capital letter and one special character\"\n\t\treturn r\n\t}\n\t//Check if a user with this username already exists\n\tif getUser(info.Username).Username != \"\" {\n\t\tr.success = false\n\t\tr.Message = \"A user with this username already exists!\"\n\t\treturn r\n\t}\n\tr.success = true\n\tr.Message = \"Open your terminal and join a room!\"\n\treturn r\n}", "func (u *User) Valid() error {\n\n\tif u.Name == \"\" {\n\t\treturn errors.New(\"The name isn't valid\")\n\t}\n\n\tif u.Email == \"\" || !u.ValidEmail() {\n\t\treturn errors.New(\"The email isn't valid\")\n\t}\n\n\treturn nil\n}", "func (u *User) Validate() error {\n\t// check name first: must have no special character\n\tmatched, _ := regexp.Match(\"^[A-Za-z0-9\\\\s]+$\", []byte(u.Name))\n\tif !matched {\n\t\treturn errors.New(\"Nama hanya boleh mengandung karakter alfanumerik\")\n\t}\n\n\t// check phone\n\treturn nil\n}", "func checkUserResponse(user, resp User) (err error) {\n\tif user.Name != resp.Name {\n\t\terr = errors.New(\"Name isn't equal\")\n\t\treturn\n\t}\n\tif user.Username != resp.Username {\n\t\terr = errors.New(\"Username isn't equal\")\n\t\treturn\n\t}\n\tif user.Phone != resp.Phone {\n\t\terr = errors.New(\"Phone isn't equal\")\n\t\treturn\n\t}\n\tif user.Password != \"\" {\n\t\terr = errors.New(\"Password isn't empty\")\n\t\treturn\n\t}\n\treturn\n}", "func validateUser(name, password string) error {\n\tif name == \"\" {\n\t\treturn errors.New(\"User name must not be blank\")\n\t}\n\tif password == \"\" {\n\t\treturn errors.New(\"Password must be at least one character\")\n\t}\n\n\treturn nil\n}", "func ValidUser(un, pw string) bool {\n\t// xyzzy - TODO At this point you really shoudl check v.s. 
the d.b.\n\treturn true\n}", "func (u *User) Validate() (map[string]string, bool) {\n\terrMessages := map[string]string{}\n\terr := false\n\n\tswitch {\n\tcase u.Name == \"\":\n\t\terrMessages[\"name\"] = \"Nama can't be empty.\"\n\t\terr = true\n\t\tfallthrough\n\tcase u.Username == \"\":\n\t\terrMessages[\"username\"] = \"Username can't be empty.\"\n\t\terr = true\n\t\tfallthrough\n\tcase u.Password == \"\":\n\t\terrMessages[\"password\"] = \"Password can't be empty.\"\n\t\terr = true\n\t}\n\n\treturn errMessages, err\n}", "func (user *User) Validate() (map[string]interface{}, bool) {\n\n\tif user.FirstName == \"\" {\n\t\treturn utils.Message(false, \"User First name should be on the payload\"), false\n\t}\n\n\tif user.LastName == \"\" {\n\t\treturn utils.Message(false, \"User Last name should be on the payload\"), false\n\t}\n\n\tif user.PhoneNumber == \"\" {\n\t\treturn utils.Message(false, \"User Phone number should be on the payload\"), false\n\t}\n\n\tif user.Age == \"\" {\n\t\treturn utils.Message(false, \"User Age should be on the payload\"), false\n\t}\n\n\tif user.Email == \"\" {\n\t\treturn utils.Message(false, \"User Email should be on the payload\"), false\n\t}\n\n\t//All the required parameters are present\n\treturn utils.Message(true, \"success\"), true\n}", "func Validation(a User) error {\n\tfmt.Println(\"user :: \", a)\n\tvar rxEmail = regexp.MustCompile(\"^[a-zA-Z0-9.!#$%&'*+\\\\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$\")\n\tswitch {\n\tcase len(strings.TrimSpace(a.Password)) == 0:\n\t\treturn ErrPasswordInvalid\n\tcase len(strings.TrimSpace(a.Email)) == 0 || !rxEmail.MatchString(a.Email):\n\t\treturn ErrEmailInvalid\n\tdefault:\n\t\treturn nil\n\t}\n}", "func (u *userInput) OK() error {\n\t// check the white space\n\tu.Email = strings.TrimSpace(u.Email)\n\tu.Username = strings.TrimSpace(u.Username)\n\n\t// validate the regex\n\tif !user.RXEmail.MatchString(u.Email) {\n\t\treturn user.ErrInvalidEmail\n\t}\n\tif !user.RXUsername.MatchString(u.Username) {\n\t\treturn user.ErrInvalidUsername\n\t}\n\treturn nil\n}", "func (user *User) Validate() *errors.RestErr {\n\t// Delete spaces at first_name, last_name and email before saving\n\tuser.FirstName = strings.TrimSpace(user.FirstName)\n\tuser.LastName = strings.TrimSpace(user.LastName)\n\tuser.Email = strings.TrimSpace(strings.ToLower(user.Email))\n\n\tif user.Email == \"\"{\n\t\treturn errors.NewBadRequestError(\"Email addres is required\")\n\t}\n\tif !ValidateEmail(user.Email){\n\t\treturn errors.NewBadRequestError(\"Wrong email format\")\n\t}\n\tif strings.TrimSpace(user.Password)== \"\" || len(strings.TrimSpace(user.Password)) < 8{\n\t\treturn errors.NewBadRequestError(\"Password is required and password length must be higher than 8 characters\")\n\t}\n\n\n\treturn nil\n}", "func (uu *UserUpdate) check() error {\n\tif v, ok := uu.mutation.AccountName(); ok {\n\t\tif err := user.AccountNameValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"account_name\", err: fmt.Errorf(\"ent: validator failed for field \\\"account_name\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uu.mutation.StaffType(); ok {\n\t\tif err := user.StaffTypeValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"staff_type\", err: fmt.Errorf(\"ent: validator failed for field \\\"staff_type\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uu.mutation.FamilyName(); ok {\n\t\tif err := user.FamilyNameValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: 
\"family_name\", err: fmt.Errorf(\"ent: validator failed for field \\\"family_name\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uu.mutation.GivenName(); ok {\n\t\tif err := user.GivenNameValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"given_name\", err: fmt.Errorf(\"ent: validator failed for field \\\"given_name\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uu.mutation.DisplayName(); ok {\n\t\tif err := user.DisplayNameValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"display_name\", err: fmt.Errorf(\"ent: validator failed for field \\\"display_name\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uu.mutation.IDNumber(); ok {\n\t\tif err := user.IDNumberValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"id_number\", err: fmt.Errorf(\"ent: validator failed for field \\\"id_number\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uu.mutation.Sex(); ok {\n\t\tif err := user.SexValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"sex\", err: fmt.Errorf(\"ent: validator failed for field \\\"sex\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uu.mutation.PhoneNumber(); ok {\n\t\tif err := user.PhoneNumberValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"phone_number\", err: fmt.Errorf(\"ent: validator failed for field \\\"phone_number\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uu.mutation.Address(); ok {\n\t\tif err := user.AddressValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"address\", err: fmt.Errorf(\"ent: validator failed for field \\\"address\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uu.mutation.StaffID(); ok {\n\t\tif err := user.StaffIDValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"staff_id\", err: fmt.Errorf(\"ent: validator failed for field \\\"staff_id\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uu.mutation.PersonalEmail(); ok {\n\t\tif err := user.PersonalEmailValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"personal_email\", err: fmt.Errorf(\"ent: validator failed for field \\\"personal_email\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uu.mutation.IntranetWorkEmail(); ok {\n\t\tif err := user.IntranetWorkEmailValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"intranet_work_email\", err: fmt.Errorf(\"ent: validator failed for field \\\"intranet_work_email\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uu.mutation.ExtranetWorkEmail(); ok {\n\t\tif err := user.ExtranetWorkEmailValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"extranet_work_email\", err: fmt.Errorf(\"ent: validator failed for field \\\"extranet_work_email\\\": %w\", err)}\n\t\t}\n\t}\n\treturn nil\n}", "func (m *NamedAuthInfo) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateUser(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (mt *User) Validate() (err error) {\n\n\tif mt.Href == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"href\"))\n\t}\n\tif mt.Name == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"name\"))\n\t}\n\n\treturn\n}", "func (uu *UserUpdate) check() error {\n\tif v, ok := uu.mutation.Username(); ok {\n\t\tif err := user.UsernameValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"username\", err: fmt.Errorf(\"ent: validator failed for field \\\"username\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uu.mutation.Fullname(); ok {\n\t\tif err := 
user.FullnameValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"fullname\", err: fmt.Errorf(\"ent: validator failed for field \\\"fullname\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uu.mutation.Password(); ok {\n\t\tif err := user.PasswordValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"password\", err: fmt.Errorf(\"ent: validator failed for field \\\"password\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uu.mutation.Email(); ok {\n\t\tif err := user.EmailValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"email\", err: fmt.Errorf(\"ent: validator failed for field \\\"email\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uu.mutation.Phone(); ok {\n\t\tif err := user.PhoneValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"phone\", err: fmt.Errorf(\"ent: validator failed for field \\\"phone\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uu.mutation.Bio(); ok {\n\t\tif err := user.BioValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"bio\", err: fmt.Errorf(\"ent: validator failed for field \\\"bio\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uu.mutation.Intro(); ok {\n\t\tif err := user.IntroValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"intro\", err: fmt.Errorf(\"ent: validator failed for field \\\"intro\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uu.mutation.GithubProfile(); ok {\n\t\tif err := user.GithubProfileValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"github_profile\", err: fmt.Errorf(\"ent: validator failed for field \\\"github_profile\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uu.mutation.ProfilePictureURL(); ok {\n\t\tif err := user.ProfilePictureURLValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"profile_picture_url\", err: fmt.Errorf(\"ent: validator failed for field \\\"profile_picture_url\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uu.mutation.Status(); ok {\n\t\tif err := user.StatusValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"status\", err: fmt.Errorf(\"ent: validator failed for field \\\"status\\\": %w\", err)}\n\t\t}\n\t}\n\treturn nil\n}", "func (u *user) Valid(v *validation.Validation) {\n\tif strings.Index(u.Name, \"admin\") != -1 {\n\t\t// Set error messages of Name by SetError and HasErrors will return true\n\t\tv.SetError(\"Name\", \"Can't contain 'admin' in Name\")\n\t}\n}", "func verifyUserFields(pwdUser ign3types.PasswdUser) error {\n\temptyUser := ign3types.PasswdUser{}\n\ttempUser := pwdUser\n\tif tempUser.Name == constants.CoreUserName && ((tempUser.PasswordHash) != nil || len(tempUser.SSHAuthorizedKeys) >= 1) {\n\t\ttempUser.Name = \"\"\n\t\ttempUser.SSHAuthorizedKeys = nil\n\t\ttempUser.PasswordHash = nil\n\t\tif !reflect.DeepEqual(emptyUser, tempUser) {\n\t\t\treturn fmt.Errorf(\"SSH keys and password hash are not reconcilable\")\n\t\t}\n\t\tklog.Info(\"SSH Keys reconcilable\")\n\t} else {\n\t\treturn fmt.Errorf(\"ignition passwd user section contains unsupported changes: user must be core and have 1 or more sshKeys\")\n\t}\n\treturn nil\n}", "func (p User) IsValid() bool {\n\tif len(strings.TrimSpace(p.Name)) > 0 && len(strings.TrimSpace(p.Email)) > 0 {\n\t\treturn true\n\t}\n\treturn false\n}", "func (uc *UserCreate) check() error {\n\tif _, ok := uc.mutation.Age(); !ok {\n\t\treturn &ValidationError{Name: \"age\", err: errors.New(\"ent: missing required field \\\"age\\\"\")}\n\t}\n\tif v, ok := uc.mutation.Age(); ok {\n\t\tif err := user.AgeValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"age\", 
err: fmt.Errorf(\"ent: validator failed for field \\\"age\\\": %w\", err)}\n\t\t}\n\t}\n\tif _, ok := uc.mutation.Name(); !ok {\n\t\treturn &ValidationError{Name: \"name\", err: errors.New(\"ent: missing required field \\\"name\\\"\")}\n\t}\n\tif v, ok := uc.mutation.ID(); ok {\n\t\tif err := user.IDValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"id\", err: fmt.Errorf(\"ent: validator failed for field \\\"id\\\": %w\", err)}\n\t\t}\n\t}\n\treturn nil\n}", "func validateUser(userId int) error {\n\n\t// return an error\n\treturn &UnauthorizedError{userId, 1234}\n}", "func checkIfValid(p Passport) bool {\n\tif !intBetween(1920, 2002, p.Byr) {\n\t\treturn false\n\t}\n\tif !intBetween(2010, 2020, p.Iyr) {\n\t\treturn false\n\t}\n\tif !intBetween(2020, 2030, p.Eyr) {\n\t\treturn false\n\t}\n\tif len(p.Pid) != 9 {\n\t\treturn false\n\t}\n\n\t_, err := strconv.Atoi(p.Pid)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tif !(p.Ecl == \"amb\" || p.Ecl == \"blu\" || p.Ecl == \"brn\" || p.Ecl == \"gry\" || p.Ecl == \"grn\" || p.Ecl == \"hzl\" || p.Ecl == \"oth\") {\n\t\treturn false\n\t}\n\tif !checkHeight(p.Hgt) {\n\t\treturn false\n\t}\n\tif !checkHair(p.Hcl) {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (u *User) Validate(values ...interface{}) (bool, []error) {\n\treturn true, nil\n}", "func (mt *GoaLocalUser) Validate() (err error) {\n\tif mt.FirstName == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"firstName\"))\n\t}\n\tif mt.SecondName == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"secondName\"))\n\t}\n\tif mt.Email == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"email\"))\n\t}\n\n\treturn\n}", "func (uc *UserCreate) check() error {\n\tif _, ok := uc.mutation.Firstname(); !ok {\n\t\treturn &ValidationError{Name: \"firstname\", err: errors.New(\"ent: missing required field \\\"firstname\\\"\")}\n\t}\n\tif _, ok := uc.mutation.Lastname(); !ok {\n\t\treturn &ValidationError{Name: \"lastname\", err: errors.New(\"ent: missing required field \\\"lastname\\\"\")}\n\t}\n\tif _, ok := uc.mutation.Username(); !ok {\n\t\treturn &ValidationError{Name: \"username\", err: errors.New(\"ent: missing required field \\\"username\\\"\")}\n\t}\n\tif _, ok := uc.mutation.Password(); !ok {\n\t\treturn &ValidationError{Name: \"password\", err: errors.New(\"ent: missing required field \\\"password\\\"\")}\n\t}\n\treturn nil\n}", "func ValidateUser(person *user.Person) error{\n\tlog.Println(\"validate details entered by user\")\n\tv := validator.New()\n\terr := v.Struct(person)\n\treturn err\n}", "func (uuo *UserUpdateOne) check() error {\n\tif v, ok := uuo.mutation.AccountName(); ok {\n\t\tif err := user.AccountNameValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"account_name\", err: fmt.Errorf(\"ent: validator failed for field \\\"account_name\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uuo.mutation.StaffType(); ok {\n\t\tif err := user.StaffTypeValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"staff_type\", err: fmt.Errorf(\"ent: validator failed for field \\\"staff_type\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uuo.mutation.FamilyName(); ok {\n\t\tif err := user.FamilyNameValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"family_name\", err: fmt.Errorf(\"ent: validator failed for field \\\"family_name\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uuo.mutation.GivenName(); ok {\n\t\tif err := user.GivenNameValidator(v); err != nil 
{\n\t\t\treturn &ValidationError{Name: \"given_name\", err: fmt.Errorf(\"ent: validator failed for field \\\"given_name\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uuo.mutation.DisplayName(); ok {\n\t\tif err := user.DisplayNameValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"display_name\", err: fmt.Errorf(\"ent: validator failed for field \\\"display_name\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uuo.mutation.IDNumber(); ok {\n\t\tif err := user.IDNumberValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"id_number\", err: fmt.Errorf(\"ent: validator failed for field \\\"id_number\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uuo.mutation.Sex(); ok {\n\t\tif err := user.SexValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"sex\", err: fmt.Errorf(\"ent: validator failed for field \\\"sex\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uuo.mutation.PhoneNumber(); ok {\n\t\tif err := user.PhoneNumberValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"phone_number\", err: fmt.Errorf(\"ent: validator failed for field \\\"phone_number\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uuo.mutation.Address(); ok {\n\t\tif err := user.AddressValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"address\", err: fmt.Errorf(\"ent: validator failed for field \\\"address\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uuo.mutation.StaffID(); ok {\n\t\tif err := user.StaffIDValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"staff_id\", err: fmt.Errorf(\"ent: validator failed for field \\\"staff_id\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uuo.mutation.PersonalEmail(); ok {\n\t\tif err := user.PersonalEmailValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"personal_email\", err: fmt.Errorf(\"ent: validator failed for field \\\"personal_email\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uuo.mutation.IntranetWorkEmail(); ok {\n\t\tif err := user.IntranetWorkEmailValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"intranet_work_email\", err: fmt.Errorf(\"ent: validator failed for field \\\"intranet_work_email\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uuo.mutation.ExtranetWorkEmail(); ok {\n\t\tif err := user.ExtranetWorkEmailValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"extranet_work_email\", err: fmt.Errorf(\"ent: validator failed for field \\\"extranet_work_email\\\": %w\", err)}\n\t\t}\n\t}\n\treturn nil\n}", "func (u User) Validate() []string {\n\tvar errs []string\n\tfor _, n := range u.Name {\n\t\tif !unicode.IsLetter(n) && !unicode.IsDigit(n) && !unicode.IsPunct(n) && !unicode.IsSpace(n) {\n\t\t\terrs = append(errs, \"name::is_not_word\")\n\t\t\tbreak\n\t\t}\n\t}\n\tif utf8.RuneCountInString(u.Password) < 8 {\n\t\terrs = append(errs, \"password::min_length_is::8\")\n\t}\n\tif len(u.Password) > 0 {\n\t\tvar PasswordUpper, PasswordLetter, PasswordNumber, PasswordSpaces, PasswordInvalidChar bool\n\t\tfor _, c := range u.Password {\n\t\t\tif c < 33 || c > 126 {\n\t\t\t\tPasswordInvalidChar = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tswitch {\n\t\t\tcase unicode.IsNumber(c):\n\t\t\t\tPasswordNumber = true\n\t\t\tcase unicode.IsUpper(c):\n\t\t\t\tPasswordUpper = true\n\t\t\tcase unicode.IsLetter(c):\n\t\t\t\tPasswordLetter = true\n\t\t\tcase c == ' ':\n\t\t\t\tPasswordSpaces = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !PasswordUpper || !PasswordLetter || !PasswordNumber || PasswordSpaces || PasswordInvalidChar {\n\t\t\terrs = append(errs, 
\"password::invalid_password_format\")\n\t\t}\n\t}\n\tfor _, n := range u.Desc {\n\t\tif !unicode.IsLetter(n) && !unicode.IsDigit(n) && !unicode.IsPunct(n) && !unicode.IsSpace(n) {\n\t\t\terrs = append(errs, \"desc::is_not_word\")\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn errs\n}", "func (uuo *UserUpdateOne) check() error {\n\tif v, ok := uuo.mutation.Username(); ok {\n\t\tif err := user.UsernameValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"username\", err: fmt.Errorf(\"ent: validator failed for field \\\"username\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uuo.mutation.Fullname(); ok {\n\t\tif err := user.FullnameValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"fullname\", err: fmt.Errorf(\"ent: validator failed for field \\\"fullname\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uuo.mutation.Password(); ok {\n\t\tif err := user.PasswordValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"password\", err: fmt.Errorf(\"ent: validator failed for field \\\"password\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uuo.mutation.Email(); ok {\n\t\tif err := user.EmailValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"email\", err: fmt.Errorf(\"ent: validator failed for field \\\"email\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uuo.mutation.Phone(); ok {\n\t\tif err := user.PhoneValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"phone\", err: fmt.Errorf(\"ent: validator failed for field \\\"phone\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uuo.mutation.Bio(); ok {\n\t\tif err := user.BioValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"bio\", err: fmt.Errorf(\"ent: validator failed for field \\\"bio\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uuo.mutation.Intro(); ok {\n\t\tif err := user.IntroValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"intro\", err: fmt.Errorf(\"ent: validator failed for field \\\"intro\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uuo.mutation.GithubProfile(); ok {\n\t\tif err := user.GithubProfileValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"github_profile\", err: fmt.Errorf(\"ent: validator failed for field \\\"github_profile\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uuo.mutation.ProfilePictureURL(); ok {\n\t\tif err := user.ProfilePictureURLValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"profile_picture_url\", err: fmt.Errorf(\"ent: validator failed for field \\\"profile_picture_url\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uuo.mutation.Status(); ok {\n\t\tif err := user.StatusValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"status\", err: fmt.Errorf(\"ent: validator failed for field \\\"status\\\": %w\", err)}\n\t\t}\n\t}\n\treturn nil\n}", "func Validate(config *Main) error {\n\tfor user, userData := range config.Users {\n\t\tif len(userData.Entrypoint) == 0 {\n\t\t\treturn fmt.Errorf(\"The field `entrypoint` is missing for user '%s'\", user)\n\t\t}\n\n\t\tif len(userData.Sitemaps.Default) == 0 {\n\t\t\treturn fmt.Errorf(\"The field `sitemaps.default` is missing for user '%s'\", user)\n\t\t}\n\n\t\tif len(userData.Sitemaps.Allowed) == 0 {\n\t\t\treturn fmt.Errorf(\"The field `sitemaps.allowed` is missing for user '%s'\", user)\n\t\t}\n\t}\n\n\treturn nil\n}", "func UserStructLevelValidation(sl validator.StructLevel) {\n\n\tuser := sl.Current().Interface().(User)\n\n\tif len(user.FirstName) == 0 && len(user.LastName) == 0 {\n\t\tsl.ReportError(user.FirstName, \"FirstName\", \"fname\", \"fnameorlname\", 
\"\")\n\t\tsl.ReportError(user.LastName, \"LastName\", \"lname\", \"fnameorlname\", \"\")\n\t}\n\n\t// plus can to more, even with different tag than \"fnameorlname\"\n}", "func IsValidUser(name string) bool {\n\tif name == \"\" {\n\t\treturn false\n\t}\n\n\t// \"~\" is prepended (commonly) if there was no ident server response.\n\tif name[0] == '~' {\n\t\t// Means name only contained \"~\".\n\t\tif len(name) < 2 {\n\t\t\treturn false\n\t\t}\n\n\t\tname = name[1:]\n\t}\n\n\t// Check to see if the first index is alphanumeric.\n\tif (name[0] < 'A' || name[0] > 'Z') && (name[0] < 'a' || name[0] > 'z') && (name[0] < '0' || name[0] > '9') {\n\t\treturn false\n\t}\n\n\tfor i := 1; i < len(name); i++ {\n\t\tif (name[i] < 'A' || name[i] > '}') && (name[i] < '0' || name[i] > '9') && name[i] != '-' && name[i] != '.' {\n\t\t\t// a-z, A-Z, 0-9, -, and _\\[]{}^|\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (id UserID) Validate() error {\n\tif id.Name == \"\" {\n\t\treturn errors.New(\"no username is specified\")\n\t}\n\tif id.Realm == \"\" {\n\t\treturn errors.New(\"no realm is specified\")\n\t}\n\treturn nil\n}", "func validateUserName(c *gin.Context) {\n\tuserName := c.Param(\"userName\")\n\n\tif len(userName) < 5 {\n\t\tc.Error(errors.NewCustomError(400, \"userName debe tener al menos 5 caracteres\"))\n\t\tc.Abort()\n\t\treturn\n\t}\n}", "func ValidateStoredUserView(result *StoredUserView) (err error) {\n\tif result.ID == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"id\", \"result\"))\n\t}\n\tif result.Name == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"name\", \"result\"))\n\t}\n\tif result.Email == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"email\", \"result\"))\n\t}\n\tif result.Name != nil {\n\t\tif utf8.RuneCountInString(*result.Name) > 30 {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(\"result.name\", *result.Name, utf8.RuneCountInString(*result.Name), 30, false))\n\t\t}\n\t}\n\treturn\n}", "func (mt *EasypostUser) Validate() (err error) {\n\tif mt.ID == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"id\"))\n\t}\n\tif mt.Object == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"object\"))\n\t}\n\tif mt.Name == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"name\"))\n\t}\n\tif mt.Email == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"email\"))\n\t}\n\n\tif err2 := mt.Children.Validate(); err2 != nil {\n\t\terr = goa.MergeErrors(err, err2)\n\t}\n\tif ok := goa.ValidatePattern(`^user_`, mt.ID); !ok {\n\t\terr = goa.MergeErrors(err, goa.InvalidPatternError(`response.id`, mt.ID, `^user_`))\n\t}\n\tif ok := goa.ValidatePattern(`^User$`, mt.Object); !ok {\n\t\terr = goa.MergeErrors(err, goa.InvalidPatternError(`response.object`, mt.Object, `^User$`))\n\t}\n\treturn\n}", "func (u *User) Validate(tx *pop.Connection) (*validate.Errors, error) {\n\tvar err error\n\t//validate based on is agent\n\tif !u.IsAgent { //is not an agent\n\t\treturn validate.Validate(\n\t\t\t&validators.StringIsPresent{Field: u.Email, Name: \"Email\"},\n\t\t\t&validators.StringIsPresent{Field: u.PasswordHash, Name: \"PasswordHash\"},\n\t\t\t&validators.StringIsPresent{Field: u.LastName, Name: \"LastName\"},\n\t\t\t&validators.StringIsPresent{Field: u.FirstName, Name: \"FirstName\"},\n\t\t\t&validators.StringIsPresent{Field: u.Phone, Name: \"Phone\"},\n\t\t\t// check to see if the email address is already 
taken:\n\t\t\t&validators.FuncValidator{\n\t\t\t\tField: u.Email,\n\t\t\t\tName: \"Email\",\n\t\t\t\tMessage: \"%s is already taken\",\n\t\t\t\tFn: func() bool {\n\t\t\t\t\tvar b bool\n\t\t\t\t\tq := tx.Where(\"email = ?\", u.Email)\n\t\t\t\t\tif u.ID != uuid.Nil {\n\t\t\t\t\t\tq = q.Where(\"id != ?\", u.ID)\n\t\t\t\t\t}\n\t\t\t\t\tb, err = q.Exists(u)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t\treturn !b\n\t\t\t\t},\n\t\t\t},\n\t\t), err\n\n\t} else { // trying to save agent\n\t\treturn validate.Validate(\n\t\t\t&validators.StringIsPresent{Field: u.Email, Name: \"Email\"},\n\t\t\t&validators.StringIsPresent{Field: u.PasswordHash, Name: \"PasswordHash\"},\n\t\t\t&validators.StringIsPresent{Field: u.LastName, Name: \"LastName\"},\n\t\t\t&validators.StringIsPresent{Field: u.FirstName, Name: \"FirstName\"},\n\t\t\t&validators.StringIsPresent{Field: u.Phone, Name: \"Phone\"},\n\t\t\t&validators.StringIsPresent{Field: u.PublicEmail.String, Name: \"PublicEmail\"},\n\t\t\t&validators.StringIsPresent{Field: u.Company.String, Name: \"Company\"},\n\t\t\t&validators.StringIsPresent{Field: u.Address1.String, Name: \"Address1\"},\n\t\t\t&validators.StringIsPresent{Field: u.City.String, Name: \"City\"},\n\t\t\t&validators.StringIsPresent{Field: u.State.String, Name: \"State\"},\n\t\t\t&validators.StringIsPresent{Field: u.Zipcode.String, Name: \"Zipcode\"},\n\t\t\t// check to see if the email address is already taken:\n\t\t\t&validators.FuncValidator{\n\t\t\t\tField: u.Email,\n\t\t\t\tName: \"Email\",\n\t\t\t\tMessage: \"%s is already taken\",\n\t\t\t\tFn: func() bool {\n\t\t\t\t\tvar b bool\n\t\t\t\t\tq := tx.Where(\"email = ?\", u.Email)\n\t\t\t\t\tif u.ID != uuid.Nil {\n\t\t\t\t\t\tq = q.Where(\"id != ?\", u.ID)\n\t\t\t\t\t}\n\t\t\t\t\tb, err = q.Exists(u)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t\treturn !b\n\t\t\t\t},\n\t\t\t},\n\t\t), err\n\n\t}\n}", "func (user *User) Validate() (map[string]interface{}, bool) {\n\n\tif !strings.Contains(user.Email, \"@\") {\n\t\treturn u.Message(false, \"Email address is required\"), false\n\t}\n\n\tif len(user.Password) < 6 && user.GoogleUserID == \"\" && user.FacebookUserID == \"\" {\n\t\treturn u.Message(false, \"Password is required\"), false\n\t}\n\n\t//Email must be unique\n\ttemp := &User{}\n\n\t//check for errors and duplicate emails\n\terr := GetDB().Table(\"users\").Where(\"email = ?\", user.Email).First(temp).Error\n\tif err != nil && err != gorm.ErrRecordNotFound {\n\t\tfmt.Printf(\"err = %s\", err)\n\t\treturn u.Message(false, \"Connection error. 
Please retry\"), false\n\t}\n\tif temp.Email != \"\" {\n\t\treturn u.Message(false, \"Email address already in use by another user.\"), false\n\t}\n\n\treturn u.Message(false, \"Requirement passed\"), true\n}", "func (u *User) Sanitize() {\n\tu.userData.Pswd = \"\"\n\tu.userData.TFAInfo.Secret = \"\"\n\tu.userData.TFAInfo.HOTPCounter = 0\n\tu.userData.TFAInfo.HOTPExpiredAt = time.Time{}\n}", "func postUser(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tvar user User\n\terr := json.NewDecoder(r.Body).Decode(&user)\n\tlog.ErrorHandler(err)\n\tvar (\n\t\temail = strings.ToLower(user.Email)\n\t\talias = user.Alias\n\t\tuserName = user.UserName\n\t\tpassword = user.Password\n\t\tfullName = user.FullName\n\t\tsafeNames bool\n\t\tsafeEmail = emailValidator(email)\n\t\tsafePassword = passwordValidator(password)\n\t\tsimilarToUser = similarToUser(fullName, alias, userName, password)\n\t)\n\n\tduplicateEmail := DuplicateCheck(email)\n\n\tif duplicateEmail {\n\t\tw.WriteHeader(http.StatusConflict)\n\t\terr := json.NewEncoder(w).Encode(core.FourONine)\n\t\tlog.ErrorHandler(err)\n\t\tlog.AccessHandler(r, 409)\n\t\treturn\n\t}\n\n\tsafeNames = userDetails(fullName, alias, userName)\n\n\tif safeNames {\n\t\t// Some or all of the details in the body are empty\n\t\t//\tAll fields are required\n\t\tw.WriteHeader(http.StatusUnprocessableEntity)\n\t\terr := json.NewEncoder(w).Encode(core.FourTwoTwo)\n\t\tlog.ErrorHandler(err)\n\t\tlog.AccessHandler(r, 422)\n\t\treturn\n\t}\n\n\tif !safeEmail {\n\t\t// Issue with Email\n\t\t//Email couldn't be verified or invalid email\n\t\tw.WriteHeader(http.StatusUnprocessableEntity)\n\t\terr := json.NewEncoder(w).Encode(core.FourTwoTwo)\n\t\tlog.ErrorHandler(err)\n\t\tlog.AccessHandler(r, 422)\n\t\treturn\n\t}\n\n\tif similarToUser {\n\t\t// Issue with Password\n\t\t// Password is similar to user information\n\t\tw.WriteHeader(http.StatusUnprocessableEntity)\n\t\terr := json.NewEncoder(w).Encode(core.FourTwoTwo)\n\t\tlog.ErrorHandler(err)\n\t\tlog.AccessHandler(r, 422)\n\t\treturn\n\t}\n\n\tif !safePassword {\n\t\t// Issue with Password\n\t\t//\tPassword doesn't go through the validator successfully\n\t\tw.WriteHeader(http.StatusUnprocessableEntity)\n\t\terr := json.NewEncoder(w).Encode(core.FourTwoTwo)\n\t\tlog.ErrorHandler(err)\n\t\tlog.AccessHandler(r, 422)\n\t\treturn\n\t}\n\n\tpasswordHash, err := generatePasswordHash(password)\n\tlog.ErrorHandler(err)\n\n\tuser = User{\n\t\tUserName: userName,\n\t\tFullName: fullName,\n\t\tAlias: alias,\n\t\tEmail: email,\n\t\tIsAdmin: false,\n\t\tPassword: passwordHash,\n\t\tLastLogin: time.Time{},\n\t\tIsActive: false,\n\t\tIsEmailVerified: false,\n\t}\n\n\t//\tfmt.Println(\"Create The Fucking User Here\")\n\n\tdb.Create(&user)\n\terr = json.NewEncoder(w).Encode(user)\n\tlog.ErrorHandler(err)\n\n\t// Create OTP to verify email by\n\t// OTP expires in 30 minutes\n\t// Stored in Redis with key new_user_otp_email\n\tverifiableToken := generateOTP()\n\terr = redisClient.Set(ctx, \"new_user_otp_\"+email, verifiableToken, 30*time.Minute).Err()\n\tlog.ErrorHandler(err)\n\n\t//payload := struct {\n\t//\tToken string\n\t//}{\n\t//\tToken: verifiableToken,\n\t//}\n\t//\n\t//var status bool\n\t//\n\t////status, err = core.SendEmailNoAttachment(email, \"OTP for Verification\", payload, \"token.txt\")\n\t//if !status {\n\t//\tw.WriteHeader(http.StatusInternalServerError)\n\t//\terr = json.NewEncoder(w).Encode(core.FiveHundred)\n\t//\tlog.ErrorHandler(err)\n\t//\tlog.AccessHandler(r, 
500)\n\t//\treturn\n\t//}\n\tlog.ErrorHandler(err)\n\tlog.AccessHandler(r, 200)\n\treturn\n}", "func (ut *updateUserPayload) Validate() (err error) {\n\tif ut.Name == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`request`, \"name\"))\n\t}\n\tif ut.Email == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`request`, \"email\"))\n\t}\n\tif ut.Bio == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`request`, \"bio\"))\n\t}\n\tif ut.Email != nil {\n\t\tif err2 := goa.ValidateFormat(goa.FormatEmail, *ut.Email); err2 != nil {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidFormatError(`request.email`, *ut.Email, goa.FormatEmail, err2))\n\t\t}\n\t}\n\tif ut.Name != nil {\n\t\tif ok := goa.ValidatePattern(`\\S`, *ut.Name); !ok {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidPatternError(`request.name`, *ut.Name, `\\S`))\n\t\t}\n\t}\n\tif ut.Name != nil {\n\t\tif utf8.RuneCountInString(*ut.Name) > 256 {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`request.name`, *ut.Name, utf8.RuneCountInString(*ut.Name), 256, false))\n\t\t}\n\t}\n\treturn\n}", "func (config ConfigUser) Validate() (err error) {\n\terr = config.User.Validate()\n\tif err != nil {\n\t\treturn\n\t}\n\tif config.Groups != nil {\n\t\tif len(*config.Groups) != 0 {\n\t\t\tfor _, e := range *config.Groups {\n\t\t\t\terr = e.Validate()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn config.Password.Validate()\n}", "func (c *UsersInfoCall) ValidateArgs() error {\n\tif len(c.user) <= 0 {\n\t\treturn errors.New(`required field user not initialized`)\n\t}\n\treturn nil\n}", "func (uu *UserUpdate) check() error {\n\tif v, ok := uu.mutation.Username(); ok {\n\t\tif err := user.UsernameValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"username\", err: fmt.Errorf(\"ent: validator failed for field \\\"username\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uu.mutation.Password(); ok {\n\t\tif err := user.PasswordValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"password\", err: fmt.Errorf(\"ent: validator failed for field \\\"password\\\": %w\", err)}\n\t\t}\n\t}\n\treturn nil\n}", "func (u *Usecase) validFields(d *Device) error {\n\tif d.Name == \"\" {\n\t\treturn &InvalidError{\"attribute `Name` must not be empty\"}\n\t}\n\n\tif d.User == 0 {\n\t\treturn &InvalidError{\"invalid user\"}\n\t}\n\n\treturn nil\n}", "func (user *User) Validate(action string) map[string]string {\n\tvar errMessages = make(map[string]string)\n\tvar err error\n\n\tswitch strings.ToLower(action) {\n\tcase \"update\":\n\t\tif user.Email == \"\" {\n\t\t\terrMessages[\"email_required\"] = \"email required\"\n\t\t}\n\n\t\tif user.Email != \"\" {\n\t\t\tif err = checkmail.ValidateFormat(user.Email); err != nil {\n\t\t\t\terrMessages[\"invalid_email\"] = \"invalid email\"\n\t\t\t}\n\t\t}\n\tcase \"login\":\n\t\tif user.Password == \"\" {\n\t\t\terrMessages[\"password_required\"] = \"password is required\"\n\t\t}\n\n\t\tif user.Email == \"\" {\n\t\t\terrMessages[\"email_required\"] = \"email required\"\n\t\t}\n\n\t\tif user.Email != \"\" {\n\t\t\tif err = checkmail.ValidateFormat(user.Email); err != nil {\n\t\t\t\terrMessages[\"invalid_email\"] = \"invalid email\"\n\t\t\t}\n\t\t}\n\tcase \"forgotpassword\":\n\t\tif user.Email == \"\" {\n\t\t\terrMessages[\"email_required\"] = \"email required\"\n\t\t}\n\t\tif user.Email != \"\" {\n\t\t\tif err = checkmail.ValidateFormat(user.Email); err != nil {\n\t\t\t\terrMessages[\"invalid_email\"] = \"please provide a 
valid email\"\n\t\t\t}\n\t\t}\n\tdefault:\n\t\tif user.FirstName == \"\" {\n\t\t\terrMessages[\"firstname_required\"] = \"first name is required\"\n\t\t}\n\n\t\tif user.LastName == \"\" {\n\t\t\terrMessages[\"lastname_required\"] = \"last name is required\"\n\t\t}\n\n\t\tif user.Password == \"\" {\n\t\t\terrMessages[\"password_required\"] = \"password is required\"\n\t\t}\n\n\t\tif user.Password != \"\" && len(user.Password) < 6 {\n\t\t\terrMessages[\"invalid_password\"] = \"password should be at least 6 characters\"\n\t\t}\n\n\t\tif user.Email == \"\" {\n\t\t\terrMessages[\"email_required\"] = \"email is required\"\n\t\t}\n\n\t\tif user.Email != \"\" {\n\t\t\tif err = checkmail.ValidateFormat(user.Email); err != nil {\n\t\t\t\terrMessages[\"invalid_email\"] = \"please provide a valid email\"\n\t\t\t}\n\t\t}\n\t}\n\n\treturn errMessages\n}", "func validateRegister(f *userRequest) (errors map[string]string) {\n\terrors = make(map[string]string)\n\tmodel.Required(&errors, map[string][]string{\n\t\t\"name\": []string{f.Name, \"Name\"},\n\t\t\"email\": []string{f.Email, \"Email\"},\n\t\t\"password\": []string{f.Password, \"Password\"},\n\t\t\"cpassword\": []string{f.Cpassword, \"Confirm Password\"},\n\t})\n\tmodel.Confirmed(&errors, map[string][]string{\n\t\t\"cpassword\": []string{f.Password, f.Cpassword, \"Confirm Password\"},\n\t})\n\tif len(errors) == 0 {\n\t\treturn nil\n\t}\n\treturn\n}", "func ValidaUser(user, password, tipoUser string) (retorno bool) {\n\tvar SQLSelect string\n\n\tdb, err := sql.Open(\"mysql\", UserDB+\":\"+PassDB+\"@tcp(\"+HostDB+\":\"+PortDB+\")/\"+DatabaseDB+\"?charset=utf8\")\n\tcheckErr(err)\n\n\tdefer db.Close()\n\n\t// Se tipoUser = 0 quer dizer que nao eh necessario ser admin para efetuar validacao\n\tif tipoUser == \"0\" {\n\t\tSQLSelect = \"SELECT COUNT(nome_usuario) FROM usuarios WHERE ativo=1 AND usuario='\" + user + \"' AND senha='\" + password + \"'\"\n\t} else {\n\t\tSQLSelect = \"SELECT COUNT(nome_usuario) FROM usuarios WHERE ativo=1 AND usuario='\" + user + \"' AND senha='\" + password + \"' AND admin='\" + tipoUser + \"'\"\n\t}\n\n\trows, err := db.Query(SQLSelect)\n\n\tcheckErr(err)\n\n\tfor rows.Next() {\n\t\tvar count int\n\t\terr = rows.Scan(&count)\n\t\tcheckErr(err)\n\n\t\tif count >= 1 {\n\t\t\treturn true\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn retorno\n}", "func (m *User) Validate() error {\n\tif m.CreatedAt != \"\" {\n\t\tif _, err := time.Parse(time.RFC3339Nano, m.CreatedAt); err != nil {\n\t\t\treturn errors.Wrap(err, errors.ErrBadRequest, \"invalid time string in field created_at\")\n\t\t}\n\t}\n\n\tif m.UpdatedAt != \"\" {\n\t\tif _, err := time.Parse(time.RFC3339Nano, m.UpdatedAt); err != nil {\n\t\t\treturn errors.Wrap(err, errors.ErrBadRequest, \"invalid time string in field updated_at\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func (m *UsageInfo) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateActiveUsers(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (u *UserInfo) ValidateNoUserInfo(path *field.Path) (errs field.ErrorList) {\n\tif u != nil {\n\t\tif len(u.Roles) != 0 {\n\t\t\terrs = append(errs, field.Forbidden(path.Child(\"roles\"), \"Usage of user info is forbidden\"))\n\t\t}\n\t\tif len(u.ClusterRoles) != 0 {\n\t\t\terrs = append(errs, field.Forbidden(path.Child(\"clusterRoles\"), \"Usage of user info is forbidden\"))\n\t\t}\n\t\tif len(u.Subjects) != 0 {\n\t\t\terrs = 
append(errs, field.Forbidden(path.Child(\"subjects\"), \"Usage of user info is forbidden\"))\n\t\t}\n\t}\n\treturn errs\n}", "func ValidUser(login *Login) bool {\n\tvar err error\n\n\t//Validate function input\n\tif login.Username == \"\" {\n\t\treturn false\n\t}\n\tif login.Password == \"\" {\n\t\treturn false\n\t}\n\n\t//Get user with matching username\n\tuser, err := GetUser(login.Username)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\t//Check if username match\n\tif login.Username != user.Username {\n\t\treturn false\n\t}\n\n\t//Check if password matches password hash\n\tif !passhash.MatchString(login.Password, user.Password) {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (ut *UpdateUserPayload) Validate() (err error) {\n\tif ut.Name == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"name\"))\n\t}\n\tif ut.Email == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"email\"))\n\t}\n\tif ut.Bio == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"bio\"))\n\t}\n\tif err2 := goa.ValidateFormat(goa.FormatEmail, ut.Email); err2 != nil {\n\t\terr = goa.MergeErrors(err, goa.InvalidFormatError(`type.email`, ut.Email, goa.FormatEmail, err2))\n\t}\n\tif ok := goa.ValidatePattern(`\\S`, ut.Name); !ok {\n\t\terr = goa.MergeErrors(err, goa.InvalidPatternError(`type.name`, ut.Name, `\\S`))\n\t}\n\tif utf8.RuneCountInString(ut.Name) > 256 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.name`, ut.Name, utf8.RuneCountInString(ut.Name), 256, false))\n\t}\n\treturn\n}", "func (e UserRequired) IsUserRequired() {}", "func (ci *ConnectionInfo) CheckUserInput() (bool, error) {\n\n\tvar ok bool\n\n\tif ci.Username == \"\" || ci.Password == \"\" || ci.BaseURL == \"\" {\n\t\tusage()\n\t\tmsg := \"username, password & url should be part of the api call\"\n\t\treturn ok, errors.New(msg)\n\t}\n\n\terr := checkFlags(ci)\n\tif err != nil {\n\t\tok = false\n\t} else {\n\t\tok = true\n\t}\n\n\treturn ok, err\n}", "func ValidateUserView(result *UserView) (err error) {\n\tif result.ID == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"id\", \"result\"))\n\t}\n\tif result.Username == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"username\", \"result\"))\n\t}\n\tif result.Name == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"name\", \"result\"))\n\t}\n\tif result.Mobile == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"mobile\", \"result\"))\n\t}\n\tif result.Email == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"email\", \"result\"))\n\t}\n\tif result.Jobs == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"jobs\", \"result\"))\n\t}\n\tif result.IsAdmin == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"is_admin\", \"result\"))\n\t}\n\tif result.Jobs != nil {\n\t\tif !(*result.Jobs == 1 || *result.Jobs == 2 || *result.Jobs == 3) {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidEnumValueError(\"result.jobs\", *result.Jobs, []interface{}{1, 2, 3}))\n\t\t}\n\t}\n\tif result.Superior != nil {\n\t\tif err2 := ValidateSuperiorView(result.Superior); err2 != nil {\n\t\t\terr = goa.MergeErrors(err, err2)\n\t\t}\n\t}\n\tif result.Group != nil {\n\t\tif err2 := ValidateGroupView(result.Group); err2 != nil {\n\t\t\terr = goa.MergeErrors(err, err2)\n\t\t}\n\t}\n\treturn\n}", "func (post *Post) Validate() map[string]string {\n\n\t// if user.Name == \"\" && len(user.Name) < 5 {\n\t// \treturn err(\"The Name you entered was too 
short\")\n\t// }\n\n\treturn map[string]string{\"Message\": \"\", \"IsValid\": \"1\"}\n}", "func (user User) Validate() (err error) {\n\tif user.Username == \"\" {\n\t\terr = fmt.Errorf(\"Username cannot be empty\")\n\t}\n\treturn\n}", "func (uuo *UserUpdateOne) check() error {\n\tif v, ok := uuo.mutation.Username(); ok {\n\t\tif err := user.UsernameValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"username\", err: fmt.Errorf(\"ent: validator failed for field \\\"username\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uuo.mutation.Password(); ok {\n\t\tif err := user.PasswordValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"password\", err: fmt.Errorf(\"ent: validator failed for field \\\"password\\\": %w\", err)}\n\t\t}\n\t}\n\treturn nil\n}", "func (user *User) Validate() (map[string]interface{}, bool) {\n\n\tif !strings.Contains(user.Email, \"@\") {\n\t\treturn utils.Message(false, \"Email address is required\"), false\n\t}\n\n\tif len(user.Password) < 6 {\n\t\treturn utils.Message(false, \"Password is required\"), false\n\t}\n\n\t//Email must be unique\n\ttemp := &User{}\n\n\t//check for errors and duplicate emails\n\terr := GetDB().Table(\"users\").Where(\"email = ?\", user.Email).First(temp).Error\n\tif err != nil && err != gorm.ErrRecordNotFound {\n\t\treturn utils.Message(false, \"Connection error. Please retry\"), false\n\t}\n\tif temp.Email != \"\" {\n\t\treturn utils.Message(false, \"Email address already in use by another user.\"), false\n\t}\n\n\treturn utils.Message(false, \"Requirement passed\"), true\n}", "func ValidateUserInput(userInput map[string]string, keys []string) (err error) {\n\n\tfor _, key := range keys {\n\t\tif strings.EqualFold(userInput[key], \"\") {\n\t\t\terr = fmt.Errorf(\"%s should not be an empty string\", key)\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\t\tif strings.HasSuffix(key, \"id\") {\n\t\t\ttempt, err := strconv.Atoi(userInput[key])\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"%s needs to be a string representation of a postive integer\", key)\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif tempt <= 0 {\n\t\t\t\terr = fmt.Errorf(\"%s should not be less than zero\", key)\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (u *User) Validate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.Validate(\n\t\t&validators.StringIsPresent{Field: u.Email, Name: \"Email\"},\n\t\t&validators.StringIsPresent{Field: u.FirstName, Name: \"FirstName\"},\n\t\t&validators.StringIsPresent{Field: u.LastName, Name: \"LastName\"},\n\t\t&validators.StringIsPresent{Field: u.Nickname, Name: \"Nickname\"},\n\t\t&validators.UUIDIsPresent{Field: u.UUID, Name: \"UUID\"},\n\t\t&NullsStringIsURL{Field: u.AuthPhotoURL, Name: \"AuthPhotoURL\"},\n\t\t&domain.StringIsVisible{Field: u.Nickname, Name: \"Nickname\"},\n\t), nil\n}", "func (u User2) Validate() []string {\n\tvar errs []string\n\tfor _, n := range u.Name {\n\t\tif !unicode.IsLetter(n) && !unicode.IsDigit(n) && !unicode.IsPunct(n) && !unicode.IsSpace(n) {\n\t\t\terrs = append(errs, \"name::is_not_word\")\n\t\t\tbreak\n\t\t}\n\t}\n\tif u.PasswordRef != nil && utf8.RuneCountInString(*u.PasswordRef) < 8 {\n\t\terrs = append(errs, \"password_ref::min_length_is::8\")\n\t}\n\tif u.PasswordRef != nil && len(*u.PasswordRef) > 0 {\n\t\tvar PasswordRefUpper, PasswordRefLetter, PasswordRefNumber, PasswordRefSpaces, PasswordRefInvalidChar bool\n\t\tfor _, c := range *u.PasswordRef {\n\t\t\tif c < 33 || c > 126 
{\n\t\t\t\tPasswordRefInvalidChar = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tswitch {\n\t\t\tcase unicode.IsNumber(c):\n\t\t\t\tPasswordRefNumber = true\n\t\t\tcase unicode.IsUpper(c):\n\t\t\t\tPasswordRefUpper = true\n\t\t\tcase unicode.IsLetter(c):\n\t\t\t\tPasswordRefLetter = true\n\t\t\tcase c == ' ':\n\t\t\t\tPasswordRefSpaces = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !PasswordRefUpper || !PasswordRefLetter || !PasswordRefNumber || PasswordRefSpaces || PasswordRefInvalidChar {\n\t\t\terrs = append(errs, \"password_ref::invalid_password_format\")\n\t\t}\n\t}\n\tfor _, n := range u.Desc {\n\t\tif !unicode.IsLetter(n) && !unicode.IsDigit(n) && !unicode.IsPunct(n) && !unicode.IsSpace(n) {\n\t\t\terrs = append(errs, \"desc::is_not_word\")\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn errs\n}", "func (n UsernsMode) Valid() bool {\n\treturn n == \"\" || n.IsHost()\n}", "func validateTatHeaders(tatHeaders tatHeaders) (models.User, error) {\n\n\tuser := models.User{}\n\tif tatHeaders.trustUsername != \"\" && tatHeaders.trustUsername != \"null\" {\n\t\terr := user.TrustUsername(tatHeaders.trustUsername)\n\t\tif err != nil {\n\t\t\treturn user, fmt.Errorf(\"User %s does not exist. Please register before. Err:%s\", tatHeaders.trustUsername, err.Error())\n\t\t}\n\t} else {\n\t\terr := user.FindByUsernameAndPassword(tatHeaders.username, tatHeaders.password)\n\t\tif err != nil {\n\t\t\treturn user, fmt.Errorf(\"Invalid Tat credentials for username %s, err:%s\", tatHeaders.username, err.Error())\n\t\t}\n\t}\n\n\treturn user, nil\n}", "func (ut *userCreatePayload) Validate() (err error) {\n\tif ut.Email == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`request`, \"email\"))\n\t}\n\tif ut.Nickname == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`request`, \"nickname\"))\n\t}\n\tif ut.Password == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`request`, \"password\"))\n\t}\n\tif ut.Email != nil {\n\t\tif err2 := goa.ValidateFormat(goa.FormatEmail, *ut.Email); err2 != nil {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidFormatError(`request.email`, *ut.Email, goa.FormatEmail, err2))\n\t\t}\n\t}\n\tif ut.Nickname != nil {\n\t\tif utf8.RuneCountInString(*ut.Nickname) < 1 {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`request.nickname`, *ut.Nickname, utf8.RuneCountInString(*ut.Nickname), 1, true))\n\t\t}\n\t}\n\tif ut.Nickname != nil {\n\t\tif utf8.RuneCountInString(*ut.Nickname) > 32 {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`request.nickname`, *ut.Nickname, utf8.RuneCountInString(*ut.Nickname), 32, false))\n\t\t}\n\t}\n\tif ut.Password != nil {\n\t\tif utf8.RuneCountInString(*ut.Password) < 1 {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`request.password`, *ut.Password, utf8.RuneCountInString(*ut.Password), 1, true))\n\t\t}\n\t}\n\tif ut.Password != nil {\n\t\tif utf8.RuneCountInString(*ut.Password) > 32 {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`request.password`, *ut.Password, utf8.RuneCountInString(*ut.Password), 32, false))\n\t\t}\n\t}\n\treturn\n}", "func isValidUser(tokenUsername interface{}, providedUsername string) bool {\n\tif username, ok := tokenUsername.(string); ok {\n\t\tif providedUsername == username {\n\t\t\treturn true\n\t\t}\n\t} else {\n\t\tglog.Error(\"Error casting username to string. 
This may be due to a invalid token\")\n\t\textension.Exit(extension.ErrorExitCode)\n\t}\n\treturn false\n}", "func isValidUserInput(input string) bool {\n\tvar isValidString = regexp.MustCompile(`^[1-9a-z-]+$`).MatchString\n\n\tif isValidString(input) {\n\t\treturn true\n\t}\n\treturn false\n}", "func checkUserFormChanged(uForm storage.User, originalUser storage.User) (bool, storage.User) {\n\tfmt.Printf(\"---originalUser = %v, type = %T\\n\", originalUser.FirstName, originalUser.FirstName)\n\tfmt.Printf(\"---user in form = %v, type = %T\\n\", uForm.FirstName, uForm.FirstName)\n\n\tchanged := false\n\tif uForm.FirstName != originalUser.FirstName && uForm.FirstName != \"\" {\n\t\toriginalUser.FirstName = uForm.FirstName\n\t\tchanged = true\n\t}\n\tif uForm.LastName != originalUser.LastName && uForm.LastName != \"\" {\n\t\toriginalUser.LastName = uForm.LastName\n\t\tchanged = true\n\t}\n\tif uForm.Mail != originalUser.Mail && uForm.Mail != \"\" {\n\t\toriginalUser.Mail = uForm.Mail\n\t\tchanged = true\n\t}\n\tif uForm.Address != originalUser.Address && uForm.Address != \"\" {\n\t\toriginalUser.Address = uForm.Address\n\t\tchanged = true\n\t}\n\tif uForm.PostNrAndPlace != originalUser.PostNrAndPlace && uForm.PostNrAndPlace != \"\" {\n\t\toriginalUser.PostNrAndPlace = uForm.PostNrAndPlace\n\t\tchanged = true\n\t}\n\tif uForm.PhoneNr != originalUser.PhoneNr && uForm.PhoneNr != \"\" {\n\t\toriginalUser.PhoneNr = uForm.PhoneNr\n\t\tchanged = true\n\t}\n\tif uForm.OrgNr != originalUser.OrgNr && uForm.OrgNr != \"\" {\n\t\toriginalUser.OrgNr = uForm.OrgNr\n\t\tchanged = true\n\t}\n\tif uForm.CountryID != originalUser.CountryID && uForm.CountryID != \"\" {\n\t\toriginalUser.CountryID = uForm.CountryID\n\t\tchanged = true\n\t}\n\tif uForm.BankAccount != originalUser.BankAccount && uForm.BankAccount != \"\" {\n\t\toriginalUser.BankAccount = uForm.BankAccount\n\t\tchanged = true\n\t}\n\treturn changed, originalUser\n}", "func (user *User) Validate() (map[string] interface{}, bool) {\n\n\tif !strings.Contains(user.Email, \"@\") {\n\t\treturn u.Message(false, \"Email address is required\"), false\n\t}\n\n\tif len(user.Password) < 6 {\n\t\treturn u.Message(false, \"Password is required\"), false\n\t}\n\n\t//Email must be unique\n\ttemp := &User{}\n\n\t//check for errors and duplicate emails\n\terr := GetDB().Table(UserTableName).Where(\"email = ?\", user.Email).First(temp).Error\n\tif err != nil && err != gorm.ErrRecordNotFound {\n\t\treturn u.Message(false, \"Connection error. 
Please retry\"), false\n\t}\n\tif temp.Email != \"\" {\n\t\treturn u.Message(false, \"Email address already in use by another user.\"), false\n\t}\n\n\treturn u.Message(false, \"Requirement passed\"), true\n}", "func (mt *UserTiny) Validate() (err error) {\n\n\tif mt.Href == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"href\"))\n\t}\n\tif mt.Name == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"name\"))\n\t}\n\treturn\n}", "func validateFlags() error {\n\t// if username == \"\" {\n\t// \treturn fmt.Errorf(\"username is required\")\n\t// }\n\n\tif host == \"\" {\n\t\treturn fmt.Errorf(\"host is required\")\n\t}\n\n\treturn nil\n}", "func (u *UserInfo) Validate(path *field.Path) (errs field.ErrorList) {\n\terrs = append(errs, u.ValidateSubjects(path.Child(\"subjects\"))...)\n\terrs = append(errs, u.ValidateRoles(path.Child(\"roles\"))...)\n\treturn errs\n}", "func (u User) Validate() error {\n\treturn nil\n}", "func (m *GetUserResponse) Validate() error {\n\tif err := m.User.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (uu *UserUpdate) check() error {\n\tif v, ok := uu.mutation.Tenant(); ok {\n\t\tif err := user.TenantValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"tenant\", err: fmt.Errorf(\"ent: validator failed for field \\\"tenant\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := uu.mutation.UUID(); ok {\n\t\tif err := user.UUIDValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"uuid\", err: fmt.Errorf(\"ent: validator failed for field \\\"uuid\\\": %w\", err)}\n\t\t}\n\t}\n\treturn nil\n}", "func (login *Login) Valid() bool {\n\tif \"\" == login.Email || \"\" == login.Password {\n\t\treturn false\n\t}\n\treturn true\n}", "func (account *Account) Validate() (map[string]interface{}, bool) {\n\n\tif !strings.Contains(account.Phone, \"+\") {\n\t\treturn u.Message(false, \"Phone Number address is required\"), false\n\t}\n\n\tif len(account.UserName) < 3 {\n\t\treturn u.Message(false, \"Username is required\"), false\n\t}\n\n\t//PhoneNumber must be unique\n\ttemp := &Account{}\n\n\t//check for errors and duplicate phones\n\terr := GetDB().Table(\"accounts\").Where(\"phone = ?\", account.Phone).First(temp).Error\n\tif err != nil && err != gorm.ErrRecordNotFound {\n\t\treturn u.Message(false, \"Connection error. Please retry\"+err.Error()), false\n\t}\n\tif temp.Phone != \"\" {\n\t\treturn u.Message(false, \"Phone Number address already in use by another user.\"), false\n\t}\n\n\t//check for errors and duplicate username\n\terr = GetDB().Table(\"accounts\").Where(\"user_name = ?\", account.UserName).First(temp).Error\n\tif err != nil && err != gorm.ErrRecordNotFound {\n\t\treturn u.Message(false, \"Connection error. 
Please retry\"+err.Error()), false\n\t}\n\tif temp.UserName != \"\" {\n\t\tresponse := fmt.Sprintf(\"Username: %d is already in use by another user.\", account.UserName)\n\t\treturn u.Message(false, response), false\n\t}\n\n\treturn u.Message(false, \"Requirement passed\"), true\n}", "func isUserValid(username, password string) bool {\n\tfor _, u := range userList {\n\t\tif u.Username == username && u.Password == password {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (u User) Validate() error {\n\tif u.Email == \"\" || u.Password == \"\" {\n\t\treturn ErrMalformedEntity\n\t}\n\n\tif !govalidator.IsEmail(u.Email) {\n\t\treturn ErrMalformedEntity\n\t}\n\n\treturn nil\n}", "func (u Username) isValid() bool {\n\treturn len(strings.Split(string(u), \".\")) == 2\n}", "func (u *User) Sanitize() {\n\tu.userData.Pswd = \"\"\n\tu.userData.Active = false\n}", "func (msg Screen) Validate() error {\n\tif len(msg.UserID) == 0 && len(msg.AnonymousID) == 0 {\n\t\treturn FieldError{\n\t\t\tType: \"Screen\",\n\t\t\tName: \"UserID\",\n\t\t\tValue: msg.UserID,\n\t\t}\n\t}\n\n\treturn nil\n}", "func (u *User) Validate() *errors.RestError {\n\tif err := validators.ValidateStruct(u); err != nil {\n\t\treturn err\n\t}\n\t// Sanitize Structure\n\tu.FirstName = strings.TrimSpace(u.FirstName)\n\tu.LastName = strings.TrimSpace(u.LastName)\n\tu.Email = strings.TrimSpace(u.Email)\n\tu.Username = strings.TrimSpace(u.Username)\n\tu.Password = strings.TrimSpace(u.Password)\n\t// Check password\n\tif err := u.validatePassword(); err != nil {\n\t\treturn err\n\t}\n\t// Check uniqueness\n\tif err := u.checkUniqueness(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (u *User) Validate() (err error) {\n\t// Validate domain? http://data.iana.org/TLD/tlds-alpha-by-domain.txt\n\tre := regexp.MustCompile(`.+.@..+\\...+`)\n\tif !re.Match([]byte(u.Email)) {\n\t\terr = fmt.Errorf(\"Invalid e-mail: %s\", u.Email)\n\t}\n\treturn\n}", "func (m *AuthenticatedUserDTO) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func ValidateUserStatus(value, _ interface{}) bool {\n\tswitch value := value.(type) {\n\tcase string:\n\t\tswitch value {\n\t\tcase Offline, OnlineAvailable, Busy:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tdefault:\n\t\tpanic(\"the accessibility field must be a string\")\n\t}\n}", "func (m *UserSettings) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func createHandler(w http.ResponseWriter, r *http.Request) {\n user := new(User)\n user.Token = validateToken(r.FormValue(\"token\"))\n user.PasswordHash = validateHash(r.FormValue(\"passHash\"))\n user.PublicKey = validatePublicKey(r.FormValue(\"publicKey\"))\n user.PublicHash = computePublicHash(user.PublicKey)\n user.CipherPrivateKey = validateHex(r.FormValue(\"cipherPrivateKey\"))\n\n log.Printf(\"Woot! 
New user %s %s\\n\", user.Token, user.PublicHash)\n\n if !SaveUser(user) {\n http.Error(w, \"That username is taken\", http.StatusBadRequest)\n }\n}", "func validateProviderInfo(provider register.ProviderRegistrar) bool {\n\t// All of the fields must have a value in them.\n\tif provider.GetNodeID() == \"\" {\n\t\tlogging.Warn(\"Provider registration issue: NodeID not set\")\n\t\treturn false\n\t}\n\tif provider.GetAddress() == \"\" {\n\t\tlogging.Warn(\"Provider registration issue: Provider IP address or domain name not set\")\n\t\treturn false\n\t}\n\tif provider.GetNetworkInfoGateway() == \"\" {\n\t\tlogging.Warn(\"Provider registration issue: Port for Gateway to Provider communications not set\")\n\t\treturn false\n\t}\n\tif provider.GetNetworkInfoClient() == \"\" {\n\t\tlogging.Warn(\"Provider registration issue: Port for Client to Provider communications not set\")\n\t\treturn false\n\t}\n\tif provider.GetNetworkInfoAdmin() == \"\" {\n\t\tlogging.Warn(\"Provider registration issue: Port for Admin to Provider communications not set\")\n\t\treturn false\n\t}\n\tif provider.GetRegionCode() == \"\" {\n\t\tlogging.Warn(\"Provider registration issue: Region Code not set\")\n\t\treturn false\n\t}\n\t_, err := provider.GetRootSigningKey()\n\tif err != nil {\n\t\tlogging.Warn(\"Provider registration issue: Root Signing Public Key error: %+v\", err)\n\t\treturn false\n\t}\n\t_, err = provider.GetSigningKey()\n\tif err != nil {\n\t\tlogging.Warn(\"Provider registration issue: Retrieval Signing Key error: %+v\", err)\n\t\treturn false\n\t}\n\treturn true\n}", "func Validate(values []Validation) bool {\n\tusername := regexp.MustCompile(\"[A-Za-z0-9]\")\n\temail := regexp.MustCompile(\"^[A-Za-z0-9]+[@]+[A-Za-z0-9]+[.]+[A-Za-z]+$\")\n\n\tfor i := 0; i < len(values); i++ {\n\t\tswitch values[i].Valid {\n\t\tcase \"username\":\n\t\t\tif !username.MatchString(values[i].Value) {\n\t\t\t\treturn false\n\t\t\t}\n\t\tcase \"email\":\n\t\t\tif !email.MatchString(values[i].Value) {\n\t\t\t\treturn false\n\t\t\t}\n\t\tcase \"password\":\n\t\t\tif len(values[i].Value) < 5 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}", "func ValidateFacebookCredentials(webUser *domain.AuthUser) bool {\n\tres, err := http.Get(fmt.Sprintf(\"https://graph.facebook.com/v2.9/me?access_token=%s\", webUser.ShortLivedToken))\n\n\tif err != nil {\n\t\tlog.Printf(\"Error: %s\", err)\n\t\treturn false\n\t}\n\n\tme := &me{}\n\terr = json.NewDecoder(res.Body).Decode(me)\n\n\tif err != nil {\n\t\tlog.Printf(\"Error: %s\", err)\n\t\treturn false\n\t}\n\n\tif me.ID == webUser.ID {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (accountInfo *AccountInfo) Validate() error {\n\taccountSchema, _ := GetAccountSchema(accountInfo.Domain)\n\tif accountSchema == nil {\n\t\treturn errors.New(\"schema undefined for domain \" + accountInfo.Domain)\n\t}\n\n\t// Group\n\tif !accountSchema.IsGroupExist(accountInfo.Group) {\n\t\treturn errors.New(\"unknown group \" + accountInfo.Group)\n\t}\n\n\t// UserID\n\tif accountInfo.Uid == \"\" {\n\t\treturn errors.New(\"uid can not be empty\")\n\t}\n\n\t// LoginIDs\n\tif len(accountInfo.LoginIDs) == 0 {\n\t\treturn errors.New(\"should have at least one login id\")\n\t}\n\trequiredIDs := accountSchema.getRequiredLogIDs()\n\tfor _, requiredID := range requiredIDs {\n\t\tif _, ok := accountInfo.LoginIDs[requiredID]; !ok {\n\t\t\treturn errors.New(\"login id:\" + requiredID + \" is required but not specified\")\n\t\t}\n\t}\n\tfor k, v := range accountInfo.LoginIDs {\n\t\tloginIDSchema, _ := 
accountSchema.GetLoginIDSchema(k)\n\t\tif loginIDSchema == nil {\n\t\t\treturn errors.New(\"login id schema for \" + k + \" is not defined\")\n\t\t} else {\n\t\t\tif !loginIDSchema.NeedVerified {\n\t\t\t\tv.Verified = true\n\t\t\t\t// accountInfo.LoginIDs[k] = v\n\t\t\t}\n\t\t\tif !loginIDSchema.Validator.Validate(v.ID) {\n\t\t\t\treturn errors.New(\"invalid format of login id \" + k + \":\" + v.ID)\n\t\t\t}\n\t\t}\n\t}\n\n\t// options\n\toptionsMap := mergeMaps(accountInfo.Profiles, accountInfo.Others)\n\trequiredOptions := accountSchema.getRequiredOptions()\n\tfor _, requiredOption := range requiredOptions {\n\t\tif _, ok := optionsMap[requiredOption]; !ok {\n\t\t\treturn errors.New(\"option:\" + requiredOption + \" is required but not specified\")\n\t\t}\n\t}\n\n\tfor k, v := range optionsMap {\n\t\toptionSchema, _ := accountSchema.GetOptionSchema(k)\n\t\tif optionSchema == nil {\n\t\t\treturn errors.New(\"option schema for \" + k + \" is not defined\")\n\t\t} else {\n\t\t\tif !optionSchema.Validator.Validate(v) {\n\t\t\t\treturn errors.New(\"invalid format of option \" + k)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func ValidateUserCreateRequest(user models.RegisterRequest) []error {\n\tvar errs []error\n\tif user.Login == \"\" {\n\t\terrs = append(errs, fmt.Errorf(isRequired, \"Login\"))\n\t} else {\n\t\tif err := emailx.ValidateFast(user.Login); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\tif user.Password == \"\" {\n\t\terrs = append(errs, fmt.Errorf(isRequired, \"Password\"))\n\t}\n\tif user.ReCaptcha == \"\" {\n\t\terrs = append(errs, fmt.Errorf(isRequired, \"ReCaptcha\"))\n\t}\n\n\tif len(errs) > 0 {\n\t\treturn errs\n\t}\n\treturn nil\n}", "func validateAuth(c *fb.Context, r *http.Request) (bool, *fb.User) {\n\tif c.Auth.Method == \"none\" {\n\t\tc.User = c.DefaultUser\n\t\treturn true, c.User\n\t}\n\n\t// If proxy auth is used do not verify the JWT token if the header is provided.\n\tif c.Auth.Method == \"proxy\" {\n\t\tu, err := c.Store.Users.GetByUsername(r.Header.Get(c.Auth.Header), c.NewFS)\n\t\tif err != nil {\n\t\t\treturn false, nil\n\t\t}\n\t\tc.User = u\n\t\treturn true, c.User\n\t}\n\n\tkeyFunc := func(token *jwt.Token) (interface{}, error) {\n\t\treturn c.Key, nil\n\t}\n\n\tvar claims claims\n\ttoken, err := request.ParseFromRequestWithClaims(r,\n\t\textractor{},\n\t\t&claims,\n\t\tkeyFunc,\n\t)\n\n\tif err != nil || !token.Valid {\n\t\treturn false, nil\n\t}\n\n\tu, err := c.Store.Users.Get(claims.User.ID, c.NewFS)\n\tif err != nil {\n\t\treturn false, nil\n\t}\n\n\tc.User = u\n\treturn true, u\n}" ]
[ "0.69678235", "0.6818074", "0.66516536", "0.66227204", "0.6599855", "0.649384", "0.6470993", "0.64408463", "0.6394916", "0.6368812", "0.63549364", "0.6314162", "0.6273746", "0.6223283", "0.6190359", "0.61876845", "0.6181329", "0.6174275", "0.6158093", "0.61443156", "0.61392903", "0.61265653", "0.61150765", "0.6112772", "0.60959256", "0.6094869", "0.608972", "0.60687363", "0.6068494", "0.6066454", "0.60558736", "0.60393846", "0.6031404", "0.6020526", "0.6013143", "0.60031337", "0.5981396", "0.59766924", "0.5961188", "0.594386", "0.5933611", "0.59165514", "0.5915083", "0.5908736", "0.5906373", "0.5899463", "0.589878", "0.58929574", "0.58802336", "0.5879989", "0.58529806", "0.5846399", "0.5839044", "0.5826241", "0.5825305", "0.5824529", "0.5822746", "0.5806929", "0.5805801", "0.5803008", "0.5802479", "0.58017683", "0.5791951", "0.57873976", "0.57694197", "0.5765443", "0.57620394", "0.5761944", "0.57581234", "0.5752965", "0.5746774", "0.5746614", "0.5737902", "0.57335013", "0.5726934", "0.57208014", "0.5719189", "0.57188565", "0.57167226", "0.5706148", "0.5684485", "0.5683464", "0.5677049", "0.5656904", "0.56521404", "0.5650988", "0.56492394", "0.5645933", "0.56442124", "0.5643926", "0.56390506", "0.56381744", "0.5614422", "0.55991757", "0.55977565", "0.55909604", "0.55892104", "0.55883664", "0.55875343", "0.55834526" ]
0.5871436
50
RegisterUsecase is an implementation of RegistryContract.RegisterUsecase()
func (r *arisanSystemRegistry) RegisterUsecase() {
	databaseConnectionString := config.GetString("database.connectionstring", "test.db")
	db, err := gorm.Open(sqlite.Open(databaseConnectionString), &gorm.Config{})
	if err != nil {
		panic("failed to connect database")
	}
	gw := prod.NewSuperGateway(db)
	//r.bayarSetoranHandler(gw)
	r.buatArisanHandler(gw)
	r.bukaAplikasiHandler(gw)
	//r.jawabUndanganHandler(gw)
	r.kocokUndianHandler(gw)
	r.mulaiArisanHandler(gw)
	r.registerPesertaHandler(gw)
	r.setoranTidakDibayarHandler(gw)
	r.tagihSetoranHandler(gw)
	r.undangPesertaHandler(gw)
}
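A minimal, self-contained sketch of the database step this usecase registry performs, assuming gorm v2 (gorm.io/gorm with the gorm.io/driver/sqlite driver) as the imports the snippet relies on; the "test.db" fallback mirrors the config default above, and the gateway hand-off appears only as a comment because prod.NewSuperGateway is specific to that codebase:

package main

import (
	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

func main() {
	// Open the SQLite connection exactly as RegisterUsecase does,
	// falling back to "test.db" when no connection string is configured.
	db, err := gorm.Open(sqlite.Open("test.db"), &gorm.Config{})
	if err != nil {
		panic("failed to connect database")
	}
	// In the document above, db is then wrapped by a gateway
	// (prod.NewSuperGateway(db)) before the usecase handlers are registered.
	_ = db
}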
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (reg *registrar) Register(example interface{}) error {\n\treg.lock.Lock()\n\tdefer reg.lock.Unlock()\n\treturn reg.Registry.Register(example)\n}", "func (s SwxProxy) Register(_ context.Context, _ *protos.RegistrationRequest) (*protos.RegistrationAnswer, error) {\n\treturn &protos.RegistrationAnswer{}, nil\n}", "func (_WyvernExchange *WyvernExchangeCaller) Registry(opts *bind.CallOpts) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _WyvernExchange.contract.Call(opts, out, \"registry\")\n\treturn *ret0, err\n}", "func (turnbull *turnbull) buildRegistryUsecaseInteractor(entity model.Entity) (error){\n\n\t// Build\n\tbuf := &bytes.Buffer{}\n\terr := turnbull.generator.UsecaseInteractorRegistry(entity, buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Exists\n\tif len(buf.String()) > 0 {\n\n\t\t// File Name\n\t\tfileName, err := turnbull.formatter.OutputUsecaseInteractorRegistryFile(entity)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Ensure\n\t\tdirName := filepath.Dir(fileName)\n\t\tif _, serr := os.Stat(dirName); serr != nil {\n\t\t\tmerr := os.MkdirAll(dirName, os.ModePerm)\n\t\t\tif merr != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// File\n\t\tfile, err := os.Create(fileName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer file.Close()\n\n\t\t// Write\n\t\t_, err = file.WriteString(buf.String())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\n\t}\n\n\t\n\n\treturn nil\n}", "func RegFactory(entry BckFactory) { defaultReg.regFactory(entry) }", "func TestRegistryWithImpl(T *testing.T, factory func() Registry) {\n\ttestRegistryIsValid(T, factory())\n\ttestRegisterCantResolveMissingType(T, factory())\n\ttestRegisterCanResolveBoundType(T, factory())\n\ttestRegisterAndBindType(T, factory())\n\ttestBindDoesNotDestroyExistingBindings(T, factory())\n\ttestRecursiveResolution(T, factory())\n}", "func (s NoUseSwxProxy) Register(\n\tctx context.Context,\n\treq *protos.RegistrationRequest,\n) (*protos.RegistrationAnswer, error) {\n\treturn &protos.RegistrationAnswer{}, fmt.Errorf(\"Register is NOT IMPLEMENTED\")\n}", "func RegisterTypes(registry interface {\n\t RegisterType(name string, obj any)\n}) {\n\n}", "func (_KNS *KNSTransactor) Register(opts *bind.TransactOpts, prime_owner common.Address, wallet common.Address, Jid string, tel string) (*types.Transaction, error) {\n\treturn _KNS.contract.Transact(opts, \"Register\", prime_owner, wallet, Jid, tel)\n}", "func (r *Registry) Register(school data.School) {\n\tr.schoolList = append(r.schoolList, school)\n}", "func (tqsc *Controller) Register() {\n}", "func Register(name string, hash Interface) Interface {\n\tsimplepass[name] = hash\n\treturn hash\n}", "func (a *Alias) Register(example interface{}) error {\n\tif !a.aliased {\n\t\tfmt.Println(\"nar\", a.alias)\n\t\tif err := a.registry.Alias(a.alias, example); err != nil {\n\t\t\treturn fmt.Errorf(\"register alias: %w\", err)\n\t\t}\n\t\ta.aliased = true\n\t}\n\n\tif err := a.registry.Register(example); err != nil {\n\t\treturn fmt.Errorf(\"register example: %w\", err)\n\t}\n\n\treturn nil\n}", "func Register(t Task) {\n\tDefaultRegistry.Register(t)\n}", "func (c *Component) Register() {}", "func Register(scheme string, fs func(context.Context) Interface) {\n\tif _, ok := registry[scheme]; ok {\n\t\tpanic(fmt.Sprintf(\"scheme %v already registered\", scheme))\n\t}\n\tregistry[scheme] = fs\n}", "func Register(name string, obj interface{}) {\n\ti.Register(name, obj)\n}", "func (_Ethdkg *EthdkgTransactor) 
Register(opts *bind.TransactOpts, public_key [2]*big.Int) (*types.Transaction, error) {\n\treturn _Ethdkg.contract.Transact(opts, \"register\", public_key)\n}", "func Register(id int, handlerName string) int", "func register(orgName string, registerReq *msp.RegistrationRequest, sdk *fabsdk.FabricSDK) (string, error) {\n\tmspClient, err := msp.New(sdk.Context(), msp.WithOrg(orgName))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn mspClient.Register(registerReq)\n}", "func Register(def ...Definition) {\n\tcrypt.Register(def...)\n}", "func (r *Registry) Register(pdb *db.PostgresDB, info *rentities.RegisterInfo) error {\n\tsList, ok := r.ServicesMap[info.TName]\n\tif ok {\n\t\tfor _, ri := range sList {\n\t\t\tif ri.IID == info.IID || ri.IP == info.IP {\n\t\t\t\treturn fmt.Errorf(\"Register error: service instance exists\")\n\t\t\t}\n\t\t}\n\t}\n\tr.ServicesMap[info.TName] = append(r.ServicesMap[info.TName], info)\n\t//add into db\n\tpdb.InsertReg(*info)\n\treturn nil\n}", "func (m *Manager) Register(args RegisterArgs, reply *string) error {\n\tfmt.Println(\"Registering key\", args.Key)\n\tfmt.Println(\"To tenant\", args.TenantID)\n\n\tm.validKeys[args.Key] = args.TenantID\n\t*reply = \"OK\"\n\treturn nil\n}", "func bindRegistry(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(RegistryABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func bindRegistry(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(RegistryABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func TestRegisty(t *testing.T) {\n\tr := Registry()\n\tte := NewTestAutoscalingEngine()\n\tr.Put(te)\n\n\tfound := r.IsRegistered(te.name)\n\tassert.True(t, found)\n}", "func Register(scheme string, b Broker) {\n\tbrokerRegistery[scheme] = b\n}", "func RegisterTypes(registry interface {\n\t RegisterType(name string, initializer func() any)\n}) {\n\n}", "func RegisterTypes(registry interface {\n\t RegisterType(name string, initializer func() any)\n}) {\n\n}", "func RegisterTypes(registry interface {\n\t RegisterType(name string, initializer func() any)\n}) {\n\n}", "func (t *targetrunner) register() error {\n\tjsbytes, err := json.Marshal(t.si)\n\tif err != nil {\n\t\tglog.Errorf(\"Unexpected failure to json-marshal %+v, err: %v\", t.si, err)\n\t\treturn err\n\t}\n\turl := ctx.config.Proxy.URL + \"/\" + Rversion + \"/\" + Rcluster\n\t_, err = t.call(url, http.MethodPost, jsbytes)\n\treturn err\n}", "func (_DelegateProfile *DelegateProfileCaller) Register(opts *bind.CallOpts) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _DelegateProfile.contract.Call(opts, out, \"register\")\n\treturn *ret0, err\n}", "func (registry *Registry) Register(t *task.Task) string {\n\t// For the purposes of this exercise we're using a stringified UUID as\n\t// the task ID. This should be statistically unique - certainly we\n\t// should not expect to see a collision in the lifetime of the server.\n\t// It's reasonably safe to treat the generated handles as unique without\n\t// further checking. 
In fact, if we see duplicate handles we probably\n\t// have bigger problems on the system than this toy service misbehaving.\n\n\thandle := handleFromUUID()\n\n\tregistry.lock.Lock()\n\tdefer registry.lock.Unlock()\n\n\tregistry.db[handle] = t\n\treturn handle\n}", "func Register(kind string, props actor.Props) {\n\tnameLookup[kind] = props\n}", "func (p *TestProvider) Register(addr string, c Client) {\n\tp.clients[addr] = c\n}", "func TestRegister(t *testing.T) {\n\n\tfabricCAClient, err := NewFabricCAClient(org1, configImp, cryptoSuiteProvider)\n\tif err != nil {\n\t\tt.Fatalf(\"NewFabricCAClient returned error: %v\", err)\n\t}\n\tuser := mocks.NewMockUser(\"test\")\n\t// Register with nil request\n\t_, err = fabricCAClient.Register(user, nil)\n\tif err == nil {\n\t\tt.Fatalf(\"Expected error with nil request\")\n\t}\n\tif err.Error() != \"registration request required\" {\n\t\tt.Fatalf(\"Expected error registration request required. Got: %s\", err.Error())\n\t}\n\n\t//Register with nil user\n\t_, err = fabricCAClient.Register(nil, &ca.RegistrationRequest{})\n\tif err == nil {\n\t\tt.Fatalf(\"Expected error with nil user\")\n\t}\n\tif !strings.Contains(err.Error(), \"failed to create request for signing identity\") {\n\t\tt.Fatalf(\"Expected error failed to create request for signing identity. Got: %s\", err.Error())\n\t}\n\t// Register with nil user cert and key\n\t_, err = fabricCAClient.Register(user, &ca.RegistrationRequest{})\n\tif err == nil {\n\t\tt.Fatalf(\"Expected error without user enrolment information\")\n\t}\n\tif !strings.Contains(err.Error(), \"failed to create request for signing identity\") {\n\t\tt.Fatalf(\"Expected error failed to create request for signing identity. Got: %s\", err.Error())\n\t}\n\n\tuser.SetEnrollmentCertificate(readCert(t))\n\tkey, err := cryptosuite.GetDefault().KeyGen(cryptosuite.GetECDSAP256KeyGenOpts(true))\n\tif err != nil {\n\t\tt.Fatalf(\"KeyGen return error %v\", err)\n\t}\n\tuser.SetPrivateKey(key)\n\t// Register without registration name parameter\n\t_, err = fabricCAClient.Register(user, &ca.RegistrationRequest{})\n\tif !strings.Contains(err.Error(), \"failed to register user\") {\n\t\tt.Fatalf(\"Expected error failed to register user. 
Got: %s\", err.Error())\n\t}\n\n\t// Register with valid request\n\tvar attributes []ca.Attribute\n\tattributes = append(attributes, ca.Attribute{Key: \"test1\", Value: \"test2\"})\n\tattributes = append(attributes, ca.Attribute{Key: \"test2\", Value: \"test3\"})\n\tsecret, err := fabricCAClient.Register(user, &ca.RegistrationRequest{Name: \"test\",\n\t\tAffiliation: \"test\", Attributes: attributes})\n\tif err != nil {\n\t\tt.Fatalf(\"fabricCAClient Register return error %v\", err)\n\t}\n\tif secret != \"mockSecretValue\" {\n\t\tt.Fatalf(\"fabricCAClient Register return wrong value %s\", secret)\n\t}\n}", "func (cli *CLI) register(rgst any, cmds ...string) {\n\tif len(cmds) > 0 {\n\t\tcmd := cmds[0]\n\t\tif len(cmds) > 1 {\n\t\t\tcli.cmdMap[cmd] = cmds[1]\n\t\t}\n\t\t// make the function map th struct\n\t\tcli.commands[cmd] = Cmd{\n\t\t\tCommand: cmd,\n\t\t\tAlias: cli.cmdMap[cmd],\n\t\t}\n\t\t// register feedback\n\t\tcli.cmds[cmd] = rgst\n\t}\n}", "func Register(fn interface{}) error {\n\t// Validate that its a function\n\tfnType := reflect.TypeOf(fn)\n\tif err := validateFnFormat(fnType); err != nil {\n\t\treturn err\n\t}\n\t// Check if already registered\n\tfnName := getFunctionName(fn)\n\t_, ok := fnLookup.getFn(fnName)\n\tif ok {\n\t\treturn nil\n\t}\n\tfor i := 0; i < fnType.NumIn(); i++ {\n\t\targType := fnType.In(i)\n\t\t// Interfaces cannot be registered, their implementations should be\n\t\t// https://golang.org/pkg/encoding/gob/#Register\n\t\tif argType.Kind() != reflect.Interface {\n\t\t\targ := reflect.Zero(argType).Interface()\n\t\t\tif err := GlobalBackend().Encoder().Register(arg); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"unable to register the message for encoding\")\n\t\t\t}\n\t\t}\n\t}\n\tfnLookup.addFn(fnName, fn)\n\treturn nil\n}", "func (sr *Registry) Register(name string, suite Suite) {\n\tif _, found := sr.registry[name]; found {\n\t\tlog.Fatal(fmt.Sprintf(\"Trying to register the suite %s multiple times\", name))\n\t}\n\n\tsr.registry[name] = suite\n}", "func (a *PushKeyAPI) register(params interface{}) (resp *rpc.Response) {\n\treturn rpc.Success(a.mods.PushKey.Register(cast.ToStringMap(params)))\n}", "func (_KNS *KNSCaller) RegistryT(opts *bind.CallOpts, arg0 string) (struct {\n\tPrimeOwner common.Address\n\tWallet common.Address\n\tTel string\n}, error) {\n\tret := new(struct {\n\t\tPrimeOwner common.Address\n\t\tWallet common.Address\n\t\tTel string\n\t})\n\tout := ret\n\terr := _KNS.contract.Call(opts, out, \"RegistryT\", arg0)\n\treturn *ret, err\n}", "func Register(reg Registry) {\n\treg.Add(\"aws:lambda_alias\", reflect.TypeOf(&Alias{}))\n\treg.Add(\"aws:lambda_event_source_mapping\", reflect.TypeOf(&EventSourceMapping{}))\n\treg.Add(\"aws:lambda_function\", reflect.TypeOf(&Function{}))\n\treg.Add(\"aws:lambda_layer_version_permission\", reflect.TypeOf(&LayerVersionPermission{}))\n\treg.Add(\"aws:lambda_permission\", reflect.TypeOf(&Permission{}))\n}", "func Register(s string, new NewHash) {\n\t// TODO: check registry\n\n\t// check the Hash interface is matched\n\tvar _ Hash = new()\n\n\tsupportedHashes[s] = new\n}", "func (_EthereumIdentityRegistryContract *EthereumIdentityRegistryContractTransactor) RegisterIdentity(opts *bind.TransactOpts, _centrifugeId *big.Int, _identity common.Address) (*types.Transaction, error) {\n\treturn _EthereumIdentityRegistryContract.contract.Transact(opts, \"registerIdentity\", _centrifugeId, _identity)\n}", "func (a *Application) RegisterProvider() {\n\n}", "func TestEventRegistry(t *testing.T, newRegistry func() 
app.EventRegistry) {\n\ttests := []struct {\n\t\tscenario string\n\t\tsubName string\n\t\thandler func(*bool) interface{}\n\t\tcalled bool\n\t\tdispName string\n\t\tdispArg interface{}\n\t\tpanic bool\n\t}{\n\t\t{\n\t\t\tscenario: \"register and dispatch without arg\",\n\t\t\tsubName: \"test\",\n\t\t\thandler: func(called *bool) interface{} {\n\t\t\t\treturn func() {\n\t\t\t\t\t*called = true\n\t\t\t\t}\n\t\t\t},\n\t\t\tcalled: true,\n\t\t\tdispName: \"test\",\n\t\t\tdispArg: nil,\n\t\t},\n\t\t{\n\t\t\tscenario: \"register without arg and dispatch with arg\",\n\t\t\tsubName: \"test\",\n\t\t\thandler: func(called *bool) interface{} {\n\t\t\t\treturn func() {\n\t\t\t\t\t*called = true\n\t\t\t\t}\n\t\t\t},\n\t\t\tcalled: true,\n\t\t\tdispName: \"test\",\n\t\t\tdispArg: \"foobar\",\n\t\t},\n\t\t{\n\t\t\tscenario: \"register and dispatch with arg\",\n\t\t\tsubName: \"test\",\n\t\t\thandler: func(called *bool) interface{} {\n\t\t\t\treturn func(arg string) {\n\t\t\t\t\t*called = true\n\n\t\t\t\t\tif arg != \"hello\" {\n\t\t\t\t\t\tpanic(\"greet is not hello\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t\tcalled: true,\n\t\t\tdispName: \"test\",\n\t\t\tdispArg: \"hello\",\n\t\t},\n\t\t{\n\t\t\tscenario: \"register and dispatch with bad arg\",\n\t\t\tsubName: \"test\",\n\t\t\thandler: func(called *bool) interface{} {\n\t\t\t\treturn func(arg int) {\n\t\t\t\t\t*called = true\n\t\t\t\t}\n\t\t\t},\n\t\t\tcalled: false,\n\t\t\tdispName: \"test\",\n\t\t\tdispArg: \"hello\",\n\t\t},\n\t\t{\n\t\t\tscenario: \"register non func handler\",\n\t\t\tsubName: \"test\",\n\t\t\thandler: func(called *bool) interface{} { return nil },\n\t\t\tpanic: true,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.scenario, func(t *testing.T) {\n\t\t\tdefer func() {\n\t\t\t\terr := recover()\n\n\t\t\t\tif err != nil && !test.panic {\n\t\t\t\t\tt.Error(err)\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tcalled := false\n\n\t\t\tr := newRegistry()\n\t\t\tunsub := r.Subscribe(test.subName, test.handler(&called))\n\t\t\tdefer unsub()\n\n\t\t\tr.Dispatch(test.dispName, test.dispArg)\n\n\t\t\tif called != test.called {\n\t\t\t\tt.Error(\"called expected:\", test.called)\n\t\t\t\tt.Error(\"called: \", called)\n\t\t\t}\n\n\t\t\tif test.panic {\n\t\t\t\tt.Error(\"no panic\")\n\t\t\t}\n\t\t})\n\t}\n}", "func doRegistration(data []byte) (err error) {\n\t// Register service via PUT request.\n\tendpointRegister = fmt.Sprintf(\"http://localhost:%s/v1/agent/service/register\", port)\n\tr, err := http.NewRequest(\"PUT\", endpointRegister, bytes.NewBufferString(string(data)))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"sending service registration request for %s\\n\", consul.Name)\n\n\tclient = NewClient()\n\t_, err = client.Do(r)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (e *Engine) register() error {\n\t// add rule to rules engine\n\tif e.Configuration.ActuationSet() {\n\t\terr := e.inferRule()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"can't register engine %s as rule is faulty with actuation %v\", e.Name, e.Configuration.Actuation)\n\t\t}\n\t\terr = e.Rule.Add()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"can't register engine %s as rule couldn't be added to rules engine\", e.Name)\n\t\t}\n\t}\n\n\t// add engine as export client\n\texportClient := ExportClient{}\n\terr := exportClient.Add(*e)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't register engine %s, rule couldn't be added to rules engine\", e.Name)\n\t}\n\n\t// value descriptors init are automatically created by engine and sent to 
registry for registration\n\treturn nil\n}", "func Register(p ComponentInterface) { Get().(*impl).Register(p) }", "func Register(p Protocol, n NewFunc) {\n\treglock.Lock()\n\tdefer reglock.Unlock()\n\tregistry[p] = n\n}", "func (_WyvernExchange *WyvernExchangeCallerSession) Registry() (common.Address, error) {\n\treturn _WyvernExchange.Contract.Registry(&_WyvernExchange.CallOpts)\n}", "func (s *serverRegistry) Register(*FContext, FAsyncCallback) error {\n\treturn nil\n}", "func (s *servicecenter) Registry(clusterName string, data *pb.SyncData) {\n\tmapping := s.storage.GetMapByCluster(clusterName)\n\tfor _, inst := range data.Instances {\n\t\tsvc := searchService(inst, data.Services)\n\t\tif svc == nil {\n\t\t\terr := errors.New(\"service does not exist\")\n\t\t\tlog.Error(fmt.Sprintf(\"servicecenter.Registry, serviceID = %s, instanceId = %s\", inst.ServiceId, inst.InstanceId), err)\n\t\t\tcontinue\n\t\t}\n\n\t\t// If the svc is in the mapping, just do nothing, if not, created it in servicecenter and get the new serviceID\n\t\tsvcID, err := s.createService(svc)\n\t\tif err != nil {\n\t\t\tlog.Error(\"create service failed\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\t// If inst is in the mapping, just heart beat it in servicecenter\n\t\tlog.Debug(fmt.Sprintf(\"trying to do registration of instance, instanceID = %s\", inst.InstanceId))\n\t\tif s.heartbeatInstances(mapping, inst) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If inst is not in the mapping, that is because this the first time syncer get the instance data\n\t\t// in this case, we should registry it to the servicecenter and get the new instanceID\n\t\titem := &pb.MappingEntry{\n\t\t\tClusterName: clusterName,\n\t\t\tDomainProject: svc.DomainProject,\n\t\t\tOrgServiceID: svc.ServiceId,\n\t\t\tOrgInstanceID: inst.InstanceId,\n\t\t\tCurServiceID: svcID,\n\t\t\tCurInstanceID: s.registryInstances(svc.DomainProject, svcID, inst),\n\t\t}\n\n\t\t// Use new serviceID and instanceID to update mapping data in this servicecenter\n\t\tif item.CurInstanceID != \"\" {\n\t\t\tmapping = append(mapping, item)\n\t\t}\n\t}\n\t// UnRegistry instances that is not in the data which means the instance in the mapping is no longer actived\n\tmapping = s.unRegistryInstances(data, mapping)\n\t// Update mapping data of the cluster to the storage of the servicecenter\n\ts.storage.UpdateMapByCluster(clusterName, mapping)\n}", "func (r *Resolver) Register(name string, e Extension) bool {\n\tr.Execers[name] = e\n\treturn true\n}", "func (e *Client) Register(ctx context.Context, filename string) (*RegisterResponse, error) {\n\tconst action = \"/register\"\n\turl := e.baseURL + action\n\n\treqBody, err := json.Marshal(map[string]interface{}{\n\t\t\"events\": []EventType{Invoke, Shutdown},\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thttpReq, err := http.NewRequestWithContext(ctx, \"POST\", url, bytes.NewBuffer(reqBody))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thttpReq.Header.Set(extensionNameHeader, filename)\n\thttpRes, err := e.httpClient.Do(httpReq)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif httpRes.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"request failed with status %s\", httpRes.Status)\n\t}\n\tdefer httpRes.Body.Close()\n\tbody, err := ioutil.ReadAll(httpRes.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres := RegisterResponse{}\n\terr = json.Unmarshal(body, &res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\te.ExtensionID = httpRes.Header.Get(extensionIdentiferHeader)\n\tfmt.Println(\"Extension id:\", e.ExtensionID)\n\treturn &res, 
nil\n}", "func (_DelegateProfile *DelegateProfileCallerSession) Register() (common.Address, error) {\n\treturn _DelegateProfile.Contract.Register(&_DelegateProfile.CallOpts)\n}", "func (tm *TestManager) Register(obj interface{}) {\n\ttm.ModuleMap[Common.GetTypeName(obj)] = reflect.TypeOf(obj)\n}", "func init() {\n\tworkflow.Register(usecase.Workflow)\n\tactivity.Register(usecase.HelloworldActivity)\n\tactivity.Register(usecase.HelloworldGuys)\n\tactivity.Register(usecase.HelloMoving)\n\tactivity.Register(usecase.HelloNoContext)\n\n\tworkflow.Register(usecase.OrchestrationWorkflow)\n\tactivity.Register(usecase.ValidateProductPackage)\n\tactivity.Register(usecase.ValidateVoucher)\n\tactivity.Register(usecase.CreateOrder)\n\tactivity.Register(usecase.CreatePayment)\n\tactivity.Register(usecase.RollbackOrder)\n}", "func (z *zpoolctl) Reguid(ctx context.Context, name string) *execute {\n\targs := []string{\"reguid\", name}\n\treturn &execute{ctx: ctx, name: z.cmd, args: args}\n}", "func (rs *RegistryService) Register(ctx context.Context, in *proto.RegisterType) (*proto.EmptyResponse, error) {\n\trs.mu.RLock()\n\tdefer rs.mu.RUnlock()\n\n\trs.hosts[in.GetName()] = in.GetHost()\n\n\treturn &proto.EmptyResponse{}, nil\n}", "func (r *AccountDIDRegistry) Register(accountDID DID, addr string, hash []byte) (string, []byte, error) {\n\treturn r.updateByStatus(accountDID, addr, hash, nil, Initial)\n}", "func (f *contentSpecMgr) Reg(contentType string, spec ContentResolver, unitTest ...bool) {\n\terr := f._reg(contentType, spec)\n\tif err != nil && len(unitTest) == 0 {\n\t\tdebug.Assert(false)\n\t\tcos.ExitLog(err)\n\t}\n}", "func Register(name string, fn testFunc) {\n\ttests[name] = fn\n}", "func (ilw *InLogicWarehouse) Register(name string, inst interface{}) {\n\tilw.sync.Lock()\n\tdefer ilw.sync.Unlock()\n\n\tilw.factory.register(name, inst)\n}", "func testRegister(t *testing.T, useTLS bool) {\n\tassert := assert.New(t)\n\tzero := 0\n\tassert.Equal(0, zero)\n\n\t// Create new TestPingServer - needed for calling RPCs\n\tmyJrpcfs := &TestPingServer{}\n\n\trrSvr := getNewServer(10*time.Second, false, useTLS)\n\tassert.NotNil(rrSvr)\n\n\t// Register the Server - sets up the methods supported by the\n\t// server\n\terr := rrSvr.Register(myJrpcfs)\n\tassert.Nil(err)\n\n\t// Make sure we discovered the correct functions\n\tassert.Equal(4, len(rrSvr.svrMap))\n}", "func Register(i interface{}) {\n\tregister(i)\n}", "func (oc *oracleClient) Register(honest bool, id string) {\n\toc.client.Get(register, registerQuery(oc.world, id, honest))\n}", "func (suite *BinPackingTestSuite) TestRegister() {\n\trankers[DeFrag] = nil\n\tregister(DeFrag, nil)\n\tsuite.Nil(rankers[DeFrag])\n\tregister(DeFrag, NewDeFragRanker)\n\tsuite.Nil(rankers[DeFrag])\n\tdelete(rankers, DeFrag)\n\tregister(DeFrag, NewDeFragRanker)\n\tsuite.NotNil(rankers[DeFrag])\n}", "func Register(scheme string, creator econf.DataSource) {\n\tregistry[scheme] = creator\n}", "func Register(scheme string, creator econf.DataSource) {\n\tregistry[scheme] = creator\n}", "func Register(name string, factory acl.Factory) {\n\tmu.Lock()\n\tdefer mu.Unlock()\n\tif _, ok := acls[name]; ok {\n\t\tpanic(fmt.Sprintf(\"register a registered key: %s\", name))\n\t}\n\tacls[name] = factory\n}", "func Register(name string, fn func(ctx Context, payload map[string]interface{}) (Runner, error)) {\n\tonce.Do(func() {\n\t\trunners = factory{\n\t\t\trunners: make(map[string]func(ctx Context, payload map[string]interface{}) (Runner, 
error)),\n\t\t}\n\t})\n\n\trunners.Lock()\n\tdefer runners.Unlock()\n\n\trunners.runners[name] = fn\n}", "func Register(name string, driver NewInstanceCreatorFunc) {\n\tdefaultRegistry.Register(name, driver)\n}", "func (_EthereumIdentityRegistryContract *EthereumIdentityRegistryContractTransactorSession) RegisterIdentity(_centrifugeId *big.Int, _identity common.Address) (*types.Transaction, error) {\n\treturn _EthereumIdentityRegistryContract.Contract.RegisterIdentity(&_EthereumIdentityRegistryContract.TransactOpts, _centrifugeId, _identity)\n}", "func TestInstructionUpdateRegistry(t *testing.T) {\n\tt.Run(\"NoPubkeyV156\", func(t *testing.T) {\n\t\tentryType := modules.RegistryTypeWithoutPubkey\n\t\ttestInstructionUpdateRegistry(t, entryType, false, func(tb *testProgramBuilder, spk types.SiaPublicKey, rv modules.SignedRegistryValue) {\n\t\t\ttb.AddUpdateRegistryInstructionV156(spk, rv)\n\t\t})\n\t})\n\tt.Run(\"NoPubkey\", func(t *testing.T) {\n\t\tentryType := modules.RegistryTypeWithoutPubkey\n\t\ttestInstructionUpdateRegistry(t, entryType, true, func(tb *testProgramBuilder, spk types.SiaPublicKey, rv modules.SignedRegistryValue) {\n\t\t\ttb.AddUpdateRegistryInstruction(spk, rv)\n\t\t})\n\t})\n\tt.Run(\"WithPubkey\", func(t *testing.T) {\n\t\tentryType := modules.RegistryTypeWithPubkey\n\t\ttestInstructionUpdateRegistry(t, entryType, true, func(tb *testProgramBuilder, spk types.SiaPublicKey, rv modules.SignedRegistryValue) {\n\t\t\ttb.AddUpdateRegistryInstruction(spk, rv)\n\t\t})\n\t})\n\tt.Run(\"Invalid\", func(t *testing.T) {\n\t\tentryType := modules.RegistryTypeInvalid\n\t\ttestInstructionUpdateRegistry(t, entryType, true, func(tb *testProgramBuilder, spk types.SiaPublicKey, rv modules.SignedRegistryValue) {\n\t\t\ttb.AddUpdateRegistryInstruction(spk, rv)\n\t\t})\n\t})\n}", "func Register(c Factory) {\n\tfactories[c.String()] = c\n}", "func (s *supernodeRegister) Register(peerPort int) (*RegisterResult, *errortypes.DfError) {\n\tvar (\n\t\tresp *types.RegisterResponse\n\t\te error\n\t\ti int\n\t\tretryTimes = 0\n\t\tstart = time.Now()\n\t)\n\n\tlogrus.Infof(\"do register to one of %v\", s.cfg.Nodes)\n\tnodes, nLen := s.cfg.Nodes, len(s.cfg.Nodes)\n\treq := s.constructRegisterRequest(peerPort)\n\tfor i = 0; i < nLen; i++ {\n\t\tif s.lastRegisteredNode == nodes[i] {\n\t\t\tlogrus.Warnf(\"the last registered node is the same(%s)\", nodes[i])\n\t\t\tcontinue\n\t\t}\n\t\treq.SupernodeIP = netutils.ExtractHost(nodes[i])\n\t\tresp, e = s.api.Register(nodes[i], req)\n\t\tlogrus.Infof(\"do register to %s, res:%s error:%v\", nodes[i], resp, e)\n\t\tif e != nil {\n\t\t\tlogrus.Errorf(\"register to node:%s error:%v\", nodes[i], e)\n\t\t\tcontinue\n\t\t}\n\t\tif resp.Code == constants.Success || resp.Code == constants.CodeNeedAuth ||\n\t\t\tresp.Code == constants.CodeURLNotReachable {\n\t\t\tbreak\n\t\t}\n\t\tif resp.Code == constants.CodeWaitAuth && retryTimes < 3 {\n\t\t\ti--\n\t\t\tretryTimes++\n\t\t\tlogrus.Infof(\"sleep 2.5s to wait auth(%d/3)...\", retryTimes)\n\t\t\ttime.Sleep(2500 * time.Millisecond)\n\t\t}\n\t}\n\ts.setLastRegisteredNode(i)\n\ts.setRemainderNodes(i)\n\tif err := s.checkResponse(resp, e); err != nil {\n\t\tlogrus.Errorf(\"register fail:%v\", err)\n\t\treturn nil, err\n\t}\n\n\tresult := NewRegisterResult(nodes[i], s.cfg.Nodes, s.cfg.URL,\n\t\tresp.Data.TaskID, resp.Data.FileLength, resp.Data.PieceSize)\n\n\tlogrus.Infof(\"do register result:%s and cost:%.3fs\", resp,\n\t\ttime.Since(start).Seconds())\n\treturn result, nil\n}", "func (a *Client) Register(params 
*RegisterParams) (*RegisterOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewRegisterParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"register\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/subjects/{subject}/versions\",\n\t\tProducesMediaTypes: []string{\"application/json; qs=0.5\", \"application/vnd.schemaregistry+json; qs=0.9\", \"application/vnd.schemaregistry.v1+json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\", \"application/octet-stream\", \"application/vnd.schemaregistry+json\", \"application/vnd.schemaregistry.v1+json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &RegisterReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*RegisterOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for register: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (a *Manager) Register(name string, fn interface{}, description ...string) error {\n\tif name == \"\" {\n\t\treturn errors.New(\"the helper name cannot be empty\")\n\t}\n\n\tif fn == nil {\n\t\treturn errors.New(\"the helper function cannot be nil\")\n\t}\n\n\tif kind := reflect.TypeOf(fn).Kind(); kind != reflect.Func {\n\t\treturn fmt.Errorf(\"wrong type for 'fn', %v , must be a function\", kind)\n\t}\n\n\ta.helpers[name] = &Helper{\n\t\tName: name,\n\t\tFunction: fn,\n\t\tDescription: stringx.GetOrDefault(\"\", description...),\n\t}\n\n\treturn nil\n}", "func Register(name string, port int) (err error) {\n\tr := RegistryRequest{name, port}\n\n\tbyt, err := json.Marshal(r)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresp, err := http.Post(\"http://127.0.0.1\" + _LOCAL_PORT, \"text/json\", bytes.NewBuffer(byt))\n\tif err != nil {\n\t\treturn\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\terr = fmt.Errorf(\"non 200 response %d: %s\", resp.StatusCode, resp.Status)\n\t\treturn\n\t}\n\n\tbyt, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func (f *Factory) Register(componentName string, constructor Constructor) error {\n\tif _, exists := f.registry[componentName]; exists {\n\t\treturn fmt.Errorf(\"Registry error: component '%s' already registered\", componentName)\n\t}\n\tf.registry[componentName] = registryEntry{\n\t\tConstructor: constructor,\n\t\tInfo: ComponentInfo{\n\t\t\tName: componentName,\n\t\t},\n\t}\n\treturn nil\n}", "func Register(name string, host string, port int, target string, interval time.Duration, ttl int) error {\n\t// get endpoints for register dial address\n\tendpoints := strings.Split(target, \",\")\n\tconf := etcd.Config{\n\t\tEndpoints: endpoints,\n\t}\n\n\tclient, err := etcd.New(conf)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"wonaming: create etcd client error: %v\", err)\n\t}\n\tkeyapi := etcd.NewKeysAPI(client)\n\n\tserviceID := fmt.Sprintf(\"%s-%s-%d\", name, host, port)\n\tserviceKey := fmt.Sprintf(\"/%s/%s/%s\", prefix, name, serviceID)\n\thostKey := fmt.Sprintf(\"/%s/%s/%s/host\", prefix, name, serviceID)\n\tportKey := fmt.Sprintf(\"/%s/%s/%s/port\", prefix, name, serviceID)\n\n\t//de-register if meet signhup\n\tgo func() {\n\t\tch := make(chan 
os.Signal, 1)\n\t\tsignal.Notify(ch, syscall.SIGTERM, syscall.SIGINT, syscall.SIGKILL, syscall.SIGHUP, syscall.SIGQUIT)\n\t\tx := <-ch\n\t\tlog.Println(\"wonaming: receive signal: \", x)\n\n\t\t_, err := keyapi.Delete(context.Background(), serviceKey, &etcd.DeleteOptions{Recursive: true})\n\t\tif err != nil {\n\t\t\tlog.Println(\"wonaming: deregister service error: \", err.Error())\n\t\t} else {\n\t\t\tlog.Println(\"wonaming: deregistered service from etcd server.\")\n\t\t}\n\n\t\ts, _ := strconv.Atoi(fmt.Sprintf(\"%d\", x))\n\n\t\tos.Exit(s)\n\n\t}()\n\n\tgo func() {\n\t\t// invoke self-register with ticker\n\t\tticker := time.NewTicker(interval)\n\n\t\t// should get first, if not exist, set it\n\t\tfor {\n\t\t\t<-ticker.C\n\t\t\t_, err := keyapi.Get(context.Background(), serviceKey, &etcd.GetOptions{Recursive: true})\n\t\t\tif err != nil {\n\t\t\t\tif _, err := keyapi.Set(context.Background(), hostKey, host, nil); err != nil {\n\t\t\t\t\tlog.Printf(\"wonaming: re-register service '%s' host to etcd error: %s\\n\", name, err.Error())\n\t\t\t\t}\n\t\t\t\tif _, err := keyapi.Set(context.Background(), portKey, fmt.Sprintf(\"%d\", port), nil); err != nil {\n\t\t\t\t\tlog.Printf(\"wonaming: re-register service '%s' port to etcd error: %s\\n\", name, err.Error())\n\t\t\t\t}\n\t\t\t\tsetopt := &etcd.SetOptions{TTL: time.Duration(ttl) * time.Second, PrevExist: etcd.PrevExist, Dir: true}\n\t\t\t\tif _, err := keyapi.Set(context.Background(), serviceKey, \"\", setopt); err != nil {\n\t\t\t\t\tlog.Printf(\"wonaming: set service '%s' ttl to etcd error: %s\\n\", name, err.Error())\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// refresh set to true for not notifying the watcher\n\t\t\t\tsetopt := &etcd.SetOptions{TTL: time.Duration(ttl) * time.Second, PrevExist: etcd.PrevExist, Dir: true, Refresh: true}\n\t\t\t\tif _, err := keyapi.Set(context.Background(), serviceKey, \"\", setopt); err != nil {\n\t\t\t\t\tlog.Printf(\"wonaming: set service '%s' ttl to etcd error: %s\\n\", name, err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\t// initial register\n\tif _, err := keyapi.Set(context.Background(), hostKey, host, nil); err != nil {\n\t\treturn fmt.Errorf(\"wonaming: initial register service '%s' host to etcd error: %s\", name, err.Error())\n\t}\n\tif _, err := keyapi.Set(context.Background(), portKey, fmt.Sprintf(\"%d\", port), nil); err != nil {\n\t\treturn fmt.Errorf(\"wonaming: initial register service '%s' port to etcd error: %s\", name, err.Error())\n\t}\n\tsetopt := &etcd.SetOptions{TTL: time.Duration(ttl) * time.Second, PrevExist: etcd.PrevExist, Dir: true}\n\tif _, err := keyapi.Set(context.Background(), serviceKey, \"\", setopt); err != nil {\n\t\treturn fmt.Errorf(\"wonaming: set service '%s' ttl to etcd error: %s\", name, err.Error())\n\t}\n\n\treturn nil\n}", "func (m *Monocular) Register(echoContext echo.Context) error {\n\tlog.Debug(\"Helm Repository Register...\")\n\treturn m.portalProxy.RegisterEndpoint(echoContext, m.Info)\n}", "func (c *clientRegistry) Register(ctx *FContext, callback FAsyncCallback) error {\n\topID := ctx.opID()\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\t_, ok := c.handlers[opID]\n\tif ok {\n\t\treturn errors.New(\"frugal: context already registered\")\n\t}\n\topID = atomic.AddUint64(&nextOpID, 1)\n\tctx.setOpID(opID)\n\tc.handlers[opID] = callback\n\treturn nil\n}", "func SetRegistry(spec *v1.ServiceBrokerConfigSpec, name string, expression interface{}) {\n\tvar str string\n\n\tswitch t := expression.(type) {\n\tcase Function:\n\t\tstr = string(t)\n\tcase Pipeline:\n\t\tstr = 
string(t)\n\tcase string, int, bool, nil:\n\t\tstr = argument(t)\n\tdefault:\n\t\tfmt.Println(\"fail\")\n\t}\n\n\tspec.Bindings[0].ServiceInstance.Registry = []v1.RegistryValue{\n\t\t{\n\t\t\tName: name,\n\t\t\tValue: `{{` + str + `}}`,\n\t\t},\n\t}\n}", "func init() {\n\tRegistry.Add(eksinfo.New())\n\tRegistry.Add(vpcinfo.New())\n\tRegistry.Add(iamresourceusage.New())\n}", "func Register(funcName string, backend TemplateFunc, buildFlags FlagsFunc) {\n\tlock.Lock()\n\tdefer lock.Unlock()\n\tbackends[funcName] = backend\n\tflags[funcName] = buildFlags\n}", "func (_Posminer *PosminerCaller) Registers(opts *bind.CallOpts, arg0 common.Address) (struct {\n\tMinerPool common.Address\n\tRegistryTime *big.Int\n\tPayTime *big.Int\n\tRegister string\n}, error) {\n\tret := new(struct {\n\t\tMinerPool common.Address\n\t\tRegistryTime *big.Int\n\t\tPayTime *big.Int\n\t\tRegister string\n\t})\n\tout := ret\n\terr := _Posminer.contract.Call(opts, out, \"Registers\", arg0)\n\treturn *ret, err\n}", "func (c *mockMediatorClient) Register(connectionID string) error {\n\tif c.RegisterErr != nil {\n\t\treturn c.RegisterErr\n\t}\n\n\treturn nil\n}", "func (s *Service) Register(name string, mp Provider) error {\n\ts.providers[name] = mp\n\treturn nil\n}", "func (sc *SuperChain) chainRegister(stub shim.ChaincodeStubInterface, args []string) pb.Response{\n\tif len(args) != 5 {\n\t\treturn shim.Error(\"Incorrect number of arguments. Expecting 5\")\n\t}\n\n\tinfo := args[0]\n\tip := args[1]\n\tserial := args[2]\n\tcsr := args[3]\n\torgCACert := args[4]\n\n\t// create cert\n\tcert,err := sc.CreateCertWithCsr(stub, []byte(csr))\n\tif err != nil{\n\t\treturn shim.Error(fmt.Errorf(\"create cert with csr error: %w\", err).Error())\n\t}\n\n\t// create ID\n\ttempString := info + ip + serial\n\tSha1Inst := sha1.New()\n\tio.WriteString(Sha1Inst,tempString)\n\tchainID := fmt.Sprintf(\"%x\",Sha1Inst.Sum(nil))\n\n\t// create chain struct\n\tchain := Chain{\n\t\tID: chainID,\n\t\tINFO: info,\n\t\tIP: ip,\n\t\tSERIAL: serial,\n\t}\n\tchainJson, err := json.Marshal(chain)\n\tif err != nil {\n\t\treturn shim.Error(fmt.Errorf(\"chain json marshal error: %w\", err).Error())\n\t}\n\n\t// save chain\n\tif err := stub.PutState(chain.ID, chainJson); err != nil {\n\t\treturn shim.Error(fmt.Errorf(\"save chain error: %w\").Error())\n\t}\n\n\t// save chain's org ca cert\n\tif err := stub.PutState(ToChainOrgCertID(chain.ID), []byte(orgCACert)); err != nil {\n\t\treturn shim.Error(fmt.Errorf(\"save chain's org ca cert error: %w\", err).Error())\n\t}\n\n\t// get root certificate\n\trootCertificateByte, err := stub.GetState(RootCertificate)\n\tif err != nil {\n\t\treturn shim.Error(fmt.Errorf(\"get root certificate error: %w\", err).Error())\n\t}\n\n\t// create return struct\n\trtr := ReturnToRegister{\n\t\tID: chain.ID,\n\t\tCERT: string(cert),\n\t\tROOTCERT: string(rootCertificateByte),\n\t}\n\trtrJson, err := json.Marshal(rtr)\n\tif err != nil {\n\t\treturn shim.Error(fmt.Errorf(\"rtr json marshal error: %w\", err).Error())\n\t}\n\n\treturn shim.Success(rtrJson)\n}", "func AddRegistry(spec *v1.ServiceBrokerConfigSpec, name string, expression interface{}) {\n\tvar str string\n\n\tswitch t := expression.(type) {\n\tcase Function:\n\t\tstr = string(t)\n\tcase Pipeline:\n\t\tstr = string(t)\n\tcase string, int, bool, nil:\n\t\tstr = argument(t)\n\tdefault:\n\t\tfmt.Println(\"fail\")\n\t}\n\n\tspec.Bindings[0].ServiceInstance.Registry = append(spec.Bindings[0].ServiceInstance.Registry, v1.RegistryValue{\n\t\tName: name,\n\t\tValue: `{{` + str + 
`}}`,\n\t})\n}", "func register(stub shim.ChaincodeStubInterface) peer.Response {\n\t\tMSPid, _ := shim.GetMSPID()\n\t\t// Init datatypes\n\t\tvoter_list := []Operators{}\n\n\t\t// Get Operator list\n\t\tvalue, _ := stub.GetState(\"Operators\")\n\t\tjson.Unmarshal(value, &voter_list)\n\n\t\t// Get current Operator\n\t\tfor i := 0; i < len(voter_list); i++ {\n\t\t\tif(voter_list[i].OperatorID == MSPid) {\n\t\t\t\treturn shim.Success([]byte(\"User already registerd\"))\n\t\t\t}\n\t\t}\n\n\t\tOperator := Operators{OperatorID: MSPid, OclToken: 200}\n\t\turlsJson, _ := json.Marshal(append(voter_list, Operator))\n\n\t\tstub.PutState(\"Operators\", urlsJson)\n\n\t\treturn shim.Success(urlsJson)\n}", "func (r *componentRegistry[T]) registerComponent(tp reflect.Type, totalBits int) T {\n\tid := T(len(r.Components))\n\tif int(id) >= totalBits {\n\t\tpanic(\"maximum of 128 component types exceeded\")\n\t}\n\tr.Components[tp], r.Types[id] = id, tp\n\tr.Used.Set(uint8(id), true)\n\tif r.isRelation(tp) {\n\t\tr.IsRelation.Set(uint8(id), true)\n\t}\n\treturn id\n}", "func (asr *sessionRegistry) register(clt *Client) {\n\tasr.lock.Lock()\n\tasr.registry[clt.Session.Key] = clt\n\tasr.lock.Unlock()\n}", "func (r *registry) Setup() error {\n\tr.Lock()\n\tdefer r.Unlock()\n\n\tif _, err := r.doSetup(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (wfw *impl) Register(p ComponentInterface) {\n\tif p == nil {\n\t\tif singleton.debug {\n\t\t\tlog.Debugf(\"Register workflow application nil component. Action missed\")\n\t\t}\n\t\treturn\n\t}\n\tif singleton.debug {\n\t\tlog.Debugf(\"Register workflow application component %q\", packageName(p))\n\t}\n\twfw.Components = append(wfw.Components, p)\n}", "func (r *Registry) Register(t Task) {\n\tr.tasks = append(r.tasks, t)\n}", "func (L *State) register(f interface{}) uint {\n\t//fmt.Printf(\"Registering %v\\n\")\n\tindex, ok := L.getFreeIndex()\n\t//fmt.Printf(\"\\tfreeindex: index = %v, ok = %v\\n\", index, ok)\n\t//if not ok, then we need to add new index by extending the slice\n\tif !ok {\n\t\tindex = uint(len(L.registry))\n\t\t//reallocate backing array if necessary\n\t\tif index+1 > uint(cap(L.registry)) {\n\t\t\tnewcap := cap(L.registry) * 2\n\t\t\tif index+1 > uint(newcap) {\n\t\t\t\tnewcap = int(index + 1)\n\t\t\t}\n\t\t\tnewSlice := make([]interface{}, index, newcap)\n\t\t\tcopy(newSlice, L.registry)\n\t\t\tL.registry = newSlice\n\t\t}\n\t\t//reslice\n\t\tL.registry = L.registry[0 : index+1]\n\t}\n\t//fmt.Printf(\"\\tregistering %d %v\\n\", index, f)\n\tL.registry[index] = f\n\treturn index\n}", "func Register(name string, factory Factory) {\n\tprovisioners[name] = factory\n}" ]
[ "0.61195105", "0.57701206", "0.5768041", "0.56972194", "0.5681758", "0.5675238", "0.56512576", "0.56151867", "0.56094605", "0.55880135", "0.5549466", "0.554876", "0.55433893", "0.5530311", "0.55118173", "0.55116355", "0.5507623", "0.5489956", "0.5486707", "0.54629326", "0.54559827", "0.5447156", "0.5440147", "0.54373705", "0.54373705", "0.5433747", "0.5425732", "0.54208416", "0.54208416", "0.54208416", "0.54136914", "0.5388781", "0.5362931", "0.5360314", "0.5348929", "0.5331993", "0.53126186", "0.53060716", "0.5287825", "0.52814806", "0.5279346", "0.52786845", "0.5278038", "0.5272078", "0.52716976", "0.52519536", "0.5247065", "0.5240463", "0.52227616", "0.52217853", "0.5215399", "0.52139443", "0.5210081", "0.5206068", "0.5201321", "0.5193312", "0.51868653", "0.5172948", "0.51717937", "0.5169333", "0.51543206", "0.5148932", "0.5136888", "0.5130942", "0.51242524", "0.51240283", "0.51226753", "0.51212347", "0.5117702", "0.5117702", "0.5111393", "0.510794", "0.5105406", "0.51006854", "0.5099813", "0.5098959", "0.5095372", "0.5093305", "0.50904256", "0.5081654", "0.5077595", "0.5077015", "0.50760084", "0.5074921", "0.5073557", "0.5072294", "0.5070979", "0.50682324", "0.5067085", "0.5065277", "0.5064637", "0.50637054", "0.5062638", "0.5062458", "0.5060001", "0.5059607", "0.50594616", "0.5058496", "0.50566757", "0.50506276" ]
0.7147535
0
powerOf2 determines if its argument is an integer power of 2.
func powerOf2(x int) bool {
	var i int = 1
	for i > 0 {
		if i == x {
			return true
		}
		i <<= 1
	}
	return false
}
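For contrast, the standard bit-trick variant that several of the negatives below also use; it is equivalent to the loop above for all int inputs (a positive integer is a power of two iff it has exactly one set bit) and runs in constant time instead of shifting until overflow:

package main

import "fmt"

// isPowerOf2 reports whether x is a positive integer power of two,
// using the x&(x-1) trick to clear the lowest set bit.
func isPowerOf2(x int) bool {
	return x > 0 && x&(x-1) == 0
}

func main() {
	for _, x := range []int{0, 1, 2, 3, 8, 12, 1024} {
		fmt.Println(x, isPowerOf2(x)) // false true true false true false true
	}
}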
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func powerOfTwo(n int64) bool {\n\treturn n != 0 && n&(n-1) == 0\n}", "func IsPowerOf2(x int32) bool {\n\treturn (x & (x - 1)) == 0\n}", "func isPowerOf2(n uint32) bool {\n\treturn n&(n-1) == 0\n}", "func IsPow2(x uint32) bool { return (x & (x - 1)) == 0 }", "func isPowerOfTwo(n int) bool {\n\tif n == 1 {\n\t\treturn true\n\t}\n\n\tif n%2 == 1 {\n\t\treturn false\n\t}\n\n\treturn isPowerOfTwo(n / 2)\n}", "func PowerOfTwo(val int64) bool {\n\treturn val > 0 && val&(val-1) == 0\n}", "func power_of_two(n int) uint64 {\n return uint64(math.Pow(2.0, float64(n)))\n}", "func IsPowerOfTwo(x int) bool {\n\treturn (x & (-x)) == x\n}", "func powerOfTwo(n int) int {\n\tif n&(n-1) == 0 {\n\t\treturn n >> 1\n\t}\n\treturn powerOfTwo0(n)\n}", "func isPowerOfTwo(val int) bool {\n\treturn (val != 0) && (val&(val-1)) == 0\n}", "func isPowerOfTwo(n int) bool {\n\t// Accepted\n\t// 1108/1108 cases passed (0 ms)\n\t// Your runtime beats 100 % of golang submissions\n\t// Your memory usage beats 25 % of golang submissions (2.2 MB)\n\treturn n > 0 && (n&(n-1)) == 0\n}", "func IsPowerOfTwo(x int) bool{\n return (x != 0) && ((x & (x - 1)) == 0);\n}", "func PowerOfTwo(val uint32) bool {\n\tret := (val != 0) && (val&(val-1)) == 0x0\n\treturn ret\n}", "func floorPow2(v int) int {}", "func IsPowerTwo(in uint64) bool {\n\tctr := uint64(0)\n\tfor i := uint64(0); i < 64; i++ {\n\t\tctr += ((in & (1 << i)) >> i)\n\t}\n\tif ctr == 1 {\n\t\treturn true\n\t}\n\treturn false\n}", "func IsPowerOfTwo(v T) bool {\n\tif v == 0 {\n\t\treturn false\n\t}\n\treturn v&(v-1) == 0\n}", "func twoToPow(i int) int {\n\treturn int(1 << uint(i))\n}", "func PowerOf2(x, n, q, qInv uint64) (r uint64) {\n\tahi, alo := x>>(64-n), x<<n\n\tR := alo * qInv\n\tH, _ := bits.Mul64(R, q)\n\tr = ahi - H + q\n\tif r >= q {\n\t\tr -= q\n\t}\n\treturn\n}", "func NextPowOfTwo(n int) int {\n\tswitch {\n\tcase n < 0:\n\t\treturn -NextPowOfTwo(-n)\n\tcase n == 0:\n\t\treturn 1\n\tcase n&(n-1) == 0:\n\t\t// n = 2^k, for some k >= 0\n\t\treturn n\n\tdefault:\n\t\t// 2^k > n where k bits represent n in base 2 (disregarding the leading sign bit)\n\t\treturn 1 << bits.Len(uint(n))\n\t}\n}", "func pow2(v int64) int64 {\n\tfor i := int64(2); i < 1<<62; i *= 2 {\n\t\tif i >= v {\n\t\t\treturn i\n\t\t}\n\t}\n\tpanic(\"unreachable\")\n}", "func nextPowerOfTwo(n int) int {\n\tif n&(n-1) == 0 {\n\t\treturn n\n\t}\n\n\texponent := uint(math.Log2(float64(n))) + 1\n\treturn 1 << exponent // 2^exponent\n}", "func nextPowerOf2(n uint) uint {\n\tif n > 0 && (n&(n-1)) == 0 {\n\t\treturn n\n\t}\n\n\tvar count uint\n\tfor k := n; k != 0; k >>= 1 {\n\t\tlogrus.WithFields(logrus.Fields{\"k\": k, \"count\": count}).Debug(\"iterating to next power of 2\")\n\t\tcount += 1\n\t}\n\tlogrus.WithField(\"nextPower2\", 1<<count).Debug(\"next power number calculated\")\n\treturn 1 << count\n}", "func nextPowerOf2(val int) int {\n\tval--\n\tval |= val >> 1\n\tval |= val >> 2\n\tval |= val >> 4\n\tval |= val >> 8\n\tval |= val >> 16\n\tval++\n\treturn val\n}", "func Pow2(x uint32) uint32 {\n\tx--\n\tx |= x >> 1\n\tx |= x >> 2\n\tx |= x >> 4\n\tx |= x >> 8\n\tx |= x >> 16\n\treturn x + 1\n}", "func isPowerOfThreeV2(n int) bool {\n\t/**\n\tAlgorithm:\n\tIn Base 10, all powers of 10 start with the digit 1 and then are followed only by 0 (e.g. 
10, 100, 1000).\n\tThis is true for other bases and their respective powers.\n\tTherefore if we convert our number to base 3 and the representation is of the form 100...0, then the number is a power of 3.\n\n\te.g., FormatInt(int64(27), 3) == \"1000\" thus 27 is power of 3\n\tbut FormatInt(int64(56), 3) == \"1200\" since 56 is not power of 3\n\t*/\n\tbase3 := strconv.FormatInt(int64(n), 3)\n\tif base3[:1] != \"1\" {\n\t\treturn false\n\t}\n\tfor _, d := range base3[1:] {\n\t\tif d != '0' {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func NxtPowerOfTwo(val int64) int64 {\n\tif val <= 1 {\n\t\treturn 1\n\t}\n\tif PowerOfTwo(val) {\n\t\treturn val\n\t}\n\treturn 1 << bits.Len64(uint64(val))\n}", "func pow2(x, n, lim float64) float64 {\n if v := math.Pow(x, n); v < lim {\n return v\n } else {\n fmt.Printf(\"%g >= %g\\n\", v, lim)\n }\n // can't use v here, though\n return lim\n}", "func IsPowOfTwoUseLog(number float64) bool {\n\tif number == 0 || math.Round(number) == math.MaxInt64 {\n\t\treturn false\n\t}\n\tlog := math.Log2(number)\n\treturn log == math.Round(log)\n}", "func pow2(e int) []byte {\n\n\tres := []byte{byte(1 << uint(e%8))}\n\n\tres = append(res, make([]byte, e/8)...)\n\n\treturn res\n}", "func RoundUpPowerOf2(n int) int {\n\tn--\n\tn |= n >> 1\n\tn |= n >> 2\n\tn |= n >> 4\n\tn |= n >> 8\n\tn |= n >> 16\n\tn++\n\treturn n\n}", "func log2(n int) uint {\n\tn--\n\tcount := uint(0)\n\tfor n > 0 {\n\t\tcount++\n\t\tn = n >> 1\n\t}\n\treturn count\n}", "func log2(x uint) uint {\n\tif x == 0 {\n\t\treturn 0\n\t}\n\n\tk := uint(0)\n\tfor (x >> k) > 0 {\n\t\tk += 1\n\t}\n\treturn k - 1\n}", "func (c *Clac) Pow2() error {\n\treturn c.applyFloat(1, func(vals []value.Value) (value.Value, error) {\n\t\treturn binary(value.Int(2), \"**\", vals[0])\n\t})\n}", "func IfExample2(item float64) bool {\n\tif pow := int(math.Pow(item, 2)); pow < rand.Intn(2) {\n\t\treturn false\n\t} else {\n\t\tfmt.Printf(\"Pow %q\", pow)\n\t}\n\treturn true\n}", "func ilog2(n uint64) uint {\n\tvar r uint\n\tfor {\n\t\tn >>= 1\n\t\tif n == 0 {\n\t\t\tbreak\n\t\t}\n\t\tr++\n\t}\n\treturn r\n}", "func to2(off uint32, bits int) int {\n\tvar off2 int\n\t// fmt.Printf(\"off: %x bits: %d %x\\n\", off, bits, (1<<bits))\n\t// if (off >= (1 << bits)) { \n\tif ((off & (1 << (bits))) != 0) { \n\t\t// off = off - (1<<(bits))\n\t\t// off = ^off & ((1<<bits)-1)\n\t\toff = off ^ ((1<<(bits+1))-1) // invert all bits of number incl. 
sign\n \t\toff2 = -int(off+1)\n\t} else {\n\t\toff2 = int(off)\n\t}\n\treturn off2\n}", "func roundUpToPowerOfTwo(v int) int {\n\t// http://graphics.stanford.edu/~seander/bithacks.html\n\tv -= 1\n\tv |= v >> 1\n\tv |= v >> 2\n\tv |= v >> 4\n\tv |= v >> 8\n\tv |= v >> 16\n\treturn v + 1\n}", "func ilog2(v int) uint64 {\n\tvar r uint64\n\tfor ; v != 0; v >>= 1 {\n\t\tr++\n\t}\n\treturn r\n}", "func (this *MachO_MachoFlags) TwoLevel() (v bool, err error) {\n\tif (this._f_twoLevel) {\n\t\treturn this.twoLevel, nil\n\t}\n\tthis.twoLevel = bool((this.Value & 128) != 0)\n\tthis._f_twoLevel = true\n\treturn this.twoLevel, nil\n}", "func mylog2(avalue int) int {\n\tvar dbits int\n\n\tavalue += (avalue >> 9)\n\tif (avalue) < (1 << 8) {\n\t\tdbits = nbits_table[avalue]\n\n\t\treturn (dbits << 8) + log2_table[(int)(avalue<<uint(9-dbits))&0xff]\n\t} else {\n\t\tif avalue < (1 << 16) {\n\t\t\tdbits = nbits_table[(int)(avalue>>8)] + 8\n\t\t} else if avalue < (1 << 24) {\n\t\t\tdbits = nbits_table[(int)(avalue>>16)] + 16\n\t\t} else {\n\t\t\tdbits = nbits_table[(int)(avalue>>24)] + 24\n\t\t}\n\n\t\treturn (dbits << 8) + log2_table[(avalue>>uint(dbits-9))&0xff]\n\t}\n\treturn 0 // cannot actually reach this line, but keeps the compiler happy :)\n}", "func multi2(x int) int {\n\treturn x * 2\n}", "func RoundUpPowerOfTwo(x int32) int32 {\n\tx--\n\tx |= (x >> 1)\n\tx |= (x >> 2)\n\tx |= (x >> 4)\n\tx |= (x >> 8)\n\tx |= (x >> 16)\n\treturn x + 1\n}", "func DivUpPow2(dividend, divisor int, log2Divisor uint) int {\n\treturn (dividend + divisor - 1) >> log2Divisor\n}", "func CheckInt2(n int) bool {\n\ts := strconv.Itoa(n)\n\treturn reverseStr(s) == s\n}", "func pow(x, n int) int {\n\tif n == 0 {\n\t\treturn 1\n\t}\n\thalf := pow(x, n/2)\n\tres := half * half % bigNum\n\tif n%2 == 1 {\n\t\tres = res * x % bigNum\n\t}\n\treturn res\n}", "func MultiplyByTwo(value int) int {\n\treturn value * 2\n}", "func roundUpToNextPowerOfTwo(x uint64) uint64 {\n\tvar r uint64 = 1\n\tfor i := 0; i < 64; i++ {\n\t\tif x <= r {\n\t\t\treturn r\n\t\t}\n\t\tr = r << 1\n\t}\n\treturn 0 // won't fit in uint64 :-(\n}", "func mod2(n []byte, e int) []byte {\n\n\tres := make([]byte, len(n))\n\tcopy(res, n)\n\n\tmod := sub(pow2(e), []byte{0x01})\n\n\tdiff := len(n) - len(mod)\n\tfor i := len(res) - 1; i >= 0; i-- {\n\n\t\tif i-diff < 0 {\n\t\t\tres[i] = 0x00\n\t\t} else {\n\t\t\tres[i] &= mod[i-diff]\n\t\t}\n\n\t}\n\n\treturn res\n}", "func Log2(x uint32) (uint32, error) {\n\tif x == 0 {\n\t\treturn 0, errors.New(\"Cannot calculate log of a negative or null value\")\n\t}\n\n\treturn Log2NoCheck(x), nil\n}", "func pow(a, b int) int {\n\tswitch {\n\tcase b < 0:\n\t\tpanic(\"We're just handling positive exponents here\")\n\tcase b == 0:\n\t\treturn 1\n\tcase b == 1:\n\t\treturn a\n\tcase b%2 == 0:\n\t\treturn pow(a*a, b/2)\n\tdefault:\n\t\treturn a * pow(a*a, (b-1)/2)\n\t}\n}", "func Log2NoCheck(x uint32) uint32 {\n\tvar res uint32\n\n\tif x >= 1<<16 {\n\t\tx >>= 16\n\t\tres = 16\n\t} else {\n\t\tres = 0\n\t}\n\n\tif x >= 1<<8 {\n\t\tx >>= 8\n\t\tres += 8\n\t}\n\n\treturn res + LOG2[x-1]\n}", "func IsEven(val1 int) bool {\n\n\treturn true\n}", "func pow(a, b int) int {\n\tp := 1\n\tfor b > 0 {\n\t\tif b&1 != 0 {\n\t\t\tp *= a\n\t\t}\n\t\tb >>= 1\n\t\ta *= a\n\t}\n\treturn p\n}", "func (parser *Parser) level2_op() (*SubExpr, fxsymbols.TokenId, error) {\n\tparser.trace(\"LEVEL2_OP\")\n\tdefer parser.untrace()\n\n\t// LEVEL2_OPER ::= '*' | '/' | '%' | '&'\n\tlevel2_ops := []fxsymbols.TokenId{fxsymbols.Mult, fxsymbols.Div, fxsymbols.Mod, 
fxsymbols.And}\n\top, err := parser.findOperand(level2_ops)\n\tif err != nil {\n\t\treturn nil, fxsymbols.None, err\n\t}\n\tif op.Id == fxsymbols.None {\n\t\treturn nil, op.Id, nil\n\t}\n\tpow, err := parser.pow()\n\tif err == ErrNoMatch {\n\t\terr = parser.Errorf(ErrNoVal)\n\t}\n\tif err != nil {\n\t\treturn nil, fxsymbols.None, err\n\t}\n\tlevel2_op, op2, err := parser.level2_op()\n\tif err != nil {\n\t\treturn nil, fxsymbols.None, err \n\t}\n\tif level2_op == nil {\n\t\treturn pow, op.Id, nil\n\t}\n\treturn NewExprSubExpr(NewExpr(op2, pow, level2_op)), op.Id, nil\n}", "func Part2() int {\n\tvar count int\n\n\tfor i := inputMin; i < inputMax; i++ {\n\t\tif ValidatePasswordV2(i) == nil {\n\t\t\tcount++\n\t\t}\n\t}\n\n\treturn count\n}", "func Pow(arg, arg2 float64) float64 {\n\treturn math.Pow(arg, arg2)\n}", "func op_f64_log2(expr *CXExpression, fp int) {\n\tinp1, out1 := expr.Inputs[0], expr.Outputs[0]\n\toutB1 := FromF64(math.Log2(ReadF64(fp, inp1)))\n\tWriteMemory(GetFinalOffset(fp, out1), outB1)\n}", "func Prime2(n int) bool {\n\tmid := n / 2\n\tfor i := 2; i <= mid; i++ {\n\t\tif n%i == 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn n > 1\n}", "func Log2(x float32) float32 {\n\treturn float32(math.Log2(float64(x)))\n}", "func pow(a, b int64) int64 {\n\tp := int64(1)\n\tfor b > 0 {\n\t\tif b&1 != 0 {\n\t\t\tp *= a\n\t\t}\n\t\tb >>= 1\n\t\ta *= a\n\t}\n\treturn p\n}", "func log2s(value int) int {\n\tif value < 0 {\n\t\treturn -mylog2(-value)\n\t} else {\n\t\treturn mylog2(value)\n\t}\n\treturn 0 // cannot actually reach this line, but keeps the compiler happy :)\n}", "func (parser *Parser) level2() (*SubExpr, error) {\n\tparser.trace(\"LEVEL2\")\n\tdefer parser.untrace()\n\n\tpow, err := parser.pow()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlevel2_op , op, err := parser.level2_op()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif level2_op == nil {\n\t\treturn pow, nil\n\t}\n\treturn NewExprSubExpr(NewExpr(op, pow, level2_op)), nil\n}", "func exp2s(log int) int {\n\tvar value uint\n\n\tif log < 0 {\n\t\treturn -exp2s(-log)\n\t}\n\n\tvalue = uint(exp2_table[log&0xff] | 0x100)\n\n\tlog >>= 8\n\tif log <= 9 {\n\t\treturn int((value >> uint(9-log)))\n\t} else {\n\t\treturn int((value << uint(log-9)))\n\t}\n\n\treturn 0 // again, cannot actually reach this line, but keeps the compiler happy :)\n}", "func isEvenNumber(i int) bool {\n\treturn i%2 == 0\n}", "func isEven(i int) bool {\n\treturn (i & 0x01) == 0\n}", "func (p password) isValid2() bool {\n\tif p.policy.min > len(p.value) || p.policy.max > len(p.value) {\n\t\t// let's hope this doesn't happen, but also make sure we wouldn't break something ere Christmas.\n\t\treturn false\n\t}\n\t// those -1 are here because someone decided to start counting at 1\n\tfirstPos := string(p.value[p.policy.min-1]) == p.policy.letter\n\tsecondPos := string(p.value[p.policy.max-1]) == p.policy.letter\n\t// fancy XOR operator\n\treturn firstPos != secondPos\n}", "func Shrey2(x int, y int) int {\n\treturn x \n}", "func TestPower(t *testing.T) {\n\t// base = 0, exp = n (>0)\n\ttest1 := pow(0, 23)\n\t// test1 should have output as 0 as 0^n = 0\n\t// if any other result if found then it has failed the test, we now throw an error\n\n\tif test1 != 0 {\n\t\tt.Errorf(\"pow(0, 23) = %f; expected: 0\", test1)\n\t}\n\n\t// base = 45, exp = 0 should return 1, as n^0 = 1\n\ttest2 := pow(45, 0)\n\n\tif test2 != 1 {\n\t\tt.Errorf(\"pow(45,0) = %f; expected: 1\", test2)\n\t}\n}", "func Log2(x float64) float64 {\n\tif haveArchLog2 {\n\t\treturn 
archLog2(x)\n\t}\n\treturn log2(x)\n}", "func rangeBitwiseAnd2(m int, n int) int {\n\tvar cnt uint\n\tfor m != n {\n\t\t// m比n位少,一定是0\n\t\tif m == 0 {\n\t\t\treturn 0\n\t\t}\n\t\tm = m >> 1\n\t\tn = n >> 1\n\t\tcnt++\n\t}\n\tn = n << cnt\n\treturn n\n}", "func isTwoSidePrime(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tvalue := vars[\"num\"]\n\t// To convert from String to Int\n\tn, _ := strconv.Atoi(value)\n\tjson.NewEncoder(w).Encode(checkTwoSidePrime(n))\n}", "func isPossible2(x int) int {\n\ts := strconv.Itoa(x)\n\tvar (\n\t\tlastC rune\n\t\tgs [10]int\n\t\tnotIncreasing bool\n\t\thasDouble bool\n\t)\n\tfor pos, c := range s {\n\t\tif pos > 0 {\n\t\t\tif lastC == c {\n\t\t\t\tgs[c - '0']++\n\t\t\t}\n\t\t\tif c < lastC {\n\t\t\t\tnotIncreasing = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tlastC = c\n\t}\n\tfor i := range gs {\n\t\tif gs[i] == 1 {\n\t\t\thasDouble = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif hasDouble && !notIncreasing {\n\t\treturn 1\n\t}\n\treturn 0\n}", "func Part2() int {\n\t// the program is calculating the count of numbers between 108100..125100\n\t// (step 17) which are NOT primes.\n\th := 0\n\tfor b := int64(108100); b <= 125100; b += 17 {\n\t\tif !big.NewInt(b).ProbablyPrime(1) {\n\t\t\th++\n\t\t}\n\t}\n\treturn h\n}", "func RoundUpPow2(val, alignment int) int {\n\treturn (val + alignment - 1) & (^(alignment - 1))\n}", "func add_2(x, y int) int {\n\treturn x + y\n}", "func (x nat) powersOfTwoDecompose() (q nat, k int) {\n\tif len(x) == 0 {\n\t\treturn x, 0\n\t}\n\n\t// One of the words must be non-zero by definition,\n\t// so this loop will terminate with i < len(x), and\n\t// i is the number of 0 words.\n\ti := 0\n\tfor x[i] == 0 {\n\t\ti++\n\t}\n\tn := trailingZeroBits(x[i]) // x[i] != 0\n\n\tq = make(nat, len(x)-i)\n\tshrVU(q, x[i:], uint(n))\n\n\tq = q.norm()\n\tk = i*_W + n\n\treturn\n}", "func add2(x, y int) int {\n\treturn x + y\n}", "func pow(x, n, lim float64) float64 {\n if v := math.Pow(x, n); v < lim {\n return v\n }\n return lim\n}", "func (self *State)Log2(a any)any{\n self.IncOperations(self.coeff[\"log2\"]+self.off[\"log2\"])\n return wrap1(a,math.Log2)\n}", "func Pow(a, b int) int {\n\treturn neogointernal.Opcode2(\"POW\", a, b).(int)\n}", "func pow(n, i int) (res int) {\n\tres = 1\n\tfor i > 0 {\n\t\tif i&1 == 1 {\n\t\t\tres *= n\n\t\t}\n\t\ti >>= 1\n\t\tn *= n\n\t}\n\treturn res\n}", "func superPow(a int, b []int) int {\n\tif len(b) < 1 {\n\t\treturn 1\n\t}\n\n\tbase := 1337\n\n\t// (a*b)%k = (a%k)(b%k)%k\n\tmypow := func(a, k int) int {\n\t\t// k is between [0, 10]\n\t\tres := 1\n\n\t\t// may overflow\n\t\t// for i := 0; i < k; i++ {\n\t\t// \ttmp := a % base\n\t\t// \tres *= tmp\n\t\t// }\n\n\t\ttmp := a % base\n\t\tfor i := 0; i < k; i++ {\n\t\t\tres *= tmp\n\t\t\tres %= base\n\t\t}\n\n\t\treturn res\n\t}\n\n\t// a^1234 = a^4 * a^123^10\n\tlast := b[len(b)-1]\n\tb = b[:len(b)-1]\n\n\t// part1 = ((a^last)%base) % base\n\tpart1 := mypow(a, last)\n\tpart2 := mypow(superPow(a, b), 10)\n\n\treturn part1 * part2 % base\n}", "func PowOfInt(base, power uint) int {\n\tif base == 2 {\n\t\treturn 1 << power\n\t}\n\treturn int(math.RoundToEven(math.Pow(float64(base), float64(power))))\n}", "func canMakeTwoTopOneRight(p int) bool {\n\tif p >= 57 {\n\t\treturn false\n\t}\n\n\tswitch p {\n\tcase 7, 15, 23, 31, 39, 47, 55, 63, 8, 16, 24, 32, 40, 48, 56, 64:\n\t\treturn false\n\t}\n\treturn true\n}", "func sub2(a, b, carry int32) (int32, int32) {\n\ttrace_util_0.Count(_mydecimal_00000, 14)\n\tdiff := a - b - carry\n\tif diff < 0 
{\n\t\ttrace_util_0.Count(_mydecimal_00000, 17)\n\t\tcarry = 1\n\t\tdiff += wordBase\n\t} else {\n\t\ttrace_util_0.Count(_mydecimal_00000, 18)\n\t\t{\n\t\t\tcarry = 0\n\t\t}\n\t}\n\ttrace_util_0.Count(_mydecimal_00000, 15)\n\tif diff < 0 {\n\t\ttrace_util_0.Count(_mydecimal_00000, 19)\n\t\tdiff += wordBase\n\t\tcarry++\n\t}\n\ttrace_util_0.Count(_mydecimal_00000, 16)\n\treturn diff, carry\n}", "func returnSideChainPowGet(L *lua.LState) int {\n\tp := checkSideChainPow(L, 1)\n\tfmt.Println(p)\n\n\treturn 0\n}", "func canMakeTwoDownOneRight(p int) bool {\n\tif p >= 57 {\n\t\treturn false\n\t}\n\n\tswitch p {\n\tcase 1, 2, 10, 18, 26, 34, 42, 50, 58, 9, 17, 25, 33, 41, 49:\n\t\treturn false\n\t}\n\treturn true\n}", "func sqrt2(x float64) string {\n\tif x < 0 {\n\t\treturn sqrt(-x) + \"i\"\n\t}\n\treturn fmt.Sprint(math.Sqrt(x))\n}", "func Or2(arg1 TermT, arg2 TermT) TermT {\n\treturn TermT(C.yices_or2(C.term_t(arg1), C.term_t(arg2)))\n}", "func (d Decimal) Pow(d2 Decimal) Decimal {\n\tvar temp Decimal\n\tif d2.IntPart() == 0 {\n\t\treturn NewFromFloat(1)\n\t}\n\ttemp = d.Pow(d2.Div(NewFromFloat(2)))\n\tif d2.IntPart()%2 == 0 {\n\t\treturn temp.Mul(temp)\n\t}\n\tif d2.IntPart() > 0 {\n\t\treturn temp.Mul(temp).Mul(d)\n\t}\n\treturn temp.Mul(temp).Div(d)\n}", "func (d Decimal) Pow(d2 Decimal) Decimal {\n\tvar temp Decimal\n\tif d2.IntPart() == 0 {\n\t\treturn NewFromFloat(1)\n\t}\n\ttemp = d.Pow(d2.Div(NewFromFloat(2)))\n\tif d2.IntPart()%2 == 0 {\n\t\treturn temp.Mul(temp)\n\t}\n\tif d2.IntPart() > 0 {\n\t\treturn temp.Mul(temp).Mul(d)\n\t}\n\treturn temp.Mul(temp).Div(d)\n}", "func Even(i int) bool {\n\treturn i%2 == 0\n}", "func H2(m []byte) *big.Int {\n\t// NIST SP 800-90A § A.5.1: Simple discard method.\n\tbyteLen := (params.BitSize + 7) >> 3\n\th := sha512.New()\n\tfor i := uint32(0); ; i++ {\n\t\t// TODO: Use a NIST specified DRBG.\n\t\th.Reset()\n\t\tbinary.Write(h, binary.BigEndian, i)\n\t\th.Write(m)\n\t\tb := h.Sum(nil)\n\t\tk := new(big.Int).SetBytes(b[:byteLen])\n\t\tif k.Cmp(new(big.Int).Sub(params.N, one)) == -1 {\n\t\t\treturn k.Add(k, one)\n\t\t}\n\t}\n}", "func _pow(x, n, lim float64) float64 {\n\tif v := math.Pow(x, n); v < lim {\n\t\treturn v\n\t} else {\n\t\tfmt.Printf(\"%g >= %g\\n\", v, lim)\n\t}\n\t// can't use v here, though\n\treturn lim\n}", "func R2(n int) int {\n\tif n <= 1 {\n\t\treturn 0\n\t}\n\n\t// store numbers in range [2,n]\n\tnumbers := make([]int, n-1)\n\tfor i := range numbers {\n\t\tnumbers[i] = i + 2\n\t}\n\n\t// sieve method\n\tfor i, v := range numbers {\n\t\tif v == i+2 {\n\t\t\t// v is a prime\n\t\t\tfor j := i + v; j <= n-2; j += v {\n\t\t\t\tnumbers[j] = 0\n\t\t\t}\n\t\t}\n\t}\n\n\tcount := 0\n\tfor i, v := range numbers {\n\t\tif v == i+2 {\n\t\t\t// v is a prime\n\t\t\tj := n - v - 2\n\t\t\tif j >= 0 && numbers[j] > 0 {\n\t\t\t\tcount++\n\t\t\t\tnumbers[j] = 0\n\t\t\t}\n\t\t}\n\t}\n\treturn count\n}", "func (s *HierarchyStructureUpdate) SetLevelTwo(v *HierarchyLevelUpdate) *HierarchyStructureUpdate {\n\ts.LevelTwo = v\n\treturn s\n}", "func IsInt2(val interface{}, minAndMax ...int64) (ok bool) {\n\tif val == nil {\n\t\treturn false\n\t}\n\n\tintVal, err := ToInt(val)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\targLn := len(minAndMax)\n\tif argLn == 0 { // only check type\n\t\treturn true\n\t}\n\n\t// value check\n\tminVal := minAndMax[0]\n\tif argLn == 1 { // only min length check.\n\t\treturn intVal >= minVal\n\t}\n\n\tmaxVal := minAndMax[1]\n\n\t// min and max length check\n\treturn intVal >= minVal && intVal <= maxVal\n}", "func 
PartTwo(input string) (string, error) {\n\tips := getIPAddresses(input)\n\n\tvalid := 0\n\n\tfor _, ip := range ips {\n\t\tif supportsSLS(ip) {\n\t\t\tvalid++\n\t\t}\n\t}\n\n\treturn strconv.Itoa(valid), nil\n}", "func (s *HierarchyStructure) SetLevelTwo(v *HierarchyLevel) *HierarchyStructure {\n\ts.LevelTwo = v\n\treturn s\n}", "func BigPower(num string, n uint) string {\n\tacc := \"1\"\n\tfor i := uint(0); i < n; i++ {\n\t\tacc = BigProduct(acc, num)\n\t}\n\treturn acc\n}" ]
[ "0.7771833", "0.7695848", "0.7673033", "0.7671674", "0.76035964", "0.7542418", "0.743495", "0.7422344", "0.7394407", "0.7381637", "0.73668385", "0.7319235", "0.7258272", "0.7242367", "0.7227922", "0.68792415", "0.68453276", "0.68033123", "0.6789816", "0.67740595", "0.67543846", "0.67269397", "0.6458477", "0.6297609", "0.62497586", "0.6190832", "0.6185018", "0.6135644", "0.60533804", "0.60132885", "0.5865399", "0.58470607", "0.58187664", "0.57993376", "0.55379343", "0.5515644", "0.5485111", "0.5454201", "0.54343504", "0.5423669", "0.5422105", "0.5397642", "0.5328981", "0.526053", "0.52101207", "0.5198412", "0.5186504", "0.51729935", "0.51210546", "0.51190996", "0.5087253", "0.50391454", "0.49982584", "0.49803644", "0.496682", "0.49617305", "0.4950177", "0.49164468", "0.4914132", "0.49084324", "0.4907587", "0.49015027", "0.49012852", "0.48951662", "0.48784924", "0.4869249", "0.48617902", "0.48594424", "0.4832646", "0.4832267", "0.4817793", "0.48107275", "0.47707275", "0.474052", "0.47364116", "0.47289777", "0.47288454", "0.47202733", "0.46976224", "0.46900636", "0.4684956", "0.46810654", "0.4672569", "0.4658905", "0.4650748", "0.4640992", "0.46205574", "0.46155068", "0.46031013", "0.45916072", "0.45916072", "0.4590604", "0.45895296", "0.45847607", "0.4579991", "0.45770052", "0.45764017", "0.45621747", "0.45609155", "0.45586336" ]
0.78166837
0
New allocates and initialises a circular buffer and returns a pointer to it.
func New(len int) (*CircBuf, error) { if len <= 0 { return nil, fmt.Errorf("len argument, %v, is not positive", len) } if !powerOf2(len) { return nil, fmt.Errorf("len argument, %v, is not an integer power of 2", len) } c := new(CircBuf) c.buf = make([]byte, len) return c, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func New(capacity int) *RingBuffer {\n\treturn &RingBuffer{\n\t\tC: make(chan struct{}, 1),\n\t\tbuf: make([]byte, capacity),\n\t\tlen: 0,\n\t\tindex: 0,\n\t}\n}", "func newBuffer(r io.Reader, offset int64) *buffer {\n\treturn &buffer{\n\t\tr: r,\n\t\toffset: offset,\n\t\tbuf: make([]byte, 0, 4096),\n\t\tallowObjptr: true,\n\t\tallowStream: true,\n\t}\n}", "func newBuffer() Buffer {\n\treturn &buffer{\n\t\tbytes: make([]byte, 0, 64),\n\t}\n}", "func New(size int) *RingBuffer {\n\treturn &RingBuffer{\n\t\tbuf: make([]byte, size),\n\t\tsize: size,\n\t}\n}", "func newBuffer(buf []byte) *Buffer {\n\treturn &Buffer{data: buf}\n}", "func New(capacity int) (b *Ring) {\n\treturn &Ring{\n\t\tbuf: make([]interface{}, capacity),\n\t\thead: -1,\n\t}\n}", "func (p *Pool) NewBuffer() *Buffer {\n\treturn &Buffer{pool: p, bufs: make([][]byte, 0, 128), curBufIdx: -1}\n}", "func NewBuffer() *Buffer {\n\treturn NewBufferWithSize(initialSize)\n}", "func New(b []byte) *Buffer {\n\treturn &Buffer{b: b}\n}", "func NewBuffer(inp []byte) *ByteBuffer {\n\tif inp == nil {\n\t\tinp = make([]byte, 0, 512)\n\t}\n\treturn &ByteBuffer{Buffer: bytes.NewBuffer(inp)}\n}", "func NewBuffer(length int) *Buffer {\n\treturn &Buffer{\n\t\titems: make([]unsafe.Pointer, length),\n\t}\n}", "func newBuffer() *buffer {\n\treturn &buffer{\n\t\tdata: make([]byte, 0),\n\t\tlen: 0,\n\t\tpkg: nil,\n\t\tconn: nil,\n\t\tpkgCh: make(chan *pkg),\n\t\tevCh: make(chan *pkg),\n\t\terrCh: make(chan error, 1),\n\t}\n}", "func New(i int) *Buffer {\n\treturn &Buffer{\n\t\tsize: i,\n\t}\n}", "func newBuffer(b []byte) *buffer {\n\treturn &buffer{proto.NewBuffer(b), 0}\n}", "func newRingBuf(size int) *CircularQueue {\n\tvar buf = new(CircularQueue)\n\tbuf.Make(size)\n\tglog.Info(\"maximum backups: \" + strconv.Itoa(size))\n\treturn buf\n}", "func (pool *BufferPool) New() (buf *bytes.Buffer) {\n\tselect {\n\tcase buf = <-pool.Buffers:\n\tdefault:\n\t\tbuf = &bytes.Buffer{}\n\t}\n\treturn\n}", "func New(capacity int) (*CBuf, error) {\n\tif capacity < 0 {\n\t\treturn nil, fmt.Errorf(\"negative capacity (%d)\",\n\t\t\tcapacity)\n\t}\n\treturn &CBuf{\n\t\tcap: capacity,\n\t\telems: make([]interface{}, capacity),\n\t}, nil\n}", "func NewBuffer() *Buffer { return globalPool.NewBuffer() }", "func NewBuffer(size int) *Buffer {\n\treturn &Buffer{size: size, tail: 0, head: 0, buf: make([]byte, size)}\n}", "func newBuffer(e []byte) *Buffer {\n\tp := buffer_pool.Get().(*Buffer)\n\tp.buf = e\n\treturn p\n}", "func NewBuffer(size int) *Buffer {\n\treturn &Buffer{\n\t\tdata: make([]byte, size),\n\t}\n}", "func NewRingBuffer(initialSize int) *RingBuffer {\n\treturn &RingBuffer{\n\t\tbuf: make([]byte, initialSize),\n\t\tsize: initialSize,\n\t}\n}", "func NewBuffer(size int) *Buffer {\n\tif size <= 0 {\n\t\treturn &Buffer{}\n\t}\n\treturn &Buffer{\n\t\tstorage: make([]byte, size),\n\t\tsize: size,\n\t}\n}", "func NewRingBuffer(cap int) *RingBuffer {\n\tif cap == 0 {\n\t\tcap = 7\n\t}\n\tcap = round8(cap)\n\n\treturn &RingBuffer{\n\t\tcap: cap,\n\t\tdata: make([]byte, cap),\n\t}\n}", "func NewBuffer() Buffer {\n\treturn &buffer{}\n}", "func New(rw io.ReadWriteCloser, t reflect.Type, buffer int) *Chan {\n\tbuffer--\n\tif buffer < 0 {\n\t\tbuffer = 0\n\t}\n\treturn newChan(t, buffer, rw)\n}", "func (b *defaultByteBuffer) NewBuffer() ByteBuffer {\n\treturn NewWriterBuffer(256)\n}", "func NewCapacityBuffer(capacity int) *Buffer {\n return &Buffer{data: make([]byte, capacity)}\n}", "func NewBuffer(player *Player, conn net.Conn, ctrl chan bool) *Buffer {\r\n\tmax := 
DEFAULT_QUEUE_SIZE\r\n\r\n\tbuf := Buffer{conn: conn}\r\n\tbuf.pending = make(chan []byte, max)\r\n\tbuf.ctrl = ctrl\r\n\tbuf.max = max\r\n\treturn &buf\r\n}", "func Constructor(capacity int) MyCircularQueue {\n\tdummyHead, dummyTail := NewNode(0), NewNode(0)\n\tdummyHead.Next = dummyTail\n\tdummyTail.Pre = dummyHead\n\treturn MyCircularQueue{dummyHead, dummyTail, 0, capacity}\n}", "func NewBuffer(m []byte, skip, size int64) (*Buffer, error) {\n\tb := &Buffer{\n\t\toffset: skip,\n\t\tsize: size,\n\t\tdata: m,\n\t}\n\treturn b, nil\n}", "func newBuffer(bits uint32) buffer {\n\tvar b buffer\n\tb.data = make([]unsafe.Pointer, 1<<bits)\n\tb.free = 1 << bits\n\tb.mask = 1<<bits - 1\n\tb.bits = bits\n\treturn b\n}", "func New(size int) *MsgBuffer {\r\n\r\n\treturn &MsgBuffer{\r\n\t\tb: make([]byte, size),\r\n\t}\r\n}", "func New(w, h int) *Buffer {\n\tb := &Buffer{\n\t\tWidth: w,\n\t\tHeight: h,\n\t\tCursor: NewCursor(0, 0),\n\t\tTiles: make([]*Tile, w*h),\n\t}\n\tb.Resize(w, h)\n\treturn b\n}", "func New(capacity int, fn func(series []*influxdb.Series)) *Buffer {\n\treturn NewBuffer(capacity, fn)\n}", "func NewBuffer() *Buffer {\n\treturn &Buffer{Line: []byte{}, Val: make([]byte, 0, 32)}\n}", "func NewPtrBuffer(aSlice interface{}) *Buffer {\n aSliceValue := sliceValue(aSlice, true)\n return &Buffer{\n buffer: aSliceValue,\n handler: overwriteNilPtrHandler{\n creater: newCreaterFunc(nil, aSliceValue.Type())}}\n}", "func newNetBuf(size int) (*netBuf, []byte) {\n\tnb := &netBuf{\n\t\tbuf: make([]byte, size),\n\t\tpool: -1,\n\t}\n\treturn nb, nb.buf\n}", "func NewEmptyBuffer() *Buffer {\n return &Buffer{data: make([]byte, 0)}\n}", "func New(handler Action, options ...Option) *Buffer {\n\tafb := Buffer{\n\t\tLatch: async.NewLatch(),\n\t\tHandler: handler,\n\t\tParallelism: runtime.NumCPU(),\n\t\tMaxFlushes: DefaultMaxFlushes,\n\t\tMaxLen: DefaultMaxLen,\n\t\tInterval: DefaultFlushInterval,\n\t\tShutdownGracePeriod: DefaultShutdownGracePeriod,\n\t}\n\tfor _, option := range options {\n\t\toption(&afb)\n\t}\n\tafb.contents = collections.NewRingBufferWithCapacity(afb.MaxLen)\n\treturn &afb\n}", "func NewBuffer(e []byte) *Buffer {\n\treturn &Buffer{buf: e, length: len(e)}\n}", "func NewBuffer(e []byte) *Buffer {\n\treturn &Buffer{buf: e}\n}", "func NewPtrGrowingBuffer(\n aSlice interface{},\n length int,\n creater functional.Creater) *GrowingBuffer {\n return newGrowingBuffer(\n sliceType(aSlice, false),\n creater,\n true,\n length)\n}", "func new_buffer(conn *websocket.Conn, ctrl chan struct{}, txqueuelen int) *Buffer {\n\tbuf := Buffer{conn: conn}\n\tbuf.pending = make(chan []byte, txqueuelen)\n\tbuf.ctrl = ctrl\n\tbuf.cache = make([]byte, packet.PACKET_LIMIT+2)\n\treturn &buf\n}", "func Acquire() *Buffer {\n\tv := pool.Get()\n\tif v == nil {\n\t\treturn &Buffer{\n\t\t\tB: make([]byte, 0, defaultByteBufferSize),\n\t\t}\n\t}\n\treturn v.(*Buffer)\n}", "func newCASClonedBuffer(base Buffer, digest digest.Digest, source Source) Buffer {\n\treturn &casClonedBuffer{\n\t\tbase: base,\n\t\tdigest: digest,\n\t\tsource: source,\n\n\t\tconsumersRemaining: 1,\n\t\tmaximumChunkSizeBytes: -1,\n\t}\n}", "func NewByteBuffer(buf []byte) *ByteBuffer {\n\treturn &ByteBuffer{\n\t\tbuf: buf,\n\t}\n}", "func NewBuffer(ssrc uint32, vp, ap *sync.Pool) *Buffer {\n\tb := &Buffer{\n\t\tmediaSSRC: ssrc,\n\t\tvideoPool: vp,\n\t\taudioPool: ap,\n\t\tpacketChan: make(chan rtp.Packet, 100),\n\t}\n\treturn b\n}", "func NewBuffer() Buffer {\n\treturn Buffer{\n\t\tCellMap: make(map[image.Point]Cell),\n\t\tArea: image.Rectangle{}}\n}", 
"func NewBuffer(p producer.Producer, size int, flushInterval time.Duration, logger log.Logger) *Buffer {\n\tflush := 1 * time.Second\n\tif flushInterval != 0 {\n\t\tflush = flushInterval\n\t}\n\n\tb := &Buffer{\n\t\trecords: make([]*data.Record, 0, size),\n\t\tmu: new(sync.Mutex),\n\t\tproducer: p,\n\t\tbufferSize: size,\n\t\tlogger: logger,\n\t\tshouldFlush: make(chan bool, 1),\n\t\tflushInterval: flush,\n\t\tlastFlushed: time.Now(),\n\t}\n\n\tgo b.runFlusher()\n\n\treturn b\n}", "func NewRingBuffer(maxBytes int64) *RingBuffer {\n\tif maxBytes < 0 {\n\t\tmaxBytes = defaultMaxBytes\n\t}\n\n\trb := &RingBuffer{\n\t\tclosed: false,\n\t\tq: newQueue(),\n\t\tmaxBytes: maxBytes,\n\t}\n\trb.wait = sync.NewCond(&rb.mu)\n\treturn rb\n}", "func newQueueBuffer(limit uint64) *queueBuffer {\n\treturn &queueBuffer{\n\t\tfirst: nil,\n\t\tlast: nil,\n\t\tdepth: 0,\n\t\tlimit: limit,\n\t\tunlimited: (limit == 0),\n\t\tlock: sync.Mutex{},\n\t}\n}", "func New() Queue {\n\tc := make(chan uint16, size)\n\treturn Queue(c)\n}", "func NewByteBuffer(n int) *ByteBuffer {\n\tb := new(ByteBuffer)\n\tif n > 0 {\n\t\tb.B = b.getBuf(n)\n\t\tb.size = n\n\t}\n\treturn b\n}", "func (session *Session) NewRingBuffer() *ringBuffer {\n\tring := new(ringBuffer)\n\n\tring.datagramSize = 6 + int(session.param.blockSize)\n\tring.datagrams = make([]byte, ring.datagramSize*MAX_BLOCKS_QUEUED)\n\n\tring.dataReady = false\n\tring.spaceReady = true\n\n\t/* initialize the indices */\n\tring.countData = 0\n\tring.countReserved = 0\n\tring.baseData = 0\n\n\tring.dataReadyCond = sync.NewCond(&ring.mutex)\n\tring.spaceReadyCond = sync.NewCond(&ring.mutex)\n\n\treturn ring\n}", "func (r *Record) NewBuffer() *bytes.Buffer {\n\tif r.Buffer == nil {\n\t\treturn &bytes.Buffer{}\n\t}\n\n\treturn r.Buffer\n}", "func NewSeekableBuffer() *SeekableBuffer {\n\tdata := make([]byte, 0)\n\n\treturn &SeekableBuffer{\n\t\tdata: data,\n\t}\n}", "func NewReaderBuffer(buf []byte) ByteBuffer {\n\treturn newReaderByteBuffer(buf)\n}", "func NewBuf(b *os.File, c Compare, v interface{}) Buf {\n\treturn Buf{b, bufio.NewScanner(b), c, v}\n}", "func NewBuffer(conn *net.TCPConn, buffOb chan bool, maxQueueSize int) *Buffer {\n\tsize := maxQueueSize\n\n\tif size == -1 {\n\t\tsize = DEFAULT_QUEUE_SIZE\n\t}\n\n\tbuf := new(Buffer)\n\tbuf.conn = conn\n\tbuf.pending = make(chan []byte, size)\n\tbuf.ctrl = make(chan bool)\n\tbuf.ob = buffOb\n\tbuf.max = size\n\n\treturn buf\n}", "func New(bufferLen int) *Chanx {\n\tchanx := &Chanx{\n\t\tchans: list.New(),\n\t\tbufferLen: bufferLen,\n\t\tchansLock: &sync.Mutex{},\n\t\tStopChan: make(chan struct{}),\n\t}\n\tchanx.ReadChan = chanx.createChan()\n\tchanx.WriteChan = chanx.ReadChan\n\tchanx.pushChan(chanx.ReadChan)\n\treturn chanx\n}", "func New[T any](max int) *RingBuffer[T] {\n\treturn &RingBuffer[T]{\n\t\tmax: max,\n\t}\n}", "func (tl *TextBufList) New() *TextBuf {\n\ttb := tl.AddNewChild(KiT_TextBuf, \"newbuf\").(*TextBuf)\n\treturn tb\n}", "func New(capacity int32) *FreeArray {\n\titems := make([]node, capacity)\n\tfor i := range items {\n\t\titems[i] = node{\n\t\t\tnext: int32(i + 1),\n\t\t\tprev: int32(i - 1),\n\t\t\tdata: nil,\n\t\t}\n\t}\n\titems[0].prev = -1\n\titems[capacity-1].next = -1\n\treturn &FreeArray{\n\t\titems: items,\n\t\tfreehead: 0,\n\t\tbusyhead: -1,\n\t}\n}", "func (m *Manager) NewBuffer(conf buffer.Config) (buffer.Streamed, error) {\n\treturn nil, component.ErrInvalidType(\"buffer\", conf.Type)\n}", "func New(buf pi.IBuffer) pi.ICursor {\n\treturn &cursor{\n\t\tx: 0,\n\t\ty: 0,\n\t\tbuf: buf,\n\t\tmode: 
pi.ModeNormal,\n\t}\n}", "func newBuffer(br *Reader) (*buffer, error) {\n\tn, err := io.ReadFull(br.r, br.buf[:4])\n\t// br.r.Chunk() is only valid after the call the Read(), so this\n\t// must come after the first read in the record.\n\ttx := br.r.Begin()\n\tdefer func() {\n\t\tbr.lastChunk = tx.End()\n\t}()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n != 4 {\n\t\treturn nil, errors.New(\"bam: invalid record: short block size\")\n\t}\n\tb := &buffer{data: br.buf[:4]}\n\tsize := int(b.readInt32())\n\tif size == 0 {\n\t\treturn nil, io.EOF\n\t}\n\tif size < 0 {\n\t\treturn nil, errors.New(\"bam: invalid record: invalid block size\")\n\t}\n\tif size > cap(br.buf) {\n\t\tb.off, b.data = 0, make([]byte, size)\n\t} else {\n\t\tb.off, b.data = 0, br.buf[:size]\n\t\tb.shared = true\n\t}\n\tn, err = io.ReadFull(br.r, b.data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n != size {\n\t\treturn nil, errors.New(\"bam: truncated record\")\n\t}\n\treturn b, nil\n}", "func NewCircleQueue() *CircleQueue {\n\treturn &CircleQueue{\n\t\tcapacity: 16 + 1,\n\t\tdata: make([]interface{}, 16+1),\n\t\thead: 0,\n\t\ttail: 0,\n\t}\n}", "func newDownloadBuffer(length, sectorSize uint64) downloadBuffer {\n\t// Completion the length multiple of sector size(4MB)\n\tif length%sectorSize != 0 {\n\t\tlength += sectorSize - length%sectorSize\n\t}\n\n\tddb := downloadBuffer{\n\t\tbuf: make([][]byte, 0, length/sectorSize),\n\t\tsectorSize: sectorSize,\n\t}\n\tfor length > 0 {\n\t\tddb.buf = append(ddb.buf, make([]byte, sectorSize))\n\t\tlength -= sectorSize\n\t}\n\treturn ddb\n}", "func newSafeBuffer() *safeBuffer {\n\treturn &safeBuffer{\n\t\tbuf: bytes.NewBuffer(nil),\n\t}\n}", "func NewBuffer(reader io.Reader, size int64, path string, cursorPosition []string) *Buffer {\n\tb := new(Buffer)\n\tb.LineArray = NewLineArray(size, reader)\n\n\tb.Settings = DefaultLocalSettings()\n\t//\tfor k, v := range globalSettings {\n\t//\t\tif _, ok := b.Settings[k]; ok {\n\t//\t\t\tb.Settings[k] = v\n\t//\t\t}\n\t//\t}\n\n\tif fileformat == 1 {\n\t\tb.Settings[\"fileformat\"] = \"unix\"\n\t} else if fileformat == 2 {\n\t\tb.Settings[\"fileformat\"] = \"dos\"\n\t}\n\n\tb.Path = path\n\n\tb.EventHandler = NewEventHandler(b)\n\n\tb.update()\n\n\tb.Cursor = Cursor{\n\t\tLoc: Loc{0, 0},\n\t\tbuf: b,\n\t}\n\n\t//InitLocalSettings(b)\n\n\tb.cursors = []*Cursor{&b.Cursor}\n\n\treturn b\n}", "func NewBuffered(bufferLength uint) Executor {\n\treturn make(Executor, bufferLength)\n}", "func NewBuffer() *Buffer {\n\treturn &Buffer{B: &strings.Builder{}}\n}", "func New(bitCapacity int) *Ring {\n\treturn &Ring{\n\t\tbitmap: make([]uint64, pow2BitCapacity(bitCapacity)/64),\n\t\tlowestMarked: -1,\n\t\thighestMarked: -1,\n\t\tlowestPending: -1,\n\t\thighestPending: -1,\n\t\tlowestComplete: -1,\n\t\thighestComplete: -1,\n\t}\n}", "func (b *Buffer) AttachNew() {\n b.data = make([]byte, 0)\n b.size = 0\n b.offset = 0\n}", "func NewBuffer(aSlice interface{}) *Buffer {\n return &Buffer{buffer: sliceValue(aSlice, false), handler: valueHandler{}}\n}", "func Constructor(k int) MyCircularQueue {\n\treturn MyCircularQueue{\n\t\tQueue: make([]int, k),\n\t\tHead: 0,\n\t\tTail: 0,\n\t\tCount: 0,\n\t}\n}", "func (fw *Writer) NewBuf() {\n\tif fw.buf != nil {\n\t\tlog.Panicf(\"Overwriting buffer %+v\", fw)\n\t}\n\tvv, ok := fw.bufFreePool.pool.Get()\n\tif !ok {\n\t\tpanic(\"get\")\n\t}\n\twb := vv.(*fieldWriteBuf)\n\tseq := fw.nextBlockSeq\n\tfw.nextBlockSeq++\n\twb.reset(seq, fmt.Sprintf(\"%s:%d\", fw.label, seq))\n\tfw.buf = wb\n}", "func 
NewBuffer(capacity int, fn func(series []*influxdb.Series)) *Buffer {\n\tb := &Buffer{\n\t\tfn: fn,\n\t\tin: make(chan *influxdb.Series),\n\t\tseries: make(map[string]*influxdb.Series),\n\t\tcapacity: capacity,\n\t}\n\tif b.capacity > 0 {\n\t\tgo b.aggregate()\n\t}\n\n\treturn b\n}", "func createBuffer() *bytes.Buffer {\n\tbuf := bytes.Buffer{}\n\treturn &buf\n}", "func New(ctx context.Context, rate, timespan int) (Bucket, error) {\n\tq := make(chan struct{}, rate)\n\tb := Bucket{ctx: ctx, queue: q, rate: rate, timespan: timespan}\n\tgo b.leak()\n\treturn b, nil // maybe return pointer?\n}", "func (c Uint8Codec) New() unsafe.Pointer {\n\treturn unsafe.Pointer(new(uint8))\n}", "func NewBuffer(capacity int) Buffer {\n\treturn Buffer{\n\t\tcapacity: capacity,\n\t\tcurrentSize: 0,\n\t\tcontents: map[entity.Key]inventoryapi.PostDeltaBody{},\n\t}\n}", "func NewConsumer(ring *Buffer) *Consumer {\n\treturn &Consumer{ring, NewCache(len(ring.items)), 0}\n}", "func NewRingBuffer(size uint) *RingBuffer {\n\treturn &RingBuffer{data: make([]stats.Record, size)}\n}", "func NewGrowingBuffer(aSlice interface{}, length int) *GrowingBuffer {\n return newGrowingBuffer(\n sliceType(aSlice, false),\n nil,\n false,\n length)\n}", "func Constructor(k int) MyCircularQueue {\n return MyCircularQueue{Size: k, Items: make([]int, k), HeadIndex: -1, TailIndex: -1}\n}", "func newEventBuffer(size int64) *eventBuffer {\n\tzero := int64(0)\n\tb := &eventBuffer{\n\t\tmaxSize: size,\n\t\tsize: &zero,\n\t}\n\n\titem := newBufferItem(&structs.Events{Index: 0, Events: nil})\n\n\tb.head.Store(item)\n\tb.tail.Store(item)\n\n\treturn b\n}", "func New() Framer {\n\tf := &framer{\n\t\tbufLock: &sync.RWMutex{},\n\t\tbuffer: make([]byte, 0),\n\t}\n\n\treturn f\n}", "func Constructor(k int) MyCircularQueue {\n\treturn MyCircularQueue{\n\t\titems: make([]int, k+1),\n\t\tfront: 0,\n\t\trear: 0,\n\t\tsize: k + 1,\n\t}\n}", "func newBufferPool() *bufferPool {\n\treturn &bufferPool{&sync.Pool{\n\t\tNew: func() interface{} {\n\t\t\treturn &bytes.Buffer{}\n\t\t},\n\t}}\n}", "func NewBuffer(conn *sqlite.Conn) (*Buffer, error) {\n\treturn NewBufferSize(conn, 16*1024)\n}", "func NewBuffer(size int) ([]byte, error) {\n\tvar pool *sync.Pool\n\n\t// return buffer size\n\toriginSize := size\n\n\tif size <= 4096 {\n\t\tsize = 4096\n\t\tpool = &buf4kPool\n\t} else if size <= 16*1024 {\n\t\tsize = 16 * 1024\n\t\tpool = &buf16kPool\n\t} else if size <= 64*1024 {\n\t\tsize = 64 * 1024\n\t\tpool = &buf64kPool\n\t} else {\n\t\t// if message is larger than 64K, return err\n\t\treturn nil, ErrTooLarge\n\t}\n\n\tif v := pool.Get(); v != nil {\n\t\treturn v.([]byte)[:originSize], nil\n\t}\n\n\treturn make([]byte, size)[:originSize], nil\n}", "func NewRing(maxSize int, sep byte) History {\n\treturn &ring{\n\t\tmaxSize: maxSize,\n\t\tbuffer: make([]byte, maxSize),\n\t\thead: 0,\n\t\ttail: 0,\n\t\tsep: sep,\n\t}\n}", "func NewCircularQueue(n int) *CircularQueue {\n\tif n == 0 {\n\t\treturn nil\n\t}\n\treturn &CircularQueue{make([]interface{}, n), 0, 0, 0}\n}", "func New() Queue {\n\treturn Queue{list: linkedlist.New()}\n}", "func Constructor(k int) MyCircularDeque {\n\treturn MyCircularDeque{\n\t\thead: nil,\n\t\ttail: nil,\n\t\tsize: k,\n\t\tlen: 0,\n\t}\n}", "func NewRing(size int) *Ring {\n\tr := &Ring{\n\t\tsize: size,\n\t\tdata: make([]interface{}, size),\n\t}\n\treturn r\n}", "func NewSafeBuffer() *SafeBuffer {\n\treturn &SafeBuffer{\n\t\tb: bytes.NewBuffer(nil),\n\t\tm: sync.RWMutex{},\n\t}\n}", "func New(ch chan interface{}) Ctx {\n\treturn 
Ctx{bridge.New(ch)}\n}" ]
[ "0.7357195", "0.69552714", "0.6855946", "0.68303967", "0.6786163", "0.6757838", "0.6757083", "0.67039424", "0.66764987", "0.662815", "0.6606655", "0.6553242", "0.6535949", "0.65314984", "0.652369", "0.65169406", "0.6485235", "0.6475258", "0.6470897", "0.6411134", "0.6381886", "0.63735926", "0.6316285", "0.6307953", "0.6278154", "0.6269391", "0.62346435", "0.62136036", "0.6188765", "0.61858314", "0.617173", "0.6155743", "0.61360514", "0.60984737", "0.60943425", "0.60839933", "0.603126", "0.6011827", "0.60099286", "0.60058856", "0.59988207", "0.59752285", "0.5972593", "0.59566134", "0.5932361", "0.5918511", "0.5918218", "0.5910212", "0.5901303", "0.5898189", "0.5898171", "0.5884857", "0.5877982", "0.58391047", "0.58277524", "0.5825389", "0.5820765", "0.58137935", "0.58001566", "0.5796924", "0.57915205", "0.5790478", "0.5763638", "0.5759512", "0.5743369", "0.5737282", "0.5728161", "0.57230645", "0.57193214", "0.5714927", "0.571057", "0.57066035", "0.56990683", "0.5687404", "0.5674542", "0.56424725", "0.5604445", "0.55891895", "0.5586879", "0.5578791", "0.55762035", "0.55754626", "0.55465233", "0.5536045", "0.55179226", "0.5505757", "0.5484095", "0.54771614", "0.54639995", "0.5459591", "0.5453335", "0.54378396", "0.54362285", "0.54232675", "0.54194427", "0.5416346", "0.5415533", "0.53947645", "0.5386442", "0.5374109" ]
0.6916476
2
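A minimal usage sketch for the New constructor in the record above, assuming the CircBuf type and the Write method shown in this record; everything else here is hypothetical caller code:

package main

import (
	"fmt"
	"log"
)

func main() {
	// New requires a positive, power-of-two length so that the
	// (len(c.buf) - 1) expressions in the other methods work as index masks.
	cb, err := New(64)
	if err != nil {
		log.Fatal(err)
	}
	n := cb.Write([]byte("hello")) // Write returns the number of bytes accepted
	fmt.Println("wrote", n, "bytes")

	// A non-power-of-two length is rejected rather than silently rounded.
	if _, err := New(100); err != nil {
		fmt.Println("rejected:", err)
	}
}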
spaceToEnd counts the number of unused bytes in a circular buffer to the end of the linear buffer or the end of the circular buffer, whichever is smaller.
func (c *CircBuf) spaceToEnd() int { spaceEndLinearBuf := len(c.buf) - c.head spaceEndCircBuf := (c.tail + spaceEndLinearBuf - 1) & (len(c.buf) - 1) if spaceEndLinearBuf < spaceEndCircBuf { return spaceEndLinearBuf } return spaceEndCircBuf }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *CircBuf) countToEnd() int {\n\treturn c.countToEndArg(c.tail)\n}", "func (c *CircBuf) countToEndArg(tail int) int {\n\tcountEndLinearBuf := len(c.buf) - tail\n\tcountEndCircBuf := (c.head + countEndLinearBuf) & (len(c.buf) - 1)\n\tif countEndCircBuf < countEndLinearBuf {\n\t\treturn countEndCircBuf\n\t}\n\treturn countEndLinearBuf\n}", "func (c *CircBuf) Space() int {\n\treturn (c.tail - c.head - 1) & (len(c.buf) - 1)\n}", "func (ls *linestate) deleteToEnd() {\n\tls.buf = ls.buf[:ls.pos]\n\tls.refreshLine()\n}", "func (f *Flex) SpaceEnd() (out *Flex) {\n\tf.Spacing = SpaceEnd\n\treturn f\n}", "func (s *SinglyLinkedList) AddToEnd(val interface{}) {\n\tnode := &Node{val, nil}\n\ts.Count += 1\n\tlast := s.LastNode()\n\tif last != nil {\n\t\tlast.Next = node\n\t}\n}", "func (b *Buffer) Remaining(from Cursor) uint64 {\n\tb.mu.RLock()\n\tdefer b.mu.RUnlock()\n\n\tif from.offset > b.last {\n\t\treturn 0\n\t}\n\n\toff := from.offset\n\tif off < b.first {\n\t\toff = b.first\n\t}\n\tremaining := b.last - off\n\tremaining += uint64(b.frameSize(b.last))\n\treturn remaining\n}", "func (cur *cursor) invalidateAtEnd() {\n\tcur.idx = int(cur.nd.count)\n}", "func (b *Buffer) End() Loc {\n\treturn Loc{utf8.RuneCount(b.lines[b.NumLines-1].data), b.NumLines - 1}\n}", "func (r *Record) End() int {\n\tif r.Flags&Unmapped != 0 || len(r.Cigar) == 0 {\n\t\treturn r.Pos + 1\n\t}\n\tpos := r.Pos\n\tend := pos\n\tfor _, co := range r.Cigar {\n\t\tpos += co.Len() * co.Type().Consumes().Reference\n\t\tend = max(end, pos)\n\t}\n\treturn end\n}", "func (c *CircBuf) Consume(nbytes int) int {\n\tvar count int\n\tvar num int\n\tfor {\n\t\tcount = c.countToEnd()\n\t\tif nbytes - num < count {\n\t\t\tcount = nbytes - num\n\t\t}\n\t\tif count <= 0 {\n\t\t\tbreak\n\t\t}\n\t\tc.tail = (c.tail + count) & (len(c.buf) - 1)\n\t\tnum += count\n\t}\n\treturn num\n}", "func (tv *TextView) RenderRegionToEnd(st TextPos, sty *gi.Style, bgclr *gi.ColorSpec) {\n\tspos := tv.CharStartPos(st)\n\tepos := spos\n\tepos.Y += tv.LineHeight\n\tepos.X = float32(tv.VpBBox.Max.X)\n\tif int(mat32.Ceil(epos.Y)) < tv.VpBBox.Min.Y || int(mat32.Floor(spos.Y)) > tv.VpBBox.Max.Y {\n\t\treturn\n\t}\n\n\trs := &tv.Viewport.Render\n\tpc := &rs.Paint\n\n\tpc.FillBox(rs, spos, epos.Sub(spos), bgclr) // same line, done\n}", "func (b *ringBuf) clearTo(hi uint64) (removedBytes, removedEntries int32) {\n\tif b.len == 0 || hi < first(b).index(b) {\n\t\treturn\n\t}\n\tit, ok := first(b), true\n\tfirstIndex := it.index(b)\n\tfor ok && it.index(b) < hi {\n\t\tremovedBytes += int32(it.entry(b).Size())\n\t\tremovedEntries++\n\t\tit.clear(b)\n\t\tit, ok = it.next(b)\n\t}\n\toffset := int(hi - firstIndex)\n\tif offset > b.len {\n\t\toffset = b.len\n\t}\n\tb.len = b.len - offset\n\tb.head = (b.head + offset) % len(b.buf)\n\tif b.len < (len(b.buf) / shrinkThreshold) {\n\t\trealloc(b, 0, b.len)\n\t}\n\treturn\n}", "func (list *List) AppendToEnd(data int) {\n // 1. Create a new Node\n newNode := &Node{data: data, next: nil}\n\n // 2a. If list contains no elements, set new node as head of list\n // 2b. If list contains any element, traverse till last and append new node\n if list.size == 0 {\n list.head = newNode\n } else if list.size > 0 {\n current := list.head\n for current.next != nil {\n current = current.next\n }\n current.next = newNode\n }\n\n // 3. 
Increment the list size\n list.size++\n}", "func (r MemRegion) End() uint64 {\n\treturn r.base + r.size\n}", "func (f *Field) End() int64", "func (r Range) End() int64 {\n\treturn r.Pos + r.Size\n}", "func (q *Queue) MoveToEnd(key string) {\n\tq.Delete(key)\n\tq.Push(key)\n}", "func (ci *ContigIndex) SetEnd() {\n\tciLineNum := ci.length / ci.basePerLine\n\tciRemainder := ci.length % ci.basePerLine\n\tci.end = ci.start + (ciLineNum * ci.bytePerLine) + ciRemainder - 1\n}", "func (i *ImageBuf) YEnd() int {\n\tret := int(C.ImageBuf_yend(i.ptr))\n\truntime.KeepAlive(i)\n\treturn ret\n}", "func MoveElementToEnd(array []int, toMove int) []int {\n\tarrayCopy := []int{}\n\tcount := 0\n\n\tfor _, v := range array {\n\t\tif v != toMove {\n\t\t\tarrayCopy = append(arrayCopy, v)\n\t\t} else {\n\t\t\tcount++\n\t\t}\n\t}\n\tfor i := 0; i < count; i++ {\n\t\tarrayCopy = append(arrayCopy, toMove)\n\t}\n\n\treturn arrayCopy\n}", "func (r Range) End() uint64 {\n\treturn r.Offset + r.Length\n}", "func (*inMemoryAllocator) End() int64 {\n\t// It doesn't matter.\n\treturn 0\n}", "func (o Int64RangeMatchOutput) RangeEnd() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v Int64RangeMatch) *string { return v.RangeEnd }).(pulumi.StringPtrOutput)\n}", "func (s *Stream) ListEnd() error {\n\tif len(s.stack) == 0 {\n\t\treturn errNotInList\n\t}\n\ttos := s.stack[len(s.stack)-1]\n\tif tos.pos != tos.size {\n\t\treturn errNotAtEOL\n\t}\n\ts.stack = s.stack[:len(s.stack)-1] // pop out information from the stack\n\tif len(s.stack) > 0 {\n\t\ts.stack[len(s.stack)-1].pos += tos.size\n\t}\n\ts.kind = -1\n\ts.size = 0\n\treturn nil\n}", "func (o Int64RangeMatchPtrOutput) RangeEnd() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Int64RangeMatch) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.RangeEnd\n\t}).(pulumi.StringPtrOutput)\n}", "func (c *CircBuf) Write(buf []byte) int {\n\tvar space int\n\tvar num int\n\tfor {\n\t\tspace = c.spaceToEnd()\n\t\tif len(buf) - num < space {\n\t\t\tspace = len(buf) - num\n\t\t}\n\t\tif space <= 0 {\n\t\t\tbreak\n\t\t}\n\t\tcopy(c.buf[c.head : c.head + space], buf[num : num + space])\n\t\tc.head = (c.head + space) & (len(c.buf) - 1)\n\t\tnum += space\n\t}\n\treturn num\n}", "func (r *baseNsRange) End() int { return r.end }", "func (b *Bar) TrimRightSpace() *Bar {\n\tif isClosed(b.done) {\n\t\treturn b\n\t}\n\tb.trimRightCh <- true\n\treturn b\n}", "func (b *recordBuffer) writeSpace() {\n\tif b.spaces {\n\t\t_ = b.WriteByte(' ')\n\t}\n}", "func last(b *ringBuf) iterator {\n\treturn iterator((b.head + b.len - 1) % len(b.buf))\n}", "func (pb *PageBuffer) End() bool {\n return pb.is_end\n}", "func TestEndToEnd(t *testing.T) {\n\tdir, gonulljson := buildNullJson(t)\n\tdefer os.RemoveAll(dir)\n\t// Read the testdata directory.\n\twalkDir(dir, gonulljson, \"testdata\", t)\n}", "func MoveElementToEnd(array []int, toMove int) []int {\n\ti, j := 0, len(array) - 1\n\tfor i < j {\n\t\tfor i < j && array[j] == toMove {\n\t\t\tj--\n\t\t}\n\t\tif array[i] == toMove {\n\t\t\tarray[i], array[j] = array[j], array[i]\n\t\t}\n\t\ti++\n\t}\n\treturn array\n}", "func (jbobject *JavaNioCharBuffer) Length() int {\n\tjret, err := jbobject.CallMethod(javabind.GetEnv(), \"length\", javabind.Int)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn jret.(int)\n}", "func (tb *TextBuf) EndPos() TextPos {\n\ttb.LinesMu.RLock()\n\tdefer tb.LinesMu.RUnlock()\n\n\tif tb.NLines == 0 {\n\t\treturn TextPosZero\n\t}\n\ted := TextPos{tb.NLines - 1, len(tb.Lines[tb.NLines-1])}\n\treturn ed\n}", "func (sp 
*Space) Length() int {\n\treturn len(*sp)\n}", "func (h *Heap) TrimTail(distance int) {\n\tif (h.trimValue == -1) || (h.trimValue > distance) {\n\t\th.trimValue = distance\n\t}\n}", "func BenchmarkEndToEnd(b *testing.B) {\n\tvar tracer trace.Trace\n\n\tfor i := 0; i < b.N; i++ {\n\t\t//tracer = trace.New(os.Stderr, true) // use stderr to trace\n\t\ttracer = trace.New(ioutil.Discard, true) // and this to not\n\n\t\tvar tokens = xml_lexer.Lex(xmlInput, tracer) // xml\n\t\t//var tokens = json_lexer.Lex(jsonInput, tracer) // json\n\t\t//var tokens []token.Token = csv_lexer.Lex(jsonInput, tracer) // csv\n\t\tvar tests = []struct {\n\t\t\texpr string\n\t\t\texpect string\n\t\t}{\n\t\t\t// current debug cases\n\t\t\t{expr: `universe/galaxy[world=\"earth\"]/timelord`, expect: `who`},\n\t\t}\n\n\t\ttracer.Begin()()\n\t\texplain := false\n\t\tfor _, test := range tests {\n\t\t\tevaluate(tokens, test.expr, explain, tracer)\n\t\t}\n\t}\n}", "func (s *Scanner) isAtEnd() bool {\n\tif s.current >= len(s.source) {\n\t\treturn true\n\t}\n\treturn false\n}", "func (h *Queue) TrimTail(distance types.Distance) {\n\tif !h.trimSet || (h.trimValue > distance) {\n\t\th.trimSet = true\n\t\th.trimValue = distance\n\t}\n}", "func (r *RingBuffer) Length() int {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tif r.wPos == r.rPos {\n\t\tif r.isFull {\n\t\t\treturn r.size\n\t\t}\n\t\treturn 0\n\t}\n\n\tif r.wPos > r.rPos {\n\t\treturn r.wPos - r.rPos\n\t}\n\n\treturn r.size - r.rPos + r.wPos\n}", "func (t *Token) TruncateEnd() *Token {\n\trtn := t.Next\n\tt.Next = nil\n\tif rtn != nil {\n\t\trtn.Prev = nil\n\t}\n\treturn rtn\n}", "func (n *ninjaWriterWithWrap) Space() {\n\tif n.err != nil {\n\t\treturn\n\t}\n\tif n.space {\n\t\t// A space was already pending, and the space plus any strings written after the space did\n\t\t// not reach the maxmimum line length, so write out the old space and pending strings.\n\t\t_, n.err = n.writer.WriteString(\" \")\n\t\tn.lineLen++\n\t\tn.writePending()\n\t}\n\tn.space = true\n}", "func elideToLen(inBuf bytes.Buffer, length int) bytes.Buffer {\n\tif inBuf.Len() > length {\n\t\tinBuf.Truncate(length)\n\t\tinBuf.WriteString(\"...\")\n\t}\n\treturn inBuf\n}", "func (tm *Term) End() error {\n\ttm.RowOff = 0\n\ttm.RowSt = tm.MaxRows - tm.RowsPer\n\treturn tm.Draw()\n}", "func (i *ImageBuf) ZEnd() int {\n\tret := int(C.ImageBuf_zend(i.ptr))\n\truntime.KeepAlive(i)\n\treturn ret\n}", "func PadEnd(s string, l int, c string) string {\n\tvar strLen int\n\tif l <= 0 {\n\t\tstrLen = 0\n\t} else {\n\t\tstrLen = len(s)\n\t}\n\n\tif strLen < l {\n\t\treturn s + createPadding(l-strLen, c)\n\t}\n\n\treturn s\n}", "func (c CountUnit) Space(s SpaceUnit, dimension int8) MetricUnit {\n\treturn (&metricUnit{uint32(c)}).Space(s, dimension)\n}", "func (s *Srt) TrimTo(ms uint32) int {\n\tvar deleted int = 0\n\ts.ForEach(func(_ int, sub *Subtitle) bool {\n\t\tif sub.StartMs < ms {\n\t\t\tsub.MarkAsDeleted()\n\t\t\tdeleted++\n\t\t\ts.count--\n\t\t\treturn true\n\t\t}\n\t\tsub.StartMs -= ms\n\t\tsub.EndMs -= ms\n\t\treturn true\n\t})\n\treturn deleted\n}", "func (e *ObservableEditableBuffer) End() OffsetTuple {\n\treturn e.f.End()\n}", "func (p Partition) End() uint32 {\n\treturn p.Start + p.Length() - 1\n}", "func (o TableRangePartitioningRangePtrOutput) End() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *TableRangePartitioningRange) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.End\n\t}).(pulumi.IntPtrOutput)\n}", "func (s *OverallTestResultItem) SetEndToEndResultCounts(v map[string]*int64) 
*OverallTestResultItem {\n\ts.EndToEndResultCounts = v\n\treturn s\n}", "func (b *BufferedFrameReader) WriteTo(w io.Writer) (n int64, err error) {\n\tvar nn int\n\tif b.Next != nil {\n\t\tby, err := b.TmpFrame.Marshal(b.Reader.By)\n\t\tnn, err = w.Write(by)\n\t\tif err != nil {\n\t\t\treturn int64(nn), err\n\t\t}\n\t\tb.Next = nil\n\t}\n\tn += int64(nn)\n\tm, err := b.Reader.R.WriteTo(w)\n\tn += m\n\treturn n, err\n}", "func (i *ImageBuf) XEnd() int {\n\tret := int(C.ImageBuf_xend(i.ptr))\n\truntime.KeepAlive(i)\n\treturn ret\n}", "func (s *Series) End() int64 {\n\tif s.Len() == 0 {\n\t\treturn -1\n\t}\n\treturn s.Start() + int64(s.Step()*(s.Len()-1))\n}", "func (o Int64RangeMatchResponseOutput) RangeEnd() pulumi.StringOutput {\n\treturn o.ApplyT(func(v Int64RangeMatchResponse) string { return v.RangeEnd }).(pulumi.StringOutput)\n}", "func (b *BaseImpl) LenBuf() int {\n\n\tif len(b.Diffs) < 1 {\n\t\treturn len(b.bytes)\n\t}\n\n\tmax := len(b.bytes)\n\tfor i := range b.Diffs {\n\t\tif b.Diffs[i].Offset+len(b.Diffs[i].bytes) > max {\n\t\t\tmax = b.Diffs[i].Offset + len(b.Diffs[i].bytes)\n\t\t}\n\t}\n\n\treturn max\n\n}", "func (rb *RingBuffer) Length() int {\n\treturn rb.count\n}", "func (self *TextIter) IsEnd() bool {\n\tb := C.gtk_text_iter_is_end(&self.object)\n\treturn gobject.GoBool(unsafe.Pointer(&b))\n}", "func (ll *Doubly[T]) AddAtEnd(val T) {\n\tll.lazyInit()\n\tll.insertValue(val, ll.Head.Prev)\n}", "func countTrailingZeroes(i int, word int32) int {\n\ttrace_util_0.Count(_mydecimal_00000, 28)\n\ttrailing := 0\n\tfor word%powers10[i] == 0 {\n\t\ttrace_util_0.Count(_mydecimal_00000, 30)\n\t\ti++\n\t\ttrailing++\n\t}\n\ttrace_util_0.Count(_mydecimal_00000, 29)\n\treturn trailing\n}", "func (b *Buffer) truncate(n int) int {\n\tif n > len(b.data) {\n\t\tpanic(\"Trying to truncate past end of array.\")\n\t}\n\tb.data = b.data[n:]\n\treturn len(b.data)\n}", "func (jbobject *JavaLangCharSequence) Length() int {\n\tjret, err := jbobject.CallMethod(javabind.GetEnv(), \"length\", javabind.Int)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn jret.(int)\n}", "func (t Time) AppendTo(b []byte, separator byte) []byte {\n\n\tf := func(b []byte) {\n\t\thh, mm, ss := normalizeTime(t.Split())\n\n\t\tcopy(b[0:2], _TIME_PART_AS_NUM_STR[hh])\n\t\tcopy(b[3:5], _TIME_PART_AS_NUM_STR[mm])\n\t\tcopy(b[6:8], _TIME_PART_AS_NUM_STR[ss])\n\n\t\tb[2] = separator\n\t\tb[5] = separator\n\t}\n\n\tif c, l := cap(b), len(b); c - l >= 8 {\n\t\tb = b[:l+8]\n\t\tf(b[l:])\n\t\treturn b\n\t} else {\n\t\t// One more allocation\n\t\tb2 := make([]byte, 8)\n\t\tf(b2)\n\t\tb = append(b, b2...)\n\t\treturn b\n\t}\n}", "func (q *BytesQueue) canInsertAfterTail(need int) bool {\n\tif q.full {\n\t\treturn false\n\t}\n\tif q.tail >= q.head {\n\t\treturn q.capacity-q.tail >= need\n\t}\n\t// 1. there is exactly need bytes between head and tail, so we do not need\n\t// to reserve extra space for a potential empty entry when realloc this queue\n\t// 2. 
still have unused space between tail and head, then we must reserve\n\t// at least headerEntrySize bytes so we can put an empty entry\n\treturn q.head-q.tail == need || q.head-q.tail >= need+minimumHeaderSize\n}", "func (a *reader) WriteTo(w io.Writer) (n int64, err error) {\n\tif a.err != nil {\n\t\treturn 0, a.err\n\t}\n\tn = 0\n\tfor {\n\t\terr = a.fill()\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\t\tn2, err := w.Write(a.cur.buffer())\n\t\ta.cur.inc(n2)\n\t\tn += int64(n2)\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\t\tif a.cur.err != nil {\n\t\t\t// io.Writer should return nil if we are at EOF.\n\t\t\tif a.cur.err == io.EOF {\n\t\t\t\ta.err = a.cur.err\n\t\t\t\treturn n, nil\n\t\t\t}\n\t\t\ta.err = a.cur.err\n\t\t\treturn n, a.cur.err\n\t\t}\n\t}\n}", "func (rcv *Buffer) Length() int64 {\n\treturn rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(8))\n}", "func WordEnd(bytes []byte, pos int) int {\n\tl := len(bytes)\n\tif pos < 0 || pos >= l {\n\t\treturn InvalidPos\n\t}\n\tif pos == l-1 {\n\t\treturn l\n\t}\n\n\tpos = wSequenceBegin(bytes, pos)\n\n\treturn wordEnd(bytes, pos)\n}", "func skipToEnd(yylex interface{}) {\n\tyylex.(*Tokenizer).SkipToEnd = true\n}", "func skipToEnd(yylex interface{}) {\n\tyylex.(*Tokenizer).SkipToEnd = true\n}", "func skipToEnd(yylex interface{}) {\n\tyylex.(*Tokenizer).SkipToEnd = true\n}", "func skipToEnd(yylex interface{}) {\n\tyylex.(*Tokenizer).SkipToEnd = true\n}", "func skipToEnd(yylex interface{}) {\n\tyylex.(*Tokenizer).SkipToEnd = true\n}", "func (ring *ringBuffer) isFull() bool {\n\tring.mutex.Lock()\n\tfull := !ring.spaceReady\n\tring.mutex.Unlock()\n\treturn full\n}", "func getSequenceLength(initialSequence []byte, removeChar rune) int {\n\texplodedSequence := make([]byte, 0, len(initialSequence))\n\n\tfor i := 0; i < len(initialSequence)-1; i++ {\n\t\tif unicode.ToUpper(rune(initialSequence[i])) == unicode.ToUpper(rune(removeChar)) {\n\t\t\t// ignoring the element\n\t\t\tcontinue\n\t\t} else if compareBytes(initialSequence[i], initialSequence[i+1]) {\n\t\t\t// ignoring next 2 elements if they match\n\t\t\t// fmt.Println(\"Not writing\", string(initialSequence[i]), string(initialSequence[i+1]))\n\t\t\ti++\n\t\t} else if len(explodedSequence) > 0 && compareBytes(explodedSequence[len(explodedSequence)-1], initialSequence[i]) {\n\t\t\t// removing last element if it matches the next one\n\t\t\t// fmt.Println(\"Backtracking due to\", string(explodedSequence[len(explodedSequence)-1]), string(initialSequence[i]))\n\t\t\texplodedSequence = explodedSequence[:len(explodedSequence)-1]\n\t\t} else {\n\t\t\t// otherwise we add the element in the sequence\n\t\t\t// fmt.Println(\"Writing\", string(initialSequence[i]))\n\t\t\texplodedSequence = append(explodedSequence, initialSequence[i])\n\t\t}\n\t}\n\treturn len(explodedSequence)\n}", "func (f genHelperDecoder) DecReadArrayEnd() { f.d.arrayEnd() }", "func (b *Buffer) Capacity() int { return len(b.data) }", "func (mes *MarkerEncodingScheme) Tail(b byte, pos int) checked.Bytes { return mes.tails[int(b)][pos-1] }", "func (cll *CircularLinkedList) DeleteEnd() int {\n\tif !(cll.CheckIfEmpty()) {\n\t\thead := cll.Start\n\t\tdeletedEle := head.Data\n\t\tif cll.Len == 1 {\n\t\t\t// delete from beginning\n\t\t\tdeletedEle = cll.DeleteBeginning()\n\t\t\treturn deletedEle\n\t\t}\n\t\t//traverse till end\n\t\tfor {\n\t\t\tif head.Next.Next == cll.Start {\n\t\t\t\tdeletedEle = head.Next.Data\n\t\t\t\tbreak\n\t\t\t}\n\t\t\thead = head.Next\n\t\t}\n\t\t// update last element's next pointer\n\t\thead.Next 
= cll.Start\n\t\tcll.Len--\n\t\treturn deletedEle\n\t}\n\treturn -1\n}", "func (m *BarTimeRangeMutation) End() (r time.Time, exists bool) {\n\tv := m.end\n\tif v == nil {\n\t\treturn\n\t}\n\treturn *v, true\n}", "func (b *recordBuffer) AppendSpaces(arg bool) {\n\tb.spaces = arg\n}", "func wordEnd(bytes []byte, l0Pos int) int {\n\tl := len(bytes)\n\n\tl1, l1Pos := wLastSequence(bytes[:l0Pos])\n\tl0, r0Delta := wFirstSequence(bytes[l0Pos:])\n\tlOddRI := wIsOpenRI(bytes[:l1Pos], l1, l0)\n\tr0Pos := l0Pos + r0Delta\n\tr0, r1Delta := wFirstSequence(bytes[r0Pos:])\n\n\tfor r0Pos < l {\n\t\tr1, r2Delta := wFirstSequence(bytes[r0Pos+r1Delta:])\n\t\tif wDecision(l1, l0, lOddRI, r0, r1) {\n\t\t\treturn r0Pos\n\t\t}\n\t\tl1 = l0\n\t\tl0 = r0\n\t\tr0 = r1\n\t\tr0Pos += r1Delta\n\t\tr1Delta = r2Delta\n\t\tlOddRI = l0 == wClassRI && !lOddRI\n\t}\n\treturn l\n}", "func (b *Buffer) Len() (n int) {\n\tfor _, l := range b.lines {\n\t\tn += utf8.RuneCount(l.data)\n\t}\n\n\tif len(b.lines) > 1 {\n\t\tn += len(b.lines) - 1 // account for newlines\n\t}\n\n\treturn\n}", "func (r *RingBuffer) Free() int {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tif r.wPos == r.rPos {\n\t\tif r.isFull {\n\t\t\treturn 0\n\t\t}\n\t\treturn r.size\n\t}\n\n\tif r.wPos < r.rPos {\n\t\treturn r.rPos - r.wPos\n\t}\n\n\treturn r.size - r.wPos + r.rPos\n}", "func (c *RingBuffer) Capacity() int {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\treturn len(c.buf)\n}", "func (r *baseNsRange) SetEnd(end int) { r.end = end }", "func JumpForwardEnd(b *novi.Buffer, c *novi.Cursor) (int, int) {\n\treturn JumpAlNumSepForwardEnd(b, c, false)\n}", "func (destination *streamDestination) appendToBuffer(message string) string {\n\tif len(message)+len(destination.buffer) >= MAX_MESSAGE_SIZE {\n\t\tremainingSpaceInBuffer := MAX_MESSAGE_SIZE - len(destination.buffer)\n\t\tdestination.buffer = append(destination.buffer, []byte(message[0:remainingSpaceInBuffer])...)\n\n\t\tr, _ := utf8.DecodeLastRune(destination.buffer[0:len(destination.buffer)])\n\n\t\t// if we error initially, go back to preserve utf8 boundaries\n\t\tbytesToCut := 0\n\t\tfor r == utf8.RuneError && bytesToCut < 3 {\n\t\t\tbytesToCut++\n\t\t\tr, _ = utf8.DecodeLastRune(destination.buffer[0 : len(destination.buffer)-bytesToCut])\n\t\t}\n\n\t\tindex := remainingSpaceInBuffer - bytesToCut\n\t\tif index < 0 {\n\t\t\tindex = 0\n\t\t\tdestination.buffer = destination.buffer[0 : len(destination.buffer)-remainingSpaceInBuffer]\n\t\t} else {\n\t\t\tdestination.buffer = destination.buffer[0 : len(destination.buffer)-bytesToCut]\n\t\t}\n\n\t\treturn message[index:]\n\t}\n\n\tdestination.buffer = append(destination.buffer, []byte(message)...)\n\treturn \"\"\n}", "func countTrailingZeroes(i int, word int32) int {\n\ttrailing := 0\n\tfor word%powers10[i] == 0 {\n\t\ti++\n\t\ttrailing++\n\t}\n\treturn trailing\n}", "func (r *Range) Length() int {\n\treturn r.Stop - r.Start\n}", "func (o TableRangePartitioningRangeOutput) End() pulumi.IntOutput {\n\treturn o.ApplyT(func(v TableRangePartitioningRange) int { return v.End }).(pulumi.IntOutput)\n}", "func (this *MyCircularDeque) DeleteLast() bool {\n\tif this.IsEmpty() {\n\t\treturn false\n\t}\n\tthis.end = (len(this.data) + this.end - 1) % len(this.data)\n\treturn true\n}", "func (b *ringBuf) truncateFrom(lo uint64) (removedBytes, removedEntries int32) {\n\tit, ok := iterateFrom(b, lo)\n\tfor ok {\n\t\tremovedBytes += int32(it.entry(b).Size())\n\t\tremovedEntries++\n\t\tit.clear(b)\n\t\tit, ok = it.next(b)\n\t}\n\tb.len -= int(removedEntries)\n\tif b.len < 
(len(b.buf) / shrinkThreshold) {\n\t\trealloc(b, 0, b.len)\n\t}\n\treturn\n}", "func (b *ByteArray) WriteTo(w io.Writer) (n int64, err error) {\n\tfor b.readPos.current < b.usedBytes {\n\t\tslice, er := b.ReadSlice()\n\t\tif slice != nil {\n\t\t\tread, err := w.Write(slice)\n\t\t\tb.readPos = b.seek(b.readPos, read, SEEK_CUR)\n\t\t\tn += int64(read)\n\t\t\tif err != nil {\n\t\t\t\treturn n, err\n\t\t\t}\n\t\t} else {\n\t\t\tif er != io.EOF {\n\t\t\t\terr = er\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\treturn n, err\n}", "func (r *textprotoReader) skipSpace() int {\n\tn := 0\n\tfor {\n\t\tc, err := r.R.ReadByte()\n\t\tif err != nil {\n\t\t\t// Bufio will keep err until next read.\n\t\t\tbreak\n\t\t}\n\t\tif c != ' ' && c != '\\t' {\n\t\t\tr.R.UnreadByte()\n\t\t\tbreak\n\t\t}\n\t\tn++\n\t}\n\treturn n\n}", "func (c *CircBuf) Count() int {\n\treturn (c.head - c.tail) & (len(c.buf) - 1)\n}", "func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {\n\tif nBytes := b.inner.Len(); nBytes > 0 {\n\t\tm, e := w.Write(b.Bytes())\n\t\tif m > nBytes {\n\t\t\tpanic(\"Buffer.WriteTo: invalid Write count\")\n\t\t}\n\t\tn = int64(m)\n\t\tif e != nil {\n\t\t\treturn n, e\n\t\t}\n\t\t// all bytes should have been written, by definition of\n\t\t// Write method in io.Writer\n\t\tif m != nBytes {\n\t\t\treturn n, io.ErrShortWrite\n\t\t}\n\t}\n\treturn n, nil\n}", "func (s *UserTurnResult) SetEndToEndResult(v string) *UserTurnResult {\n\ts.EndToEndResult = &v\n\treturn s\n}" ]
[ "0.7006514", "0.6811424", "0.60563624", "0.5431559", "0.5166042", "0.5164739", "0.51341945", "0.49703035", "0.4915284", "0.49113682", "0.48816666", "0.47869635", "0.4761626", "0.47505003", "0.47214672", "0.467069", "0.46684036", "0.46165514", "0.4569496", "0.45542207", "0.45328385", "0.45242536", "0.45119643", "0.45103073", "0.4506947", "0.44815642", "0.4474665", "0.44716376", "0.4455784", "0.4445947", "0.4445141", "0.44372717", "0.44249144", "0.4420738", "0.44193393", "0.44186056", "0.44083118", "0.44050077", "0.43893003", "0.43799204", "0.43795556", "0.43760875", "0.4369297", "0.43677646", "0.4354629", "0.43504736", "0.43463483", "0.43434802", "0.43136394", "0.43042016", "0.43026093", "0.43006358", "0.42789423", "0.42746353", "0.42593864", "0.42372414", "0.42293277", "0.42219362", "0.42214745", "0.42153868", "0.42095286", "0.42054787", "0.42031944", "0.4201556", "0.41841334", "0.4182348", "0.41812807", "0.41764572", "0.41725785", "0.41678485", "0.41653693", "0.41653693", "0.41653693", "0.41653693", "0.41653693", "0.41557828", "0.41550812", "0.41535532", "0.41500336", "0.41500157", "0.4147946", "0.41449642", "0.41352868", "0.41348055", "0.4132686", "0.41326815", "0.41316485", "0.41232964", "0.4118851", "0.41184878", "0.4118426", "0.41157463", "0.4110964", "0.41109222", "0.41075748", "0.41068086", "0.41055736", "0.4096562", "0.40960026", "0.40950897" ]
0.82667094
0
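A short in-package sketch of the arithmetic in spaceToEnd above (hypothetical test; CircBuf, Space, and spaceToEnd are the ones shown in this record, and the field values are chosen so the wrap-around case is visible):

import "testing"

func TestSpaceToEnd(t *testing.T) {
	c := &CircBuf{buf: make([]byte, 8)} // index mask is len(c.buf)-1 = 7
	c.head, c.tail = 6, 2
	// Total free space wraps around the end: (2 - 6 - 1) & 7 = 3 bytes.
	// The contiguous run from head to the end of the slice is 8 - 6 = 2 bytes,
	// so spaceToEnd reports the smaller of the two.
	if got := c.spaceToEnd(); got != 2 {
		t.Fatalf("spaceToEnd() = %d, want 2", got)
	}
	if got := c.Space(); got != 3 {
		t.Fatalf("Space() = %d, want 3", got)
	}
}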
countToEndArg counts the number of used bytes in a circular buffer to the end of the linear buffer or the end of the circular buffer, whichever is smaller. The tail parameter allows the Peek function to count used bytes without having to update the tail index in the circular buffer.
func (c *CircBuf) countToEndArg(tail int) int { countEndLinearBuf := len(c.buf) - tail countEndCircBuf := (c.head + countEndLinearBuf) & (len(c.buf) - 1) if countEndCircBuf < countEndLinearBuf { return countEndCircBuf } return countEndLinearBuf }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *CircBuf) countToEnd() int {\n\treturn c.countToEndArg(c.tail)\n}", "func (c *CircBuf) Consume(nbytes int) int {\n\tvar count int\n\tvar num int\n\tfor {\n\t\tcount = c.countToEnd()\n\t\tif nbytes - num < count {\n\t\t\tcount = nbytes - num\n\t\t}\n\t\tif count <= 0 {\n\t\t\tbreak\n\t\t}\n\t\tc.tail = (c.tail + count) & (len(c.buf) - 1)\n\t\tnum += count\n\t}\n\treturn num\n}", "func (c *CircBuf) Peek(buf []byte) int {\n\tvar count int\n\tvar tail int = c.tail // Use a local tail variable\n\tvar num int\n\tfor {\n\t\tcount = c.countToEndArg(tail)\n\t\tif len(buf) - num < count {\n\t\t\tcount = len(buf) - num\n\t\t}\n\t\tif count <= 0 {\n\t\t\tbreak\n\t\t}\n\t\tcopy(buf[num : num + count], c.buf[tail : tail + count])\n\t\ttail = (tail + count) & (len(c.buf) - 1)\n\t\tnum += count\n\t}\n\treturn num\n}", "func (c *CircBuf) spaceToEnd() int {\n\tspaceEndLinearBuf := len(c.buf) - c.head\n\tspaceEndCircBuf := (c.tail + spaceEndLinearBuf - 1) & (len(c.buf) - 1)\n\tif spaceEndLinearBuf < spaceEndCircBuf {\n\t\treturn spaceEndLinearBuf\n\t}\n\treturn spaceEndCircBuf\n}", "func journal_seek_tail() int {\n\n\treturn int(C.journal_seek_tail())\n}", "func (mes *MarkerEncodingScheme) Tail(b byte, pos int) checked.Bytes { return mes.tails[int(b)][pos-1] }", "func (e *engine) Tail(context.Context, *Spec, *Step) (io.ReadCloser, error) {\n\treturn nil, nil // no-op for bash implementation\n}", "func (c *CircBuf) Count() int {\n\treturn (c.head - c.tail) & (len(c.buf) - 1)\n}", "func tail(filename string,lines int){\nfp,err:=os.Open(filename)\nif err!=nil{\n fmt.Printf(\"error reading file %v\",err)\n os.Exit(1)\n}\ndefer fp.Close()\n//counting total lines\nscanner:=bufio.NewScanner(fp)\n counter:=0\n for scanner.Scan(){\n counter++\n }\n//calculating last n lines from opt\nline:=counter-lines\nfp1,err:=os.Open(filename)\nif err!=nil{\n fmt.Printf(\"error reading file\")\n os.Exit(1)\n}\nscanner1:=bufio.NewScanner(fp1)\ncounter1:=0\n//printing last n lines\nfor scanner1.Scan(){\n counter1++\n text:=scanner1.Text()\n if counter1>line {\n fmt.Printf(\"%d \\t\",counter1)\n fmt.Printf(\"%s \\n\",text)\n }\n}\ndefer fp1.Close()\n\n}", "func (c *CircBuf) Read(buf []byte) int {\n\tvar count int\n\tvar num int\n\tfor {\n\t\tcount = c.countToEnd()\n\t\tif len(buf) - num < count {\n\t\t\tcount = len(buf) - num\n\t\t}\n\t\tif count <= 0 {\n\t\t\tbreak\n\t\t}\n\t\tcopy(buf[num : num + count], c.buf[c.tail : c.tail + count])\n\t\tc.tail = (c.tail + count) & (len(c.buf) - 1)\n\t\tnum += count\n\t}\n\treturn num\n}", "func (s *Stream) ListEnd() error {\n\tif len(s.stack) == 0 {\n\t\treturn errNotInList\n\t}\n\ttos := s.stack[len(s.stack)-1]\n\tif tos.pos != tos.size {\n\t\treturn errNotAtEOL\n\t}\n\ts.stack = s.stack[:len(s.stack)-1] // pop out information from the stack\n\tif len(s.stack) > 0 {\n\t\ts.stack[len(s.stack)-1].pos += tos.size\n\t}\n\ts.kind = -1\n\ts.size = 0\n\treturn nil\n}", "func (b *Buffer) Len() (n int) {\n\tfor _, l := range b.lines {\n\t\tn += utf8.RuneCount(l.data)\n\t}\n\n\tif len(b.lines) > 1 {\n\t\tn += len(b.lines) - 1 // account for newlines\n\t}\n\n\treturn\n}", "func (p *parser) consume(r rune, limit ...int64) int {\n\tif len(limit) > 1 {\n\t\tpanic(\"only one argument is allowed\")\n\t}\n\tvar max int64 = math.MaxInt64\n\tif len(limit) == 1 {\n\t\tmax = limit[0]\n\t}\n\tvar count int\n\tfor pr := p.peek(); pr == r && count < int(max); {\n\t\tpr = p.next()\n\t\tcount++\n\t}\n\treturn count\n}", "func getLastTailIndex(c context.Context, cache storage.Cache, project string, path 
types.StreamPath) int64 {\n\tdata, ok := cache.Get(c, mkLastTailKey(project, path))\n\tif !ok {\n\t\treturn 0\n\t}\n\n\tv, err := binary.ReadVarint(bytes.NewReader(data))\n\tif err != nil {\n\t\tlog.Fields{\n\t\t\tlog.ErrorKey: err,\n\t\t\t\"project\": project,\n\t\t\t\"path\": path,\n\t\t}.Warningf(c, \"Could not decode last tail cache.\")\n\t\treturn 0\n\t}\n\n\tlog.Fields{\n\t\t\"index\": v,\n\t}.Infof(c, \"Using cached tail index.\")\n\treturn v\n}", "func (a Arguments) Tail() Arguments {\n\tif len(a.args) <= 1 {\n\t\treturn Arguments{bin: a.bin, args: []string{}}\n\t}\n\treturn Arguments{bin: a.bin, args: a.args[1:]}\n}", "func (c *Client) countURL(end endpoint, opts ...FuncOption) (string, error) {\n\topt, err := newOpt(opts...)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\turl := c.rootURL + string(end) + \"count\"\n\turl = encodeURL(&opt.Values, url)\n\n\treturn url, nil\n}", "func (f *FileCache) tailLog(rr *bufio.Reader) (b []byte, err error) {\n\tvar (\n\t\tt []byte\n\t)\n\t// peek magic\n\tfor {\n\t\tif b, err = rr.Peek(_logMagicSize); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif bytes.Equal(b, logMagic) {\n\t\t\tbreak\n\t\t}\n\t\trr.Discard(1)\n\t}\n\t// peek length\n\tif t, err = rr.Peek(_logHeadSize); err != nil {\n\t\tif err != io.EOF {\n\t\t\tlog.Error(\"rr.Peek(len:%d) error(%v)\", _logLenSize, err)\n\t\t}\n\t\treturn\n\t}\n\t// peek body\n\tl := int(binary.BigEndian.Uint32(t[_logMagicSize:_logHeadSize]))\n\tif t, err = rr.Peek(_logHeadSize + l); err != nil {\n\t\tif err != io.EOF {\n\t\t\tlog.Error(\"rr.Peek(%d) error(%v)\", l, err)\n\t\t}\n\t\treturn\n\t}\n\tb = t[_logHeadSize:]\n\trr.Discard(l + _logHeadSize)\n\treturn\n}", "func Tail(c rune) rune {\n\tif TailG <= c && c <= TailH {\n\t\treturn c\n\t}\n\tif t, ok := toTail[c]; ok {\n\t\treturn t\n\t}\n\n\treturn 0\n}", "func Tail(c rune) rune {\n\tif TAIL_G <= c && c <= TAIL_H {\n\t\treturn c\n\t}\n\tif t, ok := toTail[c]; ok {\n\t\treturn t\n\t}\n\n\treturn 0\n}", "func (self File) TailBytes(limitSize int) ([]byte, error) {\n\tvar limitBytes []byte\n\tfile := self.Open()\n\n\treadBytes, err := io.ReadAtLeast(file, limitBytes, limitSize)\n\tif readBytes != limitSize {\n\t\treturn limitBytes, fmt.Errorf(\"error: failed to complete read: read \", readBytes, \" out of \", limitSize, \"bytes\")\n\t} else {\n\t\treturn limitBytes, err\n\t}\n}", "func tail(msg string, maxBytes int) string {\n\tmsg = strings.Trim(msg, \"\\n\")\n\tif len(msg) <= maxBytes {\n\t\treturn msg\n\t}\n\tlines := strings.Split(msg, \"\\n\")\n\tif len(lines) == 1 {\n\t\tif len(lines[0]) <= maxBytes {\n\t\t\treturn lines[0]\n\t\t}\n\t\treturn \"...\" + lines[0][max(0, len(lines[0])-maxBytes+3):]\n\t}\n\n\tprefix := \"...\"\n\tret := []string{}\n\tn := len(prefix)\n\tfor i := len(lines) - 1; i >= 0; i-- {\n\t\tif n+len(\"\\n\")+len(lines[i]) > maxBytes {\n\t\t\tif len(ret) == 0 {\n\t\t\t\treturn \"...\" + lines[0][max(0, len(lines[0])-maxBytes+3):]\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tret = append(ret, lines[i])\n\t\tn += len(lines[i]) + len(\"\\n\")\n\t}\n\n\treturn strings.Join(reverse(append(ret, prefix)), \"\\n\")\n}", "func (h headTailIndex) tail() uint32 {\n\treturn uint32(h)\n}", "func (p *Parser) Len(pattern string) int {\n return p.json.Len(pattern)\n}", "func BenchmarkEndToEnd(b *testing.B) {\n\tvar tracer trace.Trace\n\n\tfor i := 0; i < b.N; i++ {\n\t\t//tracer = trace.New(os.Stderr, true) // use stderr to trace\n\t\ttracer = trace.New(ioutil.Discard, true) // and this to not\n\n\t\tvar tokens = xml_lexer.Lex(xmlInput, tracer) // xml\n\t\t//var 
tokens = json_lexer.Lex(jsonInput, tracer) // json\n\t\t//var tokens []token.Token = csv_lexer.Lex(jsonInput, tracer) // csv\n\t\tvar tests = []struct {\n\t\t\texpr string\n\t\t\texpect string\n\t\t}{\n\t\t\t// current debug cases\n\t\t\t{expr: `universe/galaxy[world=\"earth\"]/timelord`, expect: `who`},\n\t\t}\n\n\t\ttracer.Begin()()\n\t\texplain := false\n\t\tfor _, test := range tests {\n\t\t\tevaluate(tokens, test.expr, explain, tracer)\n\t\t}\n\t}\n}", "func ReadTail() string {\n\tif logTail == nil {\n\t\treturn \"\"\n\t}\n\treturn logTail.Read()\n}", "func IsTail(r rune) bool {\n\tif TailG <= r && r <= TailH {\n\t\treturn true\n\t}\n\treturn false\n}", "func (s *BaseCGListener) ExitUtf8_tail(ctx *Utf8_tailContext) {}", "func (mr *FakeTailerMockRecorder) Tail(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{arg0, arg1, arg2}, arg3...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Tail\", reflect.TypeOf((*FakeTailer)(nil).Tail), varargs...)\n}", "func IsTail(r rune) bool {\n\tif TAIL_G <= r && r <= TAIL_H {\n\t\treturn true\n\t}\n\treturn false\n}", "func Tail(s string) string {\n\tif s == \"\" {\n\t\tpanic(\"empty list\")\n\t}\n\n\t_, sz := utf8.DecodeRuneInString(s)\n\treturn s[sz:]\n}", "func (b *loopBuffer) Len() int {\n\tif b.looped {\n\t\treturn cap(b.buf)\n\t}\n\treturn b.off\n}", "func (h *atomicHeadTailIndex) incTail() headTailIndex {\n\tht := headTailIndex(h.u.Add(1))\n\t// Check for overflow.\n\tif ht.tail() == 0 {\n\t\tprint(\"runtime: head = \", ht.head(), \", tail = \", ht.tail(), \"\\n\")\n\t\tthrow(\"headTailIndex overflow\")\n\t}\n\treturn ht\n}", "func (log *LogFile) tailPush(sym *SymFile, newRecSize uint32) error {\n\n\t// lap >= 2\n\t// |-----|-----|------|------|.......|\n\t// ^ ^ ^\n\t// head tail maxsize\n\t// |-gap-|\n\t// loop reading in records from the tail\n\t// until the accumulated size (including head-tail gap is greater than the new rec size\n\t// tail points to oldest complete record\n\n\ttailGap := log.tailOffset - log.headOffset\n\tsizeAvailable := tailGap\n\tstdlog.Printf(\"Tail push for new rec size of %v - avail of %v\\n\", newRecSize, sizeAvailable)\n\tif tailGap >= 0 { // tail in front of head\n\t // set read pos to the tail\n\t stdlog.Printf(\"Seek to tail %v\", log.tailOffset)\n\t\tlog.entryReadFile.Seek(int64(log.tailOffset), 0)\n\t\tfor {\n\t\t\tif uint64(newRecSize) <= sizeAvailable {\n\t\t\t\t// moved tail far enough and we have space for a new record\n\t\t\t\tstdlog.Printf(\"Moved tail far enough. 
available=%v, newRecSize=%v\\n\", sizeAvailable, newRecSize)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tvar err error\n\t\t\tvar entry LogEntry\n\t\t\t\n\t\t\t// read entry from tail - so need to set read file pos to the tail offset\n\t\t\tif log.tailOffset >= log.numSizeBytes {\n\t\t\t\tstdlog.Printf(\"Tail has reached end of data %v, equivalent to EOF\", log.numSizeBytes)\n\t\t\t\terr = io.EOF\n\t\t\t} else {\n\t\t\t\tentry, err = log.ReadEntryData(sym)\n\t\t\t}\n\t\t\tif err == nil {\n\t\t\t\treclen := entry.SizeBytes()\n\t\t\t\tsizeAvailable += uint64(reclen) // size engulfs old tail record\n\t\t\t\tlog.tailOffset += uint64(reclen) // tail moves forward to newer record\n\t\t\t\tlog.NumEntries--\n\t\t\t\tstdlog.Printf(\"Move tail over 1 record, tail=%v, avail=%v, numRecs=%v\\n\", log.tailOffset, sizeAvailable, log.NumEntries)\n\t\t\t} else if err == io.EOF {\n\t\t\t\tstdlog.Printf(\"We hit EOF, no more tail entries to read\\n\")\n\t\t\t\t// we hit the end and no more tail entries to read\n\t\t\t\t// BUT if there is a gap at the end it might be\n\t\t\t\t// big enough for the head entry\n\t\t\t\t// compare tailOffset with maxsize\n\t\t\t\t// |------|000000000|\n\t\t\t\t// ^ ^---------^\n\t\t\t\t// tail maxsize\n\t\t\t\tendGap := log.maxSizeBytes - log.tailOffset\n\t\t\t\tif uint64(newRecSize) <= sizeAvailable+endGap {\n\t\t\t\t\t// then fit in the end gap\n\t\t\t\t\tstdlog.Printf(\"Fit into end gap\\n\")\n\t\t\t\t\tsizeAvailable += endGap\n\t\t\t\t\t\n\t\t\t\t\tlog.numSizeBytes = log.headOffset + uint64(newRecSize)\n\t\t\t\t\tstdlog.Printf(\"Update numSizeBytes as we have moved into gap: %v\", log.numSizeBytes)\n\t\t\t\t} else {\n\t\t\t\t\t// zero out where head is and move head around\n\t\t\t\t\tstdlog.Printf(\"Zero out where head is and move head around to the start (wrap)\\n\")\n\t\t\t\t\tsizeAvailable = 0\n\t\t\t\t\tlog.numSizeBytes = log.headOffset\n\t\t\t\t\tstdlog.Printf(\"Update numSizeBytes to head %v before moving head to zero\", log.numSizeBytes)\n\t\t\t\t\tlog.headOffset = 0\n\t\t\t\t\tlog.wrapNum++\n\t\t\t\t\tlog.setWriteZeroPos()\n\t\t\t\t\tlog.nextLogID.wrap()\n\t\t\t\t}\n\t\t\t\tlog.tailOffset = 0 // wrap around tail\n\t\t\t\terr = log.setReadZeroPos()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} // for each tail record\n\t} // if\n\treturn nil\n}", "func (i *ImageBuf) ZEnd() int {\n\tret := int(C.ImageBuf_zend(i.ptr))\n\truntime.KeepAlive(i)\n\treturn ret\n}", "func (fi *funcInfo) emitTailCall(line, a, nArgs int) {\r\n\tfi.emitABC(line, OP_TAILCALL, a, nArgs+1, 0)\r\n}", "func (c *Consumer) tail() error {\n\tshards, err := c.listShards()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo c.monitor()\n\n\tfor _, id := range withNoChildren(shards) {\n\t\tc.startShardConsumer(id, LATEST, c.processor)\n\t}\n\n\treturn nil\n}", "func tail(ctx context.Context, logFile string, outChan chan string) {\n\tsource := config.NewLogSource(\"access\", &config.LogsConfig{Type: \"file\", Path: logFile})\n\tsleepDuration := time.Millisecond * 100\n\n\tpp := mock.NewMockProvider()\n\tfilesScanner := tailer.New([]*config.LogSource{source}, 4, pp, auditor.New(nil, \"\"), sleepDuration)\n\tfilesScanner.Start()\n\tfor {\n\t\tselect {\n\t\tcase m := <-pp.NextPipelineChan():\n\t\t\toutChan <- string(m.Content())\n\t\tcase <-ctx.Done():\n\t\t\tfilesScanner.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}", "func (b *Buffer) End() Loc {\n\treturn Loc{utf8.RuneCount(b.lines[b.NumLines-1].data), b.NumLines - 1}\n}", "func (ls *linestate) deleteToEnd() 
{\n\tls.buf = ls.buf[:ls.pos]\n\tls.refreshLine()\n}", "func (d *Decoder) remaining() (int64, error) {\n\n\t// TODO change this to use d.ParentBounds()\n\n\tpos, err := d.f.Tell()\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\tstart, s, err := d.currentStruct()\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\tlen, err := d.eval(s.Length())\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\treturn (start - pos + len), nil\n}", "func (c *RingBuffer) peek(p []byte) (int, error) {\n\tif c.len == 0 && c.closed {\n\t\treturn 0, io.EOF\n\t}\n\tl := len(p)\n\tif l > c.len {\n\t\tl = c.len\n\t}\n\tn := 0\n\tleftBeforeEnd := len(c.buf) - c.index\n\tif l < leftBeforeEnd {\n\t\tn = copy(p, c.buf[c.index:c.index+l])\n\t} else {\n\t\tn = copy(p, c.buf[c.index:])\n\t\tn += copy(p[n:], c.buf[:l-n])\n\t}\n\treturn n, nil\n}", "func seekBackwardsToLineCount(lc int, f *os.File) *os.File {\n\t// line count counter\n\tl := 0\n\t// offset counter, negative because counting backwards\n\tvar offset int64 = -1\n\n\tfinfo, err := os.Stat(f.Name())\n\tif err != nil {\n\t\t_, _ = fmt.Fprintf(os.Stderr, \"error while getting fileinfo: %s\", f.Name())\n\t\treturn nil\n\t}\n\n\tfsize := finfo.Size()\n\n\t// loop until lc is passed\n\tfor ; ; offset-- {\n\t\t// check if we are past the file start\n\t\tif offset+fsize == 0 {\n\t\t\t// if so, return this position, there's no room to backup\n\t\t\tbreak\n\t\t}\n\n\t\t// seek backwards by offset from the end\n\t\tp, err := f.Seek(int64(offset), io.SeekEnd)\n\t\tif err != nil {\n\t\t\t_, _ = fmt.Fprintf(os.Stderr, \"error while seeking by char at %d: %s\", offset, err)\n\t\t\treturn nil\n\t\t}\n\n\t\t// read one char, a new reader is needed from seeked File ref\n\t\tr := bufio.NewReader(f)\n\t\tb, err := r.ReadByte()\n\t\tif err != nil {\n\t\t\t_, _ = fmt.Fprintf(os.Stderr, \"error while reading char at %d: %s\", p, err)\n\t\t\treturn nil\n\t\t}\n\n\t\t// check if read char is new line\n\t\ts := string(b)\n\t\tif s == \"\\n\" {\n\t\t\tl++\n\t\t\t// if line count is passed\n\t\t\tif l > lc {\n\t\t\t\t// increase the offset by one (to compensate for last\n\t\t\t\t// read new line\n\t\t\t\toffset++\n\n\t\t\t\t// escape from loop\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\t// seek to the found position\n\t_, err = f.Seek(int64(offset), io.SeekEnd)\n\tif err != nil {\n\t\t_, _ = fmt.Fprintf(os.Stderr, \"end: error while seeking by char at %d: %s\", offset, err)\n\t\treturn nil\n\t}\n\n\treturn f\n}", "func (p *Pattern) readLength() uint64 {\n\tif p.lastErr != nil {\n\t\treturn 0\n\t}\n\n\tvar length uint64\n\tp.read(&length)\n\n\treturn length\n}", "func (this *FeedableBuffer) Feed(bytesToFeed []byte) (bytesRemaining []byte) {\n\tbyteCount := this.maxByteCount - len(this.Data)\n\tconsumedPortion, bytesRemaining := ConsumeBytes(byteCount, bytesToFeed)\n\tthis.Data = append(this.Data, consumedPortion...)\n\treturn bytesRemaining\n}", "func (cr *CountingReader) Read(dst []byte) (int, error) {\n\n\tread, err := cr.R.Read(dst)\n\tcr.bytesRead += int64(read)\n\treturn read, err\n}", "func (c *Counter) Count(b []byte) {\n\tfor i := 0; i < len(b); i++ {\n\t\tswitch b[i] {\n\t\t// '\\' means to concat next line\n\t\tcase '\\\\':\n\t\t\t//len(b)-2 because len(b)-1 is \"\\n\", we take line-break into consideration\n\t\t\tif i == len(b)-1 || i == len(b)-2 {\n\t\t\t\tc.NextLineConcats = 1\n\t\t\t\tbreak\n\t\t\t}\n\t\tcase '{':\n\t\t\tc.CurlyBrackets++\n\t\tcase '}':\n\t\t\tc.CurlyBrackets--\n\t\tcase '(':\n\t\t\tc.Parentheses++\n\t\tcase ')':\n\t\t\tc.Parentheses--\n\t\tcase 
'[':\n\t\t\tc.SquareBrackets++\n\t\tcase ']':\n\t\t\tc.SquareBrackets--\n\t\t}\n\t}\n}", "func (b *Buffer) Remaining(from Cursor) uint64 {\n\tb.mu.RLock()\n\tdefer b.mu.RUnlock()\n\n\tif from.offset > b.last {\n\t\treturn 0\n\t}\n\n\toff := from.offset\n\tif off < b.first {\n\t\toff = b.first\n\t}\n\tremaining := b.last - off\n\tremaining += uint64(b.frameSize(b.last))\n\treturn remaining\n}", "func (b *Buffer) Len() int { return b.length }", "func Tail(data interface{}) (interface{}, error) {\n\tvar err error\n\n\tresult := func(err *error) interface{} {\n\t\tdefer catch(err)\n\n\t\tif !isNonNilData(err, \"data\", data) {\n\t\t\treturn nil\n\t\t}\n\n\t\tdataValue, dataType, _, dataValueLen := inspectData(data)\n\n\t\tif !isSlice(err, \"data\", dataValue) {\n\t\t\treturn nil\n\t\t}\n\n\t\tif dataValueLen == 0 {\n\t\t\treturn makeSlice(dataType).Interface()\n\t\t}\n\n\t\tresult := dataValue.Slice(1, dataValueLen)\n\t\treturn result.Interface()\n\t}(&err)\n\n\treturn result, err\n}", "func (cl *CompositeLogger) ReadTailLog(offset, length int64) (string, int64, bool, error) {\n\treturn cl.loggers[0].ReadTailLog(offset, length)\n}", "func (jbobject *JavaNioCharBuffer) Length() int {\n\tjret, err := jbobject.CallMethod(javabind.GetEnv(), \"length\", javabind.Int)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn jret.(int)\n}", "func checkTailCursor( c mongo.Cursor ) ( doBreak, hasNext bool ){\n\n\tif err := c.Err(); err != nil {\n\t\tlog.Println( fmt.Sprint( \"mongo permanent cursor error: \", err,\"\\n\") )\n\t\treturn true, false\n\t}\n\n\thasNext = \tc.HasNext()\n\n\t// AFTER hasNext check !\n\talive := false\n\tif c.GetId() > 0 {\n\t\talive = true\n\t}\n\n\n\tif alive == false || hasNext == false {\n\n\n\t\tif !alive {\n\t\t\tfmt.Print( fmt.Sprintf( \" cursor is dead - id %v.\", c.GetId()) )\n\t\t}\n\n\t\tif hasNext == false {\n\t\t\tfmt.Print( \" hasNext() is false - await is over.\")\n\t\t}\n\n\t\tfmt.Print( fmt.Sprintf( \" stillSeeking/Dead/Exhausted %v (max %v).\", countNoNextValue, noNextValueMax) )\n\n\t\tcountNoNextValue++\n\t\tif countNoNextValue > noNextValueMax {\n\t\t\treturn true , hasNext\n\t\t}\n\n\t}\n\n\treturn false, hasNext\t\t\t\n\n\n}", "func (c *RingBuffer) consume(n int) int {\n\tif n > c.len {\n\t\tn = c.len\n\t}\n\tc.index = (c.index + n) % len(c.buf)\n\tc.addLen(-n)\n\treturn n\n}", "func (s *BaseCGListener) EnterUtf8_tail(ctx *Utf8_tailContext) {}", "func (s *SinglyLinkedList) AddToEnd(val interface{}) {\n\tnode := &Node{val, nil}\n\ts.Count += 1\n\tlast := s.LastNode()\n\tif last != nil {\n\t\tlast.Next = node\n\t}\n}", "func (s *LogStore) Tail(n int) string {\n\treturn s.tailHelper(n, s.spans, true)\n}", "func (rb *RingBuffer[T]) Len() int {\n\tif rb == nil {\n\t\treturn 0\n\t}\n\trb.mu.Lock()\n\tdefer rb.mu.Unlock()\n\treturn len(rb.buf)\n}", "func Len(scope common.Scope, args ...interface{}) interface{} {\n\tif s, ok := args[0].(string); ok {\n\t\treturn int64(len(s))\n\t}\n\treturn 0\n}", "func (reader *ProtocolReader) Len() int { return len(reader.buf) - reader.off }", "func lineCounter(r io.Reader) (int, error) {\n\tbuf := make([]byte, 32*1024)\n\tcount := 0\n\tlineSep := []byte{'\\n'}\n\tvar lastByte byte\n\tlastByte = '\\n'\n\n\tfor {\n\t\tc, err := r.Read(buf)\n\t\tif c > 0 {\n\t\t\tlastByte = buf[c-1]\n\t\t}\n\t\tcount += bytes.Count(buf[:c], lineSep)\n\n\t\tswitch {\n\t\tcase err == io.EOF:\n\t\t\tif lastByte != '\\n' {\n\t\t\t\tlog.Warn(fmt.Sprintf(\"Last byte in buffer is '%v'\", lastByte))\n\t\t\t\tcount += 1\n\t\t\t}\n\t\t\treturn count, 
nil\n\n\t\tcase err != nil:\n\t\t\treturn count, err\n\t\t}\n\t}\n}", "func Tail(log []string, N int, subStr string) string {\n\tw := io.NewBufferWriter()\n\tret := make([]string, 0)\n\tfor _, line := range log {\n\t\tif s.Contains(line, subStr) {\n\t\t\tret = append(ret, line)\n\t\t}\n\t}\n\tN = int(math.Min(float64(N), 1000))\n\ttailN := int(math.Max(0, float64(len(ret)-N)))\n\tfor _, line := range ret[tailN:] {\n\t\tif s.Contains(line, \"ERROR\") || s.Contains(line, \"Error:\") {\n\t\t\tio.Reply(w, io.Red)\n\t\t} else if s.Contains(line, \"WARN\") {\n\t\t\tio.Reply(w, io.Yellow)\n\t\t} else {\n\t\t\tio.Reply(w, io.Grey)\n\t\t}\n\t\tio.ReplyNL(w, line)\n\t}\n\tio.ReplyNL(w, io.Yellow, fmt.Sprintf(\"[time now %s]\", time.Now().String()))\n\treturn w.String()\n}", "func (a *ASTNode) showTail(fn string) error {\n\tr, err := os.Open(fn)\n\tif err != nil {\n\t\treturn err\n\t}\n\trb := bufio.NewReader(r)\n\tlast := \"\"\n\tfor {\n\t\tln, err := rb.ReadString('\\n')\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif ln != \"\" {\n\t\t\tlast = ln\n\t\t}\n\t}\n\tif last != \"\" {\n\t\tfmt.Fprint(os.Stderr, last)\n\t}\n\treturn nil\n}", "func Tail(\n\tctx context.Context,\n\tcli plugin.CliConnection,\n\targs []string,\n\tc http.Client,\n\tlog Logger,\n\tw io.Writer,\n\topts ...TailOption,\n) {\n\to, err := newTailOptions(cli, args, log)\n\tif err != nil {\n\t\tlog.Fatalf(\"%s\", err)\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(&o)\n\t}\n\n\tformatter := newFormatter(o.source.Name, o.follow, formatterKindFromOptions(o), log, o.outputTemplate, o.newLineReplacer)\n\tlw := lineWriter{w: w}\n\n\tdefer func() {\n\t\tif value, ok := formatter.flush(); ok {\n\t\t\tlw.Write(value)\n\t\t}\n\t}()\n\n\tlogCacheAddr := os.Getenv(\"LOG_CACHE_ADDR\")\n\tif logCacheAddr == \"\" {\n\t\thasAPI, err := cli.HasAPIEndpoint()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"%s\", err)\n\t\t}\n\n\t\tif !hasAPI {\n\t\t\tlog.Fatalf(\"No API endpoint targeted.\")\n\t\t}\n\n\t\ttokenURL, err := cli.ApiEndpoint()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"%s\", err)\n\t\t}\n\n\t\tuser, err := cli.Username()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"%s\", err)\n\t\t}\n\n\t\torg, err := cli.GetCurrentOrg()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"%s\", err)\n\t\t}\n\n\t\tspace, err := cli.GetCurrentSpace()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"%s\", err)\n\t\t}\n\n\t\tlogCacheAddr = strings.Replace(tokenURL, \"api\", \"log-cache\", 1)\n\n\t\theaderPrinter := formatter.sourceHeader\n\t\tswitch o.source.Type {\n\t\tcase _application:\n\t\t\theaderPrinter = formatter.appHeader\n\t\tcase _service:\n\t\t\theaderPrinter = formatter.serviceHeader\n\t\t}\n\n\t\tif !o.noHeaders {\n\t\t\theader, ok := headerPrinter(o.source.Name, org.Name, space.Name, user)\n\t\t\tif ok {\n\t\t\t\tlw.Write(header)\n\t\t\t\tlw.Write(\"\")\n\t\t\t}\n\t\t}\n\t}\n\n\tfilterAndFormat := func(e *loggregator_v2.Envelope) (string, bool) {\n\t\tif !typeFilter(e, o) {\n\t\t\treturn \"\", false\n\t\t}\n\n\t\treturn formatter.formatEnvelope(e)\n\t}\n\n\tif strings.ToLower(os.Getenv(\"LOG_CACHE_SKIP_AUTH\")) != \"true\" {\n\t\tc = http.NewTokenClient(c, func() string {\n\t\t\ttoken, err := cli.AccessToken()\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Unable to get Access Token: %s\", err)\n\t\t\t}\n\t\t\treturn token\n\t\t})\n\t}\n\n\tclient := logcache.NewClient(logCacheAddr, logcache.WithHTTPClient(c))\n\n\tcheckFeatureVersioning(client, ctx, log, o.nameFilter)\n\n\tsourceID := o.source.GUID\n\tif o.source.Type == 
_unknown {\n\t\t// fall back to provided name\n\t\tsourceID = o.source.Name\n\t}\n\n\twalkStartTime := time.Now().Add(-5 * time.Second).UnixNano()\n\tif o.lines > 0 {\n\t\tenvelopes, err := client.Read(\n\t\t\tcontext.Background(),\n\t\t\tsourceID,\n\t\t\to.startTime,\n\t\t\tlogcache.WithEndTime(o.endTime),\n\t\t\tlogcache.WithEnvelopeTypes(o.envelopeType),\n\t\t\tlogcache.WithLimit(o.lines),\n\t\t\tlogcache.WithDescending(),\n\t\t\tlogcache.WithNameFilter(o.nameFilter),\n\t\t)\n\n\t\tif err != nil && !o.follow {\n\t\t\tlog.Fatalf(\"%s\", err)\n\t\t}\n\n\t\t// we get envelopes in descending order but want to print them ascending\n\t\tfor i := len(envelopes) - 1; i >= 0; i-- {\n\t\t\twalkStartTime = envelopes[i].Timestamp + 1\n\t\t\tif formatted, ok := filterAndFormat(envelopes[i]); ok {\n\t\t\t\tlw.Write(formatted)\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.follow {\n\t\tlogcache.Walk(\n\t\t\tctx,\n\t\t\tsourceID,\n\t\t\tlogcache.Visitor(func(envelopes []*loggregator_v2.Envelope) bool {\n\t\t\t\tfor _, e := range envelopes {\n\t\t\t\t\tif formatted, ok := filterAndFormat(e); ok {\n\t\t\t\t\t\tlw.Write(formatted)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn true\n\t\t\t}),\n\t\t\tclient.Read,\n\t\t\tlogcache.WithWalkStartTime(time.Unix(0, walkStartTime)),\n\t\t\tlogcache.WithWalkEnvelopeTypes(o.envelopeType),\n\t\t\tlogcache.WithWalkBackoff(logcache.NewAlwaysRetryBackoff(250*time.Millisecond)),\n\t\t\tlogcache.WithWalkNameFilter(o.nameFilter),\n\t\t)\n\t}\n}", "func (h CRConfigHistoryThreadsafe) Len() uint64 {\n\tif h.length == nil {\n\t\treturn 0\n\t}\n\treturn *h.length\n}", "func TestEndToEnd(t *testing.T) {\n\tdir, gonulljson := buildNullJson(t)\n\tdefer os.RemoveAll(dir)\n\t// Read the testdata directory.\n\twalkDir(dir, gonulljson, \"testdata\", t)\n}", "func (obj *Doc) BackCount(ctx context.Context) (int, error) {\n\tresult := &struct {\n\t\tReturn int `json:\"qReturn\"`\n\t}{}\n\terr := obj.RPC(ctx, \"BackCount\", result)\n\treturn result.Return, err\n}", "func (e *DockerEngine) tail(ctx context.Context, s *State) error {\n\topts := types.ContainerLogsOptions{\n\t\tFollow: true,\n\t\tShowStdout: true,\n\t\tShowStderr: true,\n\t\tDetails: false,\n\t\tTimestamps: false,\n\t}\n\n\tlogs, err := e.client.ContainerLogs(ctx, s.step.ID, opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo func() {\n\t\ts.output(logs)\n\t\tlogs.Close()\n\t}()\n\treturn nil\n}", "func (b *Buffer) truncate(n int) int {\n\tif n > len(b.data) {\n\t\tpanic(\"Trying to truncate past end of array.\")\n\t}\n\tb.data = b.data[n:]\n\treturn len(b.data)\n}", "func (r *Renderer) countLines(buf bytes.Buffer) int {\n\tw, err := r.termWidth()\n\tif err != nil || w == 0 {\n\t\t// if we got an error due to terminal.GetSize not being supported\n\t\t// on current platform then just assume a very wide terminal\n\t\tw = 10000\n\t}\n\n\tbufBytes := buf.Bytes()\n\n\tcount := 0\n\tcurr := 0\n\tdelim := -1\n\tfor curr < len(bufBytes) {\n\t\t// read until the next newline or the end of the string\n\t\trelDelim := bytes.IndexRune(bufBytes[curr:], '\\n')\n\t\tif relDelim != -1 {\n\t\t\tcount += 1 // new line found, add it to the count\n\t\t\tdelim = curr + relDelim\n\t\t} else {\n\t\t\tdelim = len(bufBytes) // no new line found, read rest of text\n\t\t}\n\n\t\tif lineWidth := utf8.RuneCount(bufBytes[curr:delim]); lineWidth > w {\n\t\t\t// account for word wrapping\n\t\t\tcount += lineWidth / w\n\t\t\tif (lineWidth % w) == 0 {\n\t\t\t\t// content whose width is exactly a multiplier of available width should not\n\t\t\t\t// count as having wrapped on 
the last line\n\t\t\t\tcount -= 1\n\t\t\t}\n\t\t}\n\t\tcurr = delim + 1\n\t}\n\n\treturn count\n}", "func (c *ReferencesBasesListCall) End(end int64) *ReferencesBasesListCall {\n\tc.urlParams_.Set(\"end\", fmt.Sprint(end))\n\treturn c\n}", "func FindReaderSize(r io.Reader, needle []byte, bufSize int) (int, error) {\n\tif bufSize < len(needle) {\n\t\tbufSize = 2 * len(needle)\n\t}\n\tneedleLen := len(needle)\n\tbuf := make([]byte, bufSize)\n\tvar off, start int\n\tfor {\n\t\tn, err := io.ReadAtLeast(r, buf[start:], needleLen)\n\t\tif errors.Is(err, io.ErrUnexpectedEOF) {\n\t\t\terr = io.EOF\n\t\t}\n\t\tif n == 0 && errors.Is(err, io.EOF) {\n\t\t\treturn -1, nil\n\t\t}\n\t\t//fmt.Println(off, start, n)\n\t\tif i := bytes.Index(buf[:start+n], needle); i >= 0 {\n\t\t\t//fmt.Printf(\"buf=%q\\n\", buf[i:i+len(needle)])\n\t\t\treturn off + i, nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn -1, err\n\t\t}\n\t\t// copy the end to the start\n\t\tcopy(buf[0:], buf[start+n-needleLen+1:start+n])\n\t\tif off == 0 {\n\t\t\toff = n - needleLen + 1\n\t\t\tstart = needleLen - 1\n\t\t} else {\n\t\t\toff += n\n\t\t}\n\t\t//fmt.Printf(\"buf=%q n=%d needle=%d off=%d\\n\", buf[:start], n, len(needle), off)\n\t}\n}", "func (b *BaseImpl) LenBuf() int {\n\n\tif len(b.Diffs) < 1 {\n\t\treturn len(b.bytes)\n\t}\n\n\tmax := len(b.bytes)\n\tfor i := range b.Diffs {\n\t\tif b.Diffs[i].Offset+len(b.Diffs[i].bytes) > max {\n\t\t\tmax = b.Diffs[i].Offset + len(b.Diffs[i].bytes)\n\t\t}\n\t}\n\n\treturn max\n\n}", "func (o *txSocketQueue) copyFromTail(p *queuePtr, size uint32, tobuf []byte, boffset uint32) uint32 {\n\tm := o.getIndex(p)\n\tfree := p.getFree()\n\tz := uint16(minSpace(free, size))\n\tof := o.tail.getRelativeOffset(p)\n\tcopy(tobuf[boffset:boffset+uint32(z)], m.GetData()[of:of+z])\n\tp.Inc(z, o.refSize)\n\treturn uint32(z)\n}", "func countEntries(buf []byte) int {\n\treturn bytes.Count(buf, []byte(\"\\n\"))\n}", "func (a *LogOptions) GetTailLines() int64 {\n\tif a.Tail > 0 {\n\t\treturn a.Tail\n\t}\n\treturn 500\n}", "func (c Chain) Length() int {\n\tcount := 1\n\tcurrent := c.Rest\n\tfor {\n\t\tif current == nil {\n\t\t\tbreak\n\t\t}\n\t\tcount++\n\t\tcurrent = current.Rest\n\t}\n\treturn count\n}", "func (sb *SeekableBuffer) Len() int {\n\treturn len(sb.data)\n\n}", "func (list *List) AppendToEnd(data int) {\n // 1. Create a new Node\n newNode := &Node{data: data, next: nil}\n\n // 2a. If list contains no elements, set new node as head of list\n // 2b. If list contains any element, traverse till last and append new node\n if list.size == 0 {\n list.head = newNode\n } else if list.size > 0 {\n current := list.head\n for current.next != nil {\n current = current.next\n }\n current.next = newNode\n }\n\n // 3. 
Increment the list size\n list.size++\n}", "func CountArgs(args MyType) int {\n\treturn len(args)\n}", "func (i *ImageBuf) YEnd() int {\n\tret := int(C.ImageBuf_yend(i.ptr))\n\truntime.KeepAlive(i)\n\treturn ret\n}", "func (jbobject *JavaLangCharSequence) Length() int {\n\tjret, err := jbobject.CallMethod(javabind.GetEnv(), \"length\", javabind.Int)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn jret.(int)\n}", "func (args MyType) CountArgs() int {\n\treturn len(args)\n}", "func (tb *TextBuf) EndPos() TextPos {\n\ttb.LinesMu.RLock()\n\tdefer tb.LinesMu.RUnlock()\n\n\tif tb.NLines == 0 {\n\t\treturn TextPosZero\n\t}\n\ted := TextPos{tb.NLines - 1, len(tb.Lines[tb.NLines-1])}\n\treturn ed\n}", "func (d *Decoder) Len() int { return d.buf.Len() }", "func Fscanln(r io.Reader, a ...interface{}) (n int, err error) {}", "func (s *Stream) Count() int {\n\treturn len(s.b) + s.stream.count()\n}", "func endPosInBuffer(env *Env, name string) fake.Pos {\n\tbuffer := env.Editor.BufferText(name)\n\tlines := strings.Split(buffer, \"\\n\")\n\tnumLines := len(lines)\n\n\treturn fake.Pos{\n\t\tLine: numLines - 1,\n\t\tColumn: len([]rune(lines[numLines-1])),\n\t}\n}", "func (t *Transport) finish(stream *Stream, out []byte) (n int) {\n\tatomic.AddUint64(&t.nTxfin, 1)\n\tvar scratch [16]byte\n\tn = tag2cbor(tagCborPrefix, out) // prefix\n\tout[n] = 0xc8 // 0xc8 (end stream, 0b110_01000 <tag,8>)\n\tn++ //\n\tm := tag2cbor(stream.opaque, scratch[:]) // tag-opaque\n\tscratch[m] = 0x40 // zero-len byte-string\n\tm++\n\tn += valbytes2cbor(scratch[:m], out[n:]) // packet\n\tout[n] = 0xff // 0xff CBOR indefinite end.\n\tn++\n\treturn n\n}", "func (rb *RingBuffer) Len() int {\n\trb.lock.RLock()\n\tdefer rb.lock.RUnlock()\n\tif n := len(rb.data); rb.seq < uint64(n) {\n\t\treturn int(rb.seq)\n\t} else {\n\t\treturn n\n\t}\n}", "func (tailer *Tailer) Tail(out chan<- *redispub.Publication, stop <-chan bool) {\n\tchildStopC := make(chan bool)\n\twasStopped := false\n\n\tgo func() {\n\t\t<-stop\n\t\twasStopped = true\n\t\tchildStopC <- true\n\t}()\n\n\tfor {\n\t\tlog.Log.Info(\"Starting oplog tailing\")\n\t\ttailer.tailOnce(out, childStopC)\n\t\tlog.Log.Info(\"Oplog tailing ended\")\n\n\t\tif wasStopped {\n\t\t\treturn\n\t\t}\n\n\t\tlog.Log.Errorw(\"Oplog tailing stopped prematurely. 
Waiting a second an then retrying.\")\n\t\ttime.Sleep(requeryDuration)\n\t}\n}", "func (buf *queueBuffer) Len() uint64 {\n\treturn buf.depth\n}", "func consume(in []byte, n int) (out, data []byte, err error) {\n\tif n < 0 || len(in) < n {\n\t\treturn nil, nil, bufferTooSmall\n\t}\n\treturn in[n:], in[:n], nil\n}", "func (i *queueIndex) putTail(aid, pos int) {\n\ti.indexArena.WriteUint64(16, uint64(aid))\n\ti.indexArena.WriteUint64(24, uint64(pos))\n}", "func (q *queue) Len() int {\n\tq.lock.RLock()\n\tdefer q.lock.RUnlock()\n\tc := q.tail - q.head\n\tif c < 0 {\n\t\tc = 0\n\t}\n\n\treturn c\n}", "func (fs *FollowService) Count(opts ...Option) (int, error) {\n\tct, err := fs.client.getCount(fs.end, opts...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"cannot count Follows\")\n\t}\n\n\treturn ct, nil\n}", "func (f *Frame) Len() int64 {\n\treturn f.Nchars\n}", "func tailCall(i Instruction, ls *LuaState) {\n\ta, b, _ := i.ABC()\n\ta += 1\n\n\t// todo: optimize tail call!\n\tc := 0\n\tnArgs := _pushFuncAndArgs(a, b, ls)\n\tls.Call(nArgs, c-1)\n\t_popResults(a, c, ls)\n}", "func fnLen(ctx Context, doc *JDoc, params []string) interface{} {\n\tstats := ctx.Value(EelTotalStats).(*ServiceStats)\n\tif params == nil || len(params) != 1 {\n\t\tctx.Log().Error(\"error_type\", \"func_len\", \"op\", \"len\", \"cause\", \"wrong_number_of_parameters\", \"params\", params)\n\t\tstats.IncErrors()\n\t\tAddError(ctx, SyntaxError{fmt.Sprintf(\"wrong number of parameters in call to len function\"), \"len\", params})\n\t\treturn nil\n\t}\n\tvar obj interface{}\n\terr := json.Unmarshal([]byte(extractStringParam(params[0])), &obj)\n\tif err != nil {\n\t\treturn len(extractStringParam(params[0]))\n\t}\n\tswitch obj.(type) {\n\tcase []interface{}:\n\t\treturn len(obj.([]interface{}))\n\tcase map[string]interface{}:\n\t\treturn len(obj.(map[string]interface{}))\n\t}\n\treturn 0\n}", "func BytesWithTail(b []byte, width uint, tail []byte) []byte {\n\tf := NewWriter(width, string(tail))\n\t_, _ = f.Write(b)\n\n\treturn f.Bytes()\n}", "func (a Args) SetLast(count int32) { a[2] = &count }" ]
[ "0.7553367", "0.55397993", "0.55372894", "0.5447011", "0.5136406", "0.5096522", "0.5053771", "0.48868006", "0.4742708", "0.47175592", "0.47164515", "0.4693058", "0.46704385", "0.46618178", "0.4602077", "0.45943382", "0.45895612", "0.45864716", "0.45026076", "0.44995427", "0.44792396", "0.44429526", "0.44288334", "0.44090727", "0.44019282", "0.4401014", "0.43948272", "0.43932658", "0.43902636", "0.43831578", "0.4379615", "0.43611205", "0.43536466", "0.4339825", "0.43369177", "0.43269303", "0.43168217", "0.4306919", "0.43021846", "0.42843592", "0.42811498", "0.42653203", "0.42520455", "0.42514032", "0.42490226", "0.42389002", "0.4238506", "0.42337444", "0.42198294", "0.4188493", "0.41867957", "0.41863415", "0.41851667", "0.41823688", "0.41724366", "0.41696692", "0.41673866", "0.4156479", "0.41520622", "0.41516173", "0.41471648", "0.41333818", "0.4127505", "0.41200483", "0.41176486", "0.4117641", "0.41065827", "0.41003522", "0.40983915", "0.4097899", "0.409646", "0.40931278", "0.40904388", "0.4082942", "0.40810627", "0.40788874", "0.4076624", "0.40749723", "0.40745378", "0.40698564", "0.40694275", "0.40676346", "0.40665516", "0.40637568", "0.40592396", "0.40585324", "0.40518108", "0.40441748", "0.4042864", "0.40389624", "0.40344638", "0.40329254", "0.4031408", "0.40291876", "0.4028838", "0.40284228", "0.40196514", "0.40165922", "0.4014246", "0.401392" ]
0.81202745
0
countToEnd counts the number of used bytes in a circular buffer to the end of the linear buffer or the end of the circular buffer, whichever is smaller.
func (c *CircBuf) countToEnd() int { return c.countToEndArg(c.tail) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *CircBuf) countToEndArg(tail int) int {\n\tcountEndLinearBuf := len(c.buf) - tail\n\tcountEndCircBuf := (c.head + countEndLinearBuf) & (len(c.buf) - 1)\n\tif countEndCircBuf < countEndLinearBuf {\n\t\treturn countEndCircBuf\n\t}\n\treturn countEndLinearBuf\n}", "func (c *CircBuf) spaceToEnd() int {\n\tspaceEndLinearBuf := len(c.buf) - c.head\n\tspaceEndCircBuf := (c.tail + spaceEndLinearBuf - 1) & (len(c.buf) - 1)\n\tif spaceEndLinearBuf < spaceEndCircBuf {\n\t\treturn spaceEndLinearBuf\n\t}\n\treturn spaceEndCircBuf\n}", "func (c *CircBuf) Consume(nbytes int) int {\n\tvar count int\n\tvar num int\n\tfor {\n\t\tcount = c.countToEnd()\n\t\tif nbytes - num < count {\n\t\t\tcount = nbytes - num\n\t\t}\n\t\tif count <= 0 {\n\t\t\tbreak\n\t\t}\n\t\tc.tail = (c.tail + count) & (len(c.buf) - 1)\n\t\tnum += count\n\t}\n\treturn num\n}", "func (b *Buffer) Len() (n int) {\n\tfor _, l := range b.lines {\n\t\tn += utf8.RuneCount(l.data)\n\t}\n\n\tif len(b.lines) > 1 {\n\t\tn += len(b.lines) - 1 // account for newlines\n\t}\n\n\treturn\n}", "func (ls *linestate) deleteToEnd() {\n\tls.buf = ls.buf[:ls.pos]\n\tls.refreshLine()\n}", "func (c *CircBuf) Count() int {\n\treturn (c.head - c.tail) & (len(c.buf) - 1)\n}", "func (b *Buffer) End() Loc {\n\treturn Loc{utf8.RuneCount(b.lines[b.NumLines-1].data), b.NumLines - 1}\n}", "func (r *Record) End() int {\n\tif r.Flags&Unmapped != 0 || len(r.Cigar) == 0 {\n\t\treturn r.Pos + 1\n\t}\n\tpos := r.Pos\n\tend := pos\n\tfor _, co := range r.Cigar {\n\t\tpos += co.Len() * co.Type().Consumes().Reference\n\t\tend = max(end, pos)\n\t}\n\treturn end\n}", "func TestEndToEnd(t *testing.T) {\n\tdir, gonulljson := buildNullJson(t)\n\tdefer os.RemoveAll(dir)\n\t// Read the testdata directory.\n\twalkDir(dir, gonulljson, \"testdata\", t)\n}", "func (b *Buffer) Remaining(from Cursor) uint64 {\n\tb.mu.RLock()\n\tdefer b.mu.RUnlock()\n\n\tif from.offset > b.last {\n\t\treturn 0\n\t}\n\n\toff := from.offset\n\tif off < b.first {\n\t\toff = b.first\n\t}\n\tremaining := b.last - off\n\tremaining += uint64(b.frameSize(b.last))\n\treturn remaining\n}", "func (s *Stream) ListEnd() error {\n\tif len(s.stack) == 0 {\n\t\treturn errNotInList\n\t}\n\ttos := s.stack[len(s.stack)-1]\n\tif tos.pos != tos.size {\n\t\treturn errNotAtEOL\n\t}\n\ts.stack = s.stack[:len(s.stack)-1] // pop out information from the stack\n\tif len(s.stack) > 0 {\n\t\ts.stack[len(s.stack)-1].pos += tos.size\n\t}\n\ts.kind = -1\n\ts.size = 0\n\treturn nil\n}", "func (c *CircBuf) Read(buf []byte) int {\n\tvar count int\n\tvar num int\n\tfor {\n\t\tcount = c.countToEnd()\n\t\tif len(buf) - num < count {\n\t\t\tcount = len(buf) - num\n\t\t}\n\t\tif count <= 0 {\n\t\t\tbreak\n\t\t}\n\t\tcopy(buf[num : num + count], c.buf[c.tail : c.tail + count])\n\t\tc.tail = (c.tail + count) & (len(c.buf) - 1)\n\t\tnum += count\n\t}\n\treturn num\n}", "func (b *Buffer) Len() int { return b.length }", "func (jbobject *JavaNioCharBuffer) Length() int {\n\tjret, err := jbobject.CallMethod(javabind.GetEnv(), \"length\", javabind.Int)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn jret.(int)\n}", "func (b *loopBuffer) Len() int {\n\tif b.looped {\n\t\treturn cap(b.buf)\n\t}\n\treturn b.off\n}", "func (tb *TextBuf) EndPos() TextPos {\n\ttb.LinesMu.RLock()\n\tdefer tb.LinesMu.RUnlock()\n\n\tif tb.NLines == 0 {\n\t\treturn TextPosZero\n\t}\n\ted := TextPos{tb.NLines - 1, len(tb.Lines[tb.NLines-1])}\n\treturn ed\n}", "func BenchmarkEndToEnd(b *testing.B) {\n\tvar tracer trace.Trace\n\n\tfor i := 0; i < b.N; i++ {\n\t\t//tracer = 
trace.New(os.Stderr, true) // use stderr to trace\n\t\ttracer = trace.New(ioutil.Discard, true) // and this to not\n\n\t\tvar tokens = xml_lexer.Lex(xmlInput, tracer) // xml\n\t\t//var tokens = json_lexer.Lex(jsonInput, tracer) // json\n\t\t//var tokens []token.Token = csv_lexer.Lex(jsonInput, tracer) // csv\n\t\tvar tests = []struct {\n\t\t\texpr string\n\t\t\texpect string\n\t\t}{\n\t\t\t// current debug cases\n\t\t\t{expr: `universe/galaxy[world=\"earth\"]/timelord`, expect: `who`},\n\t\t}\n\n\t\ttracer.Begin()()\n\t\texplain := false\n\t\tfor _, test := range tests {\n\t\t\tevaluate(tokens, test.expr, explain, tracer)\n\t\t}\n\t}\n}", "func (b *BaseImpl) LenBuf() int {\n\n\tif len(b.Diffs) < 1 {\n\t\treturn len(b.bytes)\n\t}\n\n\tmax := len(b.bytes)\n\tfor i := range b.Diffs {\n\t\tif b.Diffs[i].Offset+len(b.Diffs[i].bytes) > max {\n\t\t\tmax = b.Diffs[i].Offset + len(b.Diffs[i].bytes)\n\t\t}\n\t}\n\n\treturn max\n\n}", "func (c *Counter) Count(b []byte) {\n\tfor i := 0; i < len(b); i++ {\n\t\tswitch b[i] {\n\t\t// '\\' means to concat next line\n\t\tcase '\\\\':\n\t\t\t//len(b)-2 because len(b)-1 is \"\\n\", we take line-break into consideration\n\t\t\tif i == len(b)-1 || i == len(b)-2 {\n\t\t\t\tc.NextLineConcats = 1\n\t\t\t\tbreak\n\t\t\t}\n\t\tcase '{':\n\t\t\tc.CurlyBrackets++\n\t\tcase '}':\n\t\t\tc.CurlyBrackets--\n\t\tcase '(':\n\t\t\tc.Parentheses++\n\t\tcase ')':\n\t\t\tc.Parentheses--\n\t\tcase '[':\n\t\t\tc.SquareBrackets++\n\t\tcase ']':\n\t\t\tc.SquareBrackets--\n\t\t}\n\t}\n}", "func (r *Renderer) countLines(buf bytes.Buffer) int {\n\tw, err := r.termWidth()\n\tif err != nil || w == 0 {\n\t\t// if we got an error due to terminal.GetSize not being supported\n\t\t// on current platform then just assume a very wide terminal\n\t\tw = 10000\n\t}\n\n\tbufBytes := buf.Bytes()\n\n\tcount := 0\n\tcurr := 0\n\tdelim := -1\n\tfor curr < len(bufBytes) {\n\t\t// read until the next newline or the end of the string\n\t\trelDelim := bytes.IndexRune(bufBytes[curr:], '\\n')\n\t\tif relDelim != -1 {\n\t\t\tcount += 1 // new line found, add it to the count\n\t\t\tdelim = curr + relDelim\n\t\t} else {\n\t\t\tdelim = len(bufBytes) // no new line found, read rest of text\n\t\t}\n\n\t\tif lineWidth := utf8.RuneCount(bufBytes[curr:delim]); lineWidth > w {\n\t\t\t// account for word wrapping\n\t\t\tcount += lineWidth / w\n\t\t\tif (lineWidth % w) == 0 {\n\t\t\t\t// content whose width is exactly a multiplier of available width should not\n\t\t\t\t// count as having wrapped on the last line\n\t\t\t\tcount -= 1\n\t\t\t}\n\t\t}\n\t\tcurr = delim + 1\n\t}\n\n\treturn count\n}", "func (s *Stream) Count() int {\n\treturn len(s.b) + s.stream.count()\n}", "func (s *Scanner) isAtEnd() bool {\n\tif s.current >= len(s.source) {\n\t\treturn true\n\t}\n\treturn false\n}", "func (s *OverallTestResultItem) SetEndToEndResultCounts(v map[string]*int64) *OverallTestResultItem {\n\ts.EndToEndResultCounts = v\n\treturn s\n}", "func (d *Decoder) remaining() (int64, error) {\n\n\t// TODO change this to use d.ParentBounds()\n\n\tpos, err := d.f.Tell()\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\tstart, s, err := d.currentStruct()\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\tlen, err := d.eval(s.Length())\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\treturn (start - pos + len), nil\n}", "func (s *safeBuffer) Len() int {\n\ts.Lock()\n\tdefer s.Unlock()\n\treturn s.buf.Len()\n}", "func (b *SafeBuffer) Len() int {\n\tb.m.Lock()\n\tdefer b.m.Unlock()\n\treturn b.b.Len()\n}", "func (t *Transport) finish(stream 
*Stream, out []byte) (n int) {\n\tatomic.AddUint64(&t.nTxfin, 1)\n\tvar scratch [16]byte\n\tn = tag2cbor(tagCborPrefix, out) // prefix\n\tout[n] = 0xc8 // 0xc8 (end stream, 0b110_01000 <tag,8>)\n\tn++ //\n\tm := tag2cbor(stream.opaque, scratch[:]) // tag-opaque\n\tscratch[m] = 0x40 // zero-len byte-string\n\tm++\n\tn += valbytes2cbor(scratch[:m], out[n:]) // packet\n\tout[n] = 0xff // 0xff CBOR indefinite end.\n\tn++\n\treturn n\n}", "func (sb *SeekableBuffer) Len() int {\n\treturn len(sb.data)\n\n}", "func journal_seek_tail() int {\n\n\treturn int(C.journal_seek_tail())\n}", "func (pb *PageBuffer) End() bool {\n return pb.is_end\n}", "func (i *ImageBuf) YEnd() int {\n\tret := int(C.ImageBuf_yend(i.ptr))\n\truntime.KeepAlive(i)\n\treturn ret\n}", "func lineCounter(r io.Reader) (int, error) {\n\tbuf := make([]byte, 32*1024)\n\tcount := 0\n\tlineSep := []byte{'\\n'}\n\tvar lastByte byte\n\tlastByte = '\\n'\n\n\tfor {\n\t\tc, err := r.Read(buf)\n\t\tif c > 0 {\n\t\t\tlastByte = buf[c-1]\n\t\t}\n\t\tcount += bytes.Count(buf[:c], lineSep)\n\n\t\tswitch {\n\t\tcase err == io.EOF:\n\t\t\tif lastByte != '\\n' {\n\t\t\t\tlog.Warn(fmt.Sprintf(\"Last byte in buffer is '%v'\", lastByte))\n\t\t\t\tcount += 1\n\t\t\t}\n\t\t\treturn count, nil\n\n\t\tcase err != nil:\n\t\t\treturn count, err\n\t\t}\n\t}\n}", "func (reader *ProtocolReader) Len() int { return len(reader.buf) - reader.off }", "func (rb *RingBuffer) Len() int {\n\trb.lock.RLock()\n\tdefer rb.lock.RUnlock()\n\tif n := len(rb.data); rb.seq < uint64(n) {\n\t\treturn int(rb.seq)\n\t} else {\n\t\treturn n\n\t}\n}", "func seekBackwardsToLineCount(lc int, f *os.File) *os.File {\n\t// line count counter\n\tl := 0\n\t// offset counter, negative because counting backwards\n\tvar offset int64 = -1\n\n\tfinfo, err := os.Stat(f.Name())\n\tif err != nil {\n\t\t_, _ = fmt.Fprintf(os.Stderr, \"error while getting fileinfo: %s\", f.Name())\n\t\treturn nil\n\t}\n\n\tfsize := finfo.Size()\n\n\t// loop until lc is passed\n\tfor ; ; offset-- {\n\t\t// check if we are past the file start\n\t\tif offset+fsize == 0 {\n\t\t\t// if so, return this position, there's no room to backup\n\t\t\tbreak\n\t\t}\n\n\t\t// seek backwards by offset from the end\n\t\tp, err := f.Seek(int64(offset), io.SeekEnd)\n\t\tif err != nil {\n\t\t\t_, _ = fmt.Fprintf(os.Stderr, \"error while seeking by char at %d: %s\", offset, err)\n\t\t\treturn nil\n\t\t}\n\n\t\t// read one char, a new reader is needed from seeked File ref\n\t\tr := bufio.NewReader(f)\n\t\tb, err := r.ReadByte()\n\t\tif err != nil {\n\t\t\t_, _ = fmt.Fprintf(os.Stderr, \"error while reading char at %d: %s\", p, err)\n\t\t\treturn nil\n\t\t}\n\n\t\t// check if read char is new line\n\t\ts := string(b)\n\t\tif s == \"\\n\" {\n\t\t\tl++\n\t\t\t// if line count is passed\n\t\t\tif l > lc {\n\t\t\t\t// increase the offset by one (to compensate for last\n\t\t\t\t// read new line\n\t\t\t\toffset++\n\n\t\t\t\t// escape from loop\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\t// seek to the found position\n\t_, err = f.Seek(int64(offset), io.SeekEnd)\n\tif err != nil {\n\t\t_, _ = fmt.Fprintf(os.Stderr, \"end: error while seeking by char at %d: %s\", offset, err)\n\t\treturn nil\n\t}\n\n\treturn f\n}", "func (f *Field) End() int64", "func endPosInBuffer(env *Env, name string) fake.Pos {\n\tbuffer := env.Editor.BufferText(name)\n\tlines := strings.Split(buffer, \"\\n\")\n\tnumLines := len(lines)\n\n\treturn fake.Pos{\n\t\tLine: numLines - 1,\n\t\tColumn: len([]rune(lines[numLines-1])),\n\t}\n}", "func (b *Buf) Len() int { return len(b.b) 
}", "func (jbobject *JavaLangCharSequence) Length() int {\n\tjret, err := jbobject.CallMethod(javabind.GetEnv(), \"length\", javabind.Int)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn jret.(int)\n}", "func (subr *SRCountersData) Len() (l uint16) {\n\tencoded, _ := subr.Encode()\n\tl = uint16(len(encoded))\n\treturn l\n}", "func (r Range) End() int64 {\n\treturn r.Pos + r.Size\n}", "func (s *SinglyLinkedList) AddToEnd(val interface{}) {\n\tnode := &Node{val, nil}\n\ts.Count += 1\n\tlast := s.LastNode()\n\tif last != nil {\n\t\tlast.Next = node\n\t}\n}", "func countLines(output string) int {\n\tcon, err := console.ConsoleFromFile(os.Stdin)\n\tif err != nil {\n\t\treturn 0\n\t}\n\tws, err := con.Size()\n\tif err != nil {\n\t\treturn 0\n\t}\n\twidth := int(ws.Width)\n\tif width <= 0 {\n\t\treturn 0\n\t}\n\tstrlines := strings.Split(output, \"\\n\")\n\tlines := -1\n\tfor _, line := range strlines {\n\t\tlines += (len(stripLine(line))-1)/width + 1\n\t}\n\treturn lines\n}", "func (i *ImageBuf) ZEnd() int {\n\tret := int(C.ImageBuf_zend(i.ptr))\n\truntime.KeepAlive(i)\n\treturn ret\n}", "func (c *CircBuf) Write(buf []byte) int {\n\tvar space int\n\tvar num int\n\tfor {\n\t\tspace = c.spaceToEnd()\n\t\tif len(buf) - num < space {\n\t\t\tspace = len(buf) - num\n\t\t}\n\t\tif space <= 0 {\n\t\t\tbreak\n\t\t}\n\t\tcopy(c.buf[c.head : c.head + space], buf[num : num + space])\n\t\tc.head = (c.head + space) & (len(c.buf) - 1)\n\t\tnum += space\n\t}\n\treturn num\n}", "func (list *List) AppendToEnd(data int) {\n // 1. Create a new Node\n newNode := &Node{data: data, next: nil}\n\n // 2a. If list contains no elements, set new node as head of list\n // 2b. If list contains any element, traverse till last and append new node\n if list.size == 0 {\n list.head = newNode\n } else if list.size > 0 {\n current := list.head\n for current.next != nil {\n current = current.next\n }\n current.next = newNode\n }\n\n // 3. 
Increment the list size\n list.size++\n}", "func (r *Receiver) countBytes(length uint64, update bool) error {\n\tif length > MaxTransfer {\n\t\treturn fmt.Errorf(\"file too large, %d\", length)\n\t}\n\tif r.byteLimit != 0 && r.totalBytes > uint64(r.byteLimit)-length {\n\t\treturn fmt.Errorf(\"file too large, %d\", length)\n\t}\n\tif update {\n\t\tr.totalBytes += length\n\t}\n\treturn nil\n}", "func (ci *ContigIndex) SetEnd() {\n\tciLineNum := ci.length / ci.basePerLine\n\tciRemainder := ci.length % ci.basePerLine\n\tci.end = ci.start + (ciLineNum * ci.bytePerLine) + ciRemainder - 1\n}", "func (b *Buffer) truncate(n int) int {\n\tif n > len(b.data) {\n\t\tpanic(\"Trying to truncate past end of array.\")\n\t}\n\tb.data = b.data[n:]\n\treturn len(b.data)\n}", "func (r *baseNsRange) End() int { return r.end }", "func (c *CircBuf) Peek(buf []byte) int {\n\tvar count int\n\tvar tail int = c.tail // Use a local tail variable\n\tvar num int\n\tfor {\n\t\tcount = c.countToEndArg(tail)\n\t\tif len(buf) - num < count {\n\t\t\tcount = len(buf) - num\n\t\t}\n\t\tif count <= 0 {\n\t\t\tbreak\n\t\t}\n\t\tcopy(buf[num : num + count], c.buf[tail : tail + count])\n\t\ttail = (tail + count) & (len(c.buf) - 1)\n\t\tnum += count\n\t}\n\treturn num\n}", "func (r Range) End() uint64 {\n\treturn r.Offset + r.Length\n}", "func (d *Decoder) Len() int { return d.buf.Len() }", "func (f *Frame) Len() int64 {\n\treturn f.Nchars\n}", "func countRepetitions(strand Strand, length, limit int) int {\n\tcurrLen := 0\n\treps := 0\n\tvar prev rune\n\tfor _, r := range strand {\n\t\tif prev != r {\n\t\t\tcurrLen = 0\n\t\t}\n\t\tcurrLen++\n\t\tif currLen >= length {\n\t\t\treps++ // sequences can overlap\n\t\t}\n\t\tif limit > 0 && reps >= limit {\n\t\t\tbreak\n\t\t}\n\t\tprev = r\n\t}\n\n\treturn reps\n}", "func readLastLines(ctx context.Context, file *os.File, endCursor int64) ([]string, int, error) {\n\tvar lines []byte\n\tvar firstNonNewlinePos int\n\tvar cursor = endCursor\n\tvar size int64 = 256\n\tfor {\n\t\t// stop if we are at the begining\n\t\t// check it in the start to avoid read beyond the size\n\t\tif cursor <= 0 {\n\t\t\tbreak\n\t\t}\n\n\t\t// enlarge the read cache to avoid too many memory move.\n\t\tsize = size * 2\n\t\tif size > maxReadCacheSize {\n\t\t\tsize = maxReadCacheSize\n\t\t}\n\t\tif cursor < size {\n\t\t\tsize = cursor\n\t\t}\n\t\tcursor -= size\n\n\t\t_, err := file.Seek(cursor, io.SeekStart)\n\t\tif err != nil {\n\t\t\treturn nil, 0, ctx.Err()\n\t\t}\n\t\tchars := make([]byte, size)\n\t\t_, err = file.Read(chars)\n\t\tif err != nil {\n\t\t\treturn nil, 0, ctx.Err()\n\t\t}\n\t\tlines = append(chars, lines...)\n\n\t\t// find first '\\n' or '\\r'\n\t\tfor i := 0; i < len(chars)-1; i++ {\n\t\t\t// reach the line end\n\t\t\t// the first newline may be in the line end at the first round\n\t\t\tif i >= len(lines)-1 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif (chars[i] == 10 || chars[i] == 13) && chars[i+1] != 10 && chars[i+1] != 13 {\n\t\t\t\tfirstNonNewlinePos = i + 1\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif firstNonNewlinePos > 0 {\n\t\t\tbreak\n\t\t}\n\t\tif isCtxDone(ctx) {\n\t\t\treturn nil, 0, ctx.Err()\n\t\t}\n\t}\n\tfinalStr := string(lines[firstNonNewlinePos:])\n\treturn strings.Split(strings.ReplaceAll(finalStr, \"\\r\\n\", \"\\n\"), \"\\n\"), len(finalStr), nil\n}", "func (mes *MarkerEncodingScheme) Tail(b byte, pos int) checked.Bytes { return mes.tails[int(b)][pos-1] }", "func (tv *TextView) RenderRegionToEnd(st TextPos, sty *gi.Style, bgclr *gi.ColorSpec) {\n\tspos := tv.CharStartPos(st)\n\tepos := 
spos\n\tepos.Y += tv.LineHeight\n\tepos.X = float32(tv.VpBBox.Max.X)\n\tif int(mat32.Ceil(epos.Y)) < tv.VpBBox.Min.Y || int(mat32.Floor(spos.Y)) > tv.VpBBox.Max.Y {\n\t\treturn\n\t}\n\n\trs := &tv.Viewport.Render\n\tpc := &rs.Paint\n\n\tpc.FillBox(rs, spos, epos.Sub(spos), bgclr) // same line, done\n}", "func (r *textprotoReader) upcomingHeaderNewlines() (n int) {\n\t// Try to determine the 'hint' size.\n\tr.R.Peek(1) // force a buffer load if empty\n\ts := r.R.Buffered()\n\tif s == 0 {\n\t\treturn\n\t}\n\tpeek, _ := r.R.Peek(s)\n\treturn bytes.Count(peek, nl)\n}", "func (b *Buffer) size() int {\n\treturn len(b.data)\n}", "func (r *DecReader) WriteTo(w io.Writer) (int64, error) {\n\tvar n int64\n\tif r.err != nil {\n\t\treturn n, r.err\n\t}\n\tif r.firstRead {\n\t\tr.firstRead = false\n\t\tnn, err := r.readFragment(r.buffer, 0)\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn n, err\n\t\t}\n\t\tnn, err = writeTo(w, r.buffer[:nn])\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\t\tn += int64(nn)\n\t\tif r.closed {\n\t\t\treturn n, nil\n\t\t}\n\t}\n\tif r.offset > 0 {\n\t\tnn, err := writeTo(w, r.plaintextBuffer[r.offset:])\n\t\tif err != nil {\n\t\t\tr.err = err\n\t\t\treturn n, err\n\t\t}\n\t\tr.offset = 0\n\t\tn += int64(nn)\n\t}\n\tif r.closed {\n\t\treturn n, io.EOF\n\t}\n\tfor {\n\t\tnn, err := r.readFragment(r.buffer, 1)\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn n, err\n\t\t}\n\t\tnn, err = writeTo(w, r.buffer[:nn])\n\t\tif err != nil {\n\t\t\tr.err = err\n\t\t\treturn n, err\n\t\t}\n\t\tn += int64(nn)\n\t\tif r.closed {\n\t\t\treturn n, nil\n\t\t}\n\t}\n}", "func (s *downStream) endStream() {\n\tif s.responseSender != nil && !s.downstreamRecvDone {\n\t\t// not reuse buffer\n\t\tatomic.StoreUint32(&s.reuseBuffer, 0)\n\t}\n\ts.cleanStream()\n\n\t// note: if proxy logic resets the stream, there maybe some underlying data in the conn.\n\t// we ignore this for now, fix as a todo\n}", "func (b *Buffer) Len() int {\n\treturn len(b.buf)\n}", "func (r *EncReader) WriteTo(w io.Writer) (int64, error) {\n\tvar n int64\n\tif r.firstRead {\n\t\tr.firstRead = false\n\t\tnn, err := r.readFragment(r.buffer, 0)\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn n, err\n\t\t}\n\t\tnn, err = writeTo(w, r.buffer[:nn])\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\t\tn += int64(nn)\n\t\tif r.closed {\n\t\t\treturn n, nil\n\t\t}\n\t}\n\tif r.err != nil {\n\t\treturn n, r.err\n\t}\n\tif r.offset > 0 {\n\t\tnn, err := writeTo(w, r.ciphertextBuffer[r.offset:])\n\t\tif err != nil {\n\t\t\tr.err = err\n\t\t\treturn n, err\n\t\t}\n\t\tr.offset = 0\n\t\tn += int64(nn)\n\t}\n\tif r.closed {\n\t\treturn n, io.EOF\n\t}\n\tfor {\n\t\tnn, err := r.readFragment(r.buffer, 1)\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn n, err\n\t\t}\n\t\tnn, err = writeTo(w, r.buffer[:nn])\n\t\tif err != nil {\n\t\t\tr.err = err\n\t\t\treturn n, err\n\t\t}\n\t\tn += int64(nn)\n\t\tif r.closed {\n\t\t\treturn n, nil\n\t\t}\n\t}\n}", "func (b *Buffer) Len() uint64 {\n\tb.mu.RLock()\n\tdefer b.mu.RUnlock()\n\treturn b.length\n}", "func readFull(r io.Reader, p []byte) (int, error) {\n\tcur := 0\n\tfor cur < len(p) {\n\t\tamt, err := r.Read(p[cur:])\n\t\tcur += amt\n\t\tif err != nil {\n\t\t\treturn cur, err\n\t\t}\n\t}\n\treturn cur, nil\n}", "func (r *Reader) Remaining() int {\n\treturn len(r.buf)\n}", "func (r *Reader) Len() int {\n\tif r.i >= int64(len(r.s)) {\n\t\treturn 0\n\t}\n\treturn int(int64(len(r.s)) - r.i)\n}", "func (o *A_2) Len() uint64 {\r\n\treturn 0 +\r\n\t\to.Cursor.Len() +\r\n\t\to.Limit.Len() 
+\r\n\t\t0\r\n}", "func LineCounter(content *bytes.Buffer) int {\n\tcount := 0\n\n\tfor {\n\t\t_, err := content.ReadBytes('\\n')\n\n\t\tif err == nil {\n\t\t\tcount += 1\n\t\t} else {\n\t\t\treturn count\n\t\t}\n\t}\n}", "func isReadLimitReached(bytesLoaded int64, linesLoaded int64, logFilePosition string) bool {\n\treturn (logFilePosition == logs.Beginning && bytesLoaded >= byteReadLimit) ||\n\t\t(logFilePosition == logs.End && linesLoaded >= lineReadLimit)\n}", "func (rb *RingBuffer) Length() int {\n\treturn rb.count\n}", "func FindReaderSize(r io.Reader, needle []byte, bufSize int) (int, error) {\n\tif bufSize < len(needle) {\n\t\tbufSize = 2 * len(needle)\n\t}\n\tneedleLen := len(needle)\n\tbuf := make([]byte, bufSize)\n\tvar off, start int\n\tfor {\n\t\tn, err := io.ReadAtLeast(r, buf[start:], needleLen)\n\t\tif errors.Is(err, io.ErrUnexpectedEOF) {\n\t\t\terr = io.EOF\n\t\t}\n\t\tif n == 0 && errors.Is(err, io.EOF) {\n\t\t\treturn -1, nil\n\t\t}\n\t\t//fmt.Println(off, start, n)\n\t\tif i := bytes.Index(buf[:start+n], needle); i >= 0 {\n\t\t\t//fmt.Printf(\"buf=%q\\n\", buf[i:i+len(needle)])\n\t\t\treturn off + i, nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn -1, err\n\t\t}\n\t\t// copy the end to the start\n\t\tcopy(buf[0:], buf[start+n-needleLen+1:start+n])\n\t\tif off == 0 {\n\t\t\toff = n - needleLen + 1\n\t\t\tstart = needleLen - 1\n\t\t} else {\n\t\t\toff += n\n\t\t}\n\t\t//fmt.Printf(\"buf=%q n=%d needle=%d off=%d\\n\", buf[:start], n, len(needle), off)\n\t}\n}", "func (image *image) IsEndOfStream() bool {\n\tif image.IsClosed() {\n\t\treturn image.isEos\n\t}\n\treturn image.subscriberPosition.get() >= image.logBuffers.Meta().EndOfStreamPosOff.Get()\n}", "func (cr *CountingReader) Read(dst []byte) (int, error) {\n\n\tread, err := cr.R.Read(dst)\n\tcr.bytesRead += int64(read)\n\treturn read, err\n}", "func (mes *MarkerEncodingScheme) EndOfStream() Marker { return mes.endOfStream }", "func wordEnd(bytes []byte, l0Pos int) int {\n\tl := len(bytes)\n\n\tl1, l1Pos := wLastSequence(bytes[:l0Pos])\n\tl0, r0Delta := wFirstSequence(bytes[l0Pos:])\n\tlOddRI := wIsOpenRI(bytes[:l1Pos], l1, l0)\n\tr0Pos := l0Pos + r0Delta\n\tr0, r1Delta := wFirstSequence(bytes[r0Pos:])\n\n\tfor r0Pos < l {\n\t\tr1, r2Delta := wFirstSequence(bytes[r0Pos+r1Delta:])\n\t\tif wDecision(l1, l0, lOddRI, r0, r1) {\n\t\t\treturn r0Pos\n\t\t}\n\t\tl1 = l0\n\t\tl0 = r0\n\t\tr0 = r1\n\t\tr0Pos += r1Delta\n\t\tr1Delta = r2Delta\n\t\tlOddRI = l0 == wClassRI && !lOddRI\n\t}\n\treturn l\n}", "func (o Int64RangeMatchOutput) RangeEnd() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v Int64RangeMatch) *string { return v.RangeEnd }).(pulumi.StringPtrOutput)\n}", "func (rb *RingBuffer[T]) Len() int {\n\tif rb == nil {\n\t\treturn 0\n\t}\n\trb.mu.Lock()\n\tdefer rb.mu.Unlock()\n\treturn len(rb.buf)\n}", "func last(b *ringBuf) iterator {\n\treturn iterator((b.head + b.len - 1) % len(b.buf))\n}", "func (reader *StunReader) Size() int64 { return reader.limit - reader.base }", "func (r *Reader) Size() int64 { return int64(len(r.s)) }", "func (subr *SRRecordResponse) Len() (l uint16) {\n\tencoded, _ := subr.Encode()\n\tl = uint16(len(encoded))\n\treturn l\n}", "func (s *Stream) GetSize() uint { return s.cur_size }", "func (self File) TailBytes(limitSize int) ([]byte, error) {\n\tvar limitBytes []byte\n\tfile := self.Open()\n\n\treadBytes, err := io.ReadAtLeast(file, limitBytes, limitSize)\n\tif readBytes != limitSize {\n\t\treturn limitBytes, fmt.Errorf(\"error: failed to complete read: read \", readBytes, \" out of \", 
limitSize, \"bytes\")\n\t} else {\n\t\treturn limitBytes, err\n\t}\n}", "func buffer(rd io.Reader) (buf []byte, endInd int, streamInd int, streamOffset int64, err error) {\n\n\t// process: # gen obj ... obj dict ... {stream ... data ... endstream} ... endobj\n\t// streamInd endInd\n\t// -1 if absent -1 if absent\n\n\t//log.Read.Println(\"buffer: begin\")\n\n\tendInd, streamInd = -1, -1\n\n\tfor endInd < 0 && streamInd < 0 {\n\n\t\tbuf, err = growBufBy(buf, defaultBufSize, rd)\n\t\tif err != nil {\n\t\t\treturn nil, 0, 0, 0, err\n\t\t}\n\n\t\tline := string(buf)\n\t\tendInd = strings.Index(line, \"endobj\")\n\t\tstreamInd = strings.Index(line, \"stream\")\n\n\t\tif endInd > 0 && (streamInd < 0 || streamInd > endInd) {\n\t\t\t// No stream marker in buf detected.\n\t\t\tbreak\n\t\t}\n\n\t\t// For very rare cases where \"stream\" also occurs within obj dict\n\t\t// we need to find the last \"stream\" marker before a possible end marker.\n\t\tfor streamInd > 0 && !keywordStreamRightAfterEndOfDict(line, streamInd) {\n\t\t\tlastStreamMarker(&streamInd, endInd, line)\n\t\t}\n\n\t\tlog.Read.Printf(\"buffer: endInd=%d streamInd=%d\\n\", endInd, streamInd)\n\n\t\tif streamInd > 0 {\n\n\t\t\t// streamOffset ... the offset where the actual stream data begins.\n\t\t\t// is right after the eol after \"stream\".\n\n\t\t\tslack := 10 // for optional whitespace + eol (max 2 chars)\n\t\t\tneed := streamInd + len(\"stream\") + slack\n\n\t\t\tif len(line) < need {\n\n\t\t\t\t// to prevent buffer overflow.\n\t\t\t\tbuf, err = growBufBy(buf, need-len(line), rd)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, 0, 0, 0, err\n\t\t\t\t}\n\n\t\t\t\tline = string(buf)\n\t\t\t}\n\n\t\t\tstreamOffset = int64(nextStreamOffset(line, streamInd))\n\t\t}\n\t}\n\n\t//log.Read.Printf(\"buffer: end, returned bufsize=%d streamOffset=%d\\n\", len(buf), streamOffset)\n\n\treturn buf, endInd, streamInd, streamOffset, nil\n}", "func countEntries(buf []byte) int {\n\treturn bytes.Count(buf, []byte(\"\\n\"))\n}", "func CountFrom(start, step int) Stream {\n return &count{start, step}\n}", "func (self *TextIter) IsEnd() bool {\n\tb := C.gtk_text_iter_is_end(&self.object)\n\treturn gobject.GoBool(unsafe.Pointer(&b))\n}", "func (cur *cursor) invalidateAtEnd() {\n\tcur.idx = int(cur.nd.count)\n}", "func (is *InputStream) End(data []byte) {\n\tif len(data) > 0 {\n\t\tif len(data) != len(is.b) {\n\t\t\tis.b = append(is.b[:0], data...)\n\t\t}\n\t} else if len(is.b) > 0 {\n\t\tis.b = is.b[:0]\n\t}\n}", "func (is *InputStream) End(data []byte) {\n\tif len(data) > 0 {\n\t\tif len(data) != len(is.b) {\n\t\t\tis.b = append(is.b[:0], data...)\n\t\t}\n\t} else if len(is.b) > 0 {\n\t\tis.b = is.b[:0]\n\t}\n}", "func (is *InputStream) End(data []byte) {\n\tif len(data) > 0 {\n\t\tif len(data) != len(is.b) {\n\t\t\tis.b = append(is.b[:0], data...)\n\t\t}\n\t} else if len(is.b) > 0 {\n\t\tis.b = is.b[:0]\n\t}\n}", "func (b *FixedBuffer) Len() int {\n\treturn b.w - b.r\n}", "func (r *Record) Len() int {\n\treturn r.End() - r.Start()\n}", "func readStreamContent(rd io.Reader, streamLength int) ([]byte, error) {\n\n\tlog.Read.Printf(\"readStreamContent: begin streamLength:%d\\n\", streamLength)\n\n\tif streamLength == 0 {\n\t\t// Read until \"endstream\" then fix \"Length\".\n\t\treturn readStreamContentBlindly(rd)\n\t}\n\n\tbuf := make([]byte, streamLength)\n\n\tfor totalCount := 0; totalCount < streamLength; {\n\t\tcount, err := fillBuffer(rd, buf[totalCount:])\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t// Weak 
heuristic to detect the actual end of this stream\n\t\t\t// once we have reached EOF due to incorrect streamLength.\n\t\t\teob := bytes.Index(buf, []byte(\"endstream\"))\n\t\t\tif eob < 0 {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn buf[:eob], nil\n\t\t}\n\n\t\tlog.Read.Printf(\"readStreamContent: count=%d, buflen=%d(%X)\\n\", count, len(buf), len(buf))\n\t\ttotalCount += count\n\t}\n\n\tlog.Read.Printf(\"readStreamContent: end\\n\")\n\n\treturn buf, nil\n}", "func LineCounter(r io.Reader) (int, error) {\n buf := make([]byte, 32*1024)\n count := 0\n lineSep := []byte{'\\n'}\n\n for {\n c, err := r.Read(buf)\n count = count + bytes.Count(buf[:c], lineSep)\n switch {\n case err == io.EOF:\n return count, nil\n case err != nil:\n return count, err\n }\n }\n}", "func (w *writer) chunkCount(b []byte) int {\n\tlenB := len(b)\n\tif lenB <= w.chunkSize {\n\t\treturn 1\n\t}\n\n\treturn len(b)/w.chunkDataSize + 1\n}", "func (c *RingBuffer) Len() int {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\treturn c.len\n}", "func (n *RNSNum) size() int {\n\treturn len(n.buffer)\n}" ]
[ "0.7395144", "0.66005623", "0.5781479", "0.5436952", "0.5287207", "0.52536094", "0.52114034", "0.50922066", "0.5077863", "0.50059414", "0.49654856", "0.49562413", "0.48809332", "0.48796532", "0.48721987", "0.48706177", "0.48603386", "0.48066366", "0.47985214", "0.47886223", "0.47305316", "0.4720703", "0.47062248", "0.47058418", "0.47031522", "0.4699737", "0.46943164", "0.46859962", "0.46831506", "0.46784186", "0.46683282", "0.46471763", "0.46194863", "0.4616478", "0.46145847", "0.4612561", "0.4609561", "0.45965785", "0.45879942", "0.45855254", "0.4549533", "0.45478627", "0.4537659", "0.45343772", "0.45328006", "0.45265234", "0.45217943", "0.45134315", "0.45100382", "0.45090204", "0.45065802", "0.45049694", "0.44896218", "0.4487835", "0.44701043", "0.44664523", "0.44656894", "0.44655383", "0.4462547", "0.44546494", "0.44530812", "0.44438076", "0.44328138", "0.442456", "0.44208977", "0.44180492", "0.44151738", "0.4410988", "0.4405769", "0.44044253", "0.4398435", "0.43981016", "0.4395002", "0.43860093", "0.43837306", "0.43801838", "0.43798462", "0.43791255", "0.43785417", "0.43755415", "0.43709448", "0.4369853", "0.43601352", "0.4355171", "0.43473512", "0.4344696", "0.43409392", "0.4340259", "0.4337974", "0.43367404", "0.43334743", "0.43334743", "0.43334743", "0.43262693", "0.4325256", "0.43237826", "0.43139982", "0.43084154", "0.43025315", "0.4301365" ]
0.8227545
0
Space returns the number of unused bytes in a circular buffer.
func (c *CircBuf) Space() int { return (c.tail - c.head - 1) & (len(c.buf) - 1) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (b *Ring) Capacity() int {\n\tb.lock.RLock()\n\tdefer b.lock.RUnlock()\n\treturn len(b.buf)\n}", "func (c *RingBuffer) Capacity() int {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\treturn len(c.buf)\n}", "func (r Ring) Capacity() int {\n\treturn len(r.buff)\n}", "func (b *Buffer) Free() int {\n\treturn b.size - b.used\n}", "func (r *PortAllocator) Used() int {\n\treturn r.portRange.Size - r.alloc.Free()\n}", "func (r *RingBuffer) Free() int {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tif r.wPos == r.rPos {\n\t\tif r.isFull {\n\t\t\treturn 0\n\t\t}\n\t\treturn r.size\n\t}\n\n\tif r.wPos < r.rPos {\n\t\treturn r.rPos - r.wPos\n\t}\n\n\treturn r.size - r.wPos + r.rPos\n}", "func (c *CircBuf) Count() int {\n\treturn (c.head - c.tail) & (len(c.buf) - 1)\n}", "func (b *Buffer) Used() int {\n\treturn b.used\n}", "func (r *RingBuffer) Length() int {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tif r.wPos == r.rPos {\n\t\tif r.isFull {\n\t\t\treturn r.size\n\t\t}\n\t\treturn 0\n\t}\n\n\tif r.wPos > r.rPos {\n\t\treturn r.wPos - r.rPos\n\t}\n\n\treturn r.size - r.rPos + r.wPos\n}", "func (sp *Space) Length() int {\n\treturn len(*sp)\n}", "func (b *Buffer) Capacity() int { return len(b.data) }", "func (pb *PacketBuffer) BytesRemaining() int {\n\treturn len(pb.data) - pb.bytesStored()\n}", "func (rb *RingBuffer) Length() int {\n\treturn rb.count\n}", "func (this *FeedableBuffer) GetUsedByteCountOverMinimum() int {\n\treturn len(this.Data) - this.minByteCount\n}", "func (c *CircBuf) spaceToEnd() int {\n\tspaceEndLinearBuf := len(c.buf) - c.head\n\tspaceEndCircBuf := (c.tail + spaceEndLinearBuf - 1) & (len(c.buf) - 1)\n\tif spaceEndLinearBuf < spaceEndCircBuf {\n\t\treturn spaceEndLinearBuf\n\t}\n\treturn spaceEndCircBuf\n}", "func (b *Buffer) Remaining(from Cursor) uint64 {\n\tb.mu.RLock()\n\tdefer b.mu.RUnlock()\n\n\tif from.offset > b.last {\n\t\treturn 0\n\t}\n\n\toff := from.offset\n\tif off < b.first {\n\t\toff = b.first\n\t}\n\tremaining := b.last - off\n\tremaining += uint64(b.frameSize(b.last))\n\treturn remaining\n}", "func (r *Reader) SkipSpace() byte {\n\tfor r.p < len(r.v) {\n\t\tif libbytes.IsSpace(r.v[r.p]) {\n\t\t\tr.p++\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\tif r.p == len(r.v) {\n\t\treturn 0\n\t}\n\treturn r.v[r.p]\n}", "func (pk PacketBufferPtr) Size() int {\n\treturn int(pk.buf.Size()) - pk.headerOffset()\n}", "func (r *RingBuffer) Capacity() int {\n\treturn r.size\n}", "func (r *Reader) skipSpace() int {\n\tn := 0\n\tfor {\n\t\tc, err := r.R.ReadByte()\n\t\tif err != nil {\n\t\t\t// Bufio will keep err until next read.\n\t\t\tbreak\n\t\t}\n\t\tif c != ' ' && c != '\\t' {\n\t\t\tr.R.UnreadByte()\n\t\t\tbreak\n\t\t}\n\t\tn++\n\t}\n\treturn n\n}", "func (q *UnsafeQueue64) Size() uint64 {\n\tif q.putPos >= q.getPos {\n\t\treturn q.putPos - q.getPos\n\t}\n\treturn q.capMod + (q.putPos - q.getPos)\n}", "func (q *UnsafeQueue16) Size() uint16 {\n\tif q.putPos >= q.getPos {\n\t\treturn q.putPos - q.getPos\n\t}\n\treturn q.capMod + (q.putPos - q.getPos)\n}", "func (r *Ring) Size() int {\n\treturn (r.in - r.out + r.size) % r.size\n}", "func (q *Queue) Length() uint64 {\n\treturn (MaxQueueSize + q.tail - q.head) % MaxQueueSize\n}", "func (t Fixed) Available() int {\n\treturn cap(t) - len(t)\n}", "func (self *nativePduBuffer) Size() int {\n\treturn self.count\n}", "func (q *Queue) Size() int {\n\tif q.Empty() {\n\t\treturn 0\n\t}\n\treturn (q.tail + 1) - (q.head % cap(q.data))\n}", "func (a *Allocator) Used() int64 {\n\treturn a.acc.Used()\n}", "func (b *Buffer) TotalSize() int64 
{\n\treturn int64(len(b.bufs) * allocBufLen)\n}", "func (b *Ring) Size() int {\n\tb.lock.RLock()\n\tdefer b.lock.RUnlock()\n\treturn b.size\n}", "func (b *FixedBuffer) Len() int {\n\treturn b.w - b.r\n}", "func (c *CircBuf) Consume(nbytes int) int {\n\tvar count int\n\tvar num int\n\tfor {\n\t\tcount = c.countToEnd()\n\t\tif nbytes - num < count {\n\t\t\tcount = nbytes - num\n\t\t}\n\t\tif count <= 0 {\n\t\t\tbreak\n\t\t}\n\t\tc.tail = (c.tail + count) & (len(c.buf) - 1)\n\t\tnum += count\n\t}\n\treturn num\n}", "func (buf *queueBuffer) Len() uint64 {\n\treturn buf.depth\n}", "func (b *Buffer) Size() int { return b.size }", "func (c *RingBuffer) Len() int {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\treturn c.len\n}", "func (q *queue) Length() uint64 {\n\treturn q.Tail - q.Head\n}", "func (rcv *Buffer) Length() int64 {\n\treturn rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(8))\n}", "func (b *Buffer) Len() int {\n\treturn b.Width * b.Height\n}", "func (r *textprotoReader) skipSpace() int {\n\tn := 0\n\tfor {\n\t\tc, err := r.R.ReadByte()\n\t\tif err != nil {\n\t\t\t// Bufio will keep err until next read.\n\t\t\tbreak\n\t\t}\n\t\tif c != ' ' && c != '\\t' {\n\t\t\tr.R.UnreadByte()\n\t\t\tbreak\n\t\t}\n\t\tn++\n\t}\n\treturn n\n}", "func (u *Allocator) GetRemainingPGCount() int {\n\tu.pgLock.Lock()\n\tdefer u.pgLock.Unlock()\n\n\treturn u.pgVlanMgr.GetFreeVlanCount() / 2\n}", "func (rb *RingBuffer) Len() int {\n\trb.lock.RLock()\n\tdefer rb.lock.RUnlock()\n\tif n := len(rb.data); rb.seq < uint64(n) {\n\t\treturn int(rb.seq)\n\t} else {\n\t\treturn n\n\t}\n}", "func (p packPlan) getAvailSpace(cell openCell) (length, width uint8) {\n\tvar test bool\n\tvar i uint8\n\n\tif test, _ = p.isEmptyCell(cell.col, cell.row); !test {\n\t\treturn 0, 0\n\t}\n\n\tfor i, test = cell.col+1, true; test; i++ {\n\t\ttest, _ = p.isEmptyCell(i, cell.row)\n\t}\n\tlength = i - cell.col - 1\n\n\t// we will never 'undercut' a row, so width always extends\n\t// to the bottom edge of the pallet\n\twidth = palletWidth - cell.row\n\treturn\n}", "func SpaceAvailForPath(path string) (uint64, error) {\n\tstat := &syscall.Statfs_t{}\n\terr := syscall.Statfs(path, stat)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn (uint64(stat.Frsize) * stat.Bavail) / 1024, nil\n}", "func (b *Buddy) UsedSpaceSize() int {\n\treturn b.usedSpaceSize\n}", "func (pk PacketBufferPtr) AvailableHeaderBytes() int {\n\treturn pk.reserved - pk.pushed\n}", "func bytesToClear(total, free int64, quotaPct, lowWatermark, highWatermark uint64) uint64 {\n\tused := total - free\n\tquotaAllowed := total * (int64)(quotaPct) / 100\n\thighWMUsage := total * (int64)(highWatermark*quotaPct) / (100 * 100)\n\tif used < highWMUsage {\n\t\treturn 0\n\t}\n\t// Return bytes needed to reach low watermark.\n\tlowWMUsage := total * (int64)(lowWatermark*quotaPct) / (100 * 100)\n\treturn (uint64)(math.Min(float64(quotaAllowed), math.Max(0.0, float64(used-lowWMUsage))))\n}", "func (b *CompactableBuffer) Count() int {\n\treturn int(atomic.LoadInt64(&b.count))\n}", "func (b *BufferPool) Put(data interface{}) (n int64, err error) {\n\tvar putData []byte\n\tvar putDataLength int64\n\tvar blackspaceLength int64\n\tswitch info := data.(type) {\n\tcase string:\n\t\tputData = []byte(info)\n\t\tputDataLength = int64(len(putData))\n\tcase []byte:\n\t\tputData = info\n\t\tputDataLength = int64(len(putData))\n\tdefault:\n\t\treturn 0, TYPEERROR\n\t}\n\n\tb.Lock()\n\t// free buffer size smaller than data size which will be written to.\n\tif b.Free < int64(len(putData)) 
{\n\t\taddRate := math.Ceil(float64(putDataLength) / float64(b.Size))\n\t\tif addRate <= 1 {\n\t\t\taddRate = 2\n\t\t}\n\t\tif b.AutoIncrement == true {\n\t\t\tblackspaceLength = b.Size*int64(addRate) - b.Used - putDataLength\n\t\t} else {\n\t\t\treturn 0, BUFFERNOTENOUGH\n\t\t}\n\t} else {\n\t\tblackspaceLength = b.Free - putDataLength\n\t}\n\tb.Data = append(b.Data[:b.Used], putData...)\n\tb.Data = append(b.Data, make([]byte, blackspaceLength)...)\n\tb.Used = b.Used + putDataLength\n\tb.Free = blackspaceLength\n\tb.Size = b.Used + b.Free\n\tb.Unlock()\n\treturn putDataLength, nil\n}", "func (pool *ComplexPool) SizeUnused() int {\n\treturn pool.Unused.Length()\n}", "func (this *CirCleQueue)Size() int{\n return (this.tail + this.maxSize- this.head) % this.maxSize\n}", "func (b *Writer) Available() int { return len(b.buf) - b.n }", "func getRemainingDiskSpace(ctxPtr *volumemgrContext) (uint64, error) {\n\n\tvar totalDiskSize uint64\n\n\tpubContentTree := ctxPtr.pubContentTreeStatus\n\titemsContentTree := pubContentTree.GetAll()\n\tfor _, iterContentTreeStatusJSON := range itemsContentTree {\n\t\titerContentTreeStatus := iterContentTreeStatusJSON.(types.ContentTreeStatus)\n\t\tif iterContentTreeStatus.State < types.LOADED {\n\t\t\tlog.Tracef(\"Content tree %s State %d < LOADED\",\n\t\t\t\titerContentTreeStatus.Key(), iterContentTreeStatus.State)\n\t\t\tcontinue\n\t\t}\n\t\ttotalDiskSize += uint64(iterContentTreeStatus.CurrentSize)\n\t}\n\n\tpubVolume := ctxPtr.pubVolumeStatus\n\titemsVolume := pubVolume.GetAll()\n\tfor _, iterVolumeStatusJSON := range itemsVolume {\n\t\titerVolumeStatus := iterVolumeStatusJSON.(types.VolumeStatus)\n\t\t// we start consume space when moving into CREATING_VOLUME state\n\t\tif iterVolumeStatus.State < types.CREATING_VOLUME {\n\t\t\tlog.Tracef(\"Volume %s State %d < CREATING_VOLUME\",\n\t\t\t\titerVolumeStatus.Key(), iterVolumeStatus.State)\n\t\t\tcontinue\n\t\t}\n\t\ttotalDiskSize += volumehandlers.GetVolumeHandler(log, ctxPtr, &iterVolumeStatus).UsageFromStatus()\n\t}\n\tdeviceDiskUsage, err := diskmetrics.PersistUsageStat(log)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Failed to get diskUsage for /persist. 
err: %s\", err)\n\t\tlog.Error(err)\n\t\treturn 0, err\n\t}\n\tdeviceDiskSize := deviceDiskUsage.Total\n\tdiskReservedForDom0 := diskmetrics.Dom0DiskReservedSize(log, ctxPtr.globalConfig, deviceDiskSize)\n\tvar allowedDeviceDiskSize uint64\n\tif deviceDiskSize < diskReservedForDom0 {\n\t\terr = fmt.Errorf(\"Total Disk Size(%d) <= diskReservedForDom0(%d)\",\n\t\t\tdeviceDiskSize, diskReservedForDom0)\n\t\tlog.Errorf(\"***getRemainingDiskSpace: err: %s\", err)\n\t\treturn uint64(0), err\n\t}\n\tallowedDeviceDiskSize = deviceDiskSize - diskReservedForDom0\n\tif allowedDeviceDiskSize < totalDiskSize {\n\t\treturn 0, nil\n\t} else {\n\t\treturn allowedDeviceDiskSize - totalDiskSize, nil\n\t}\n}", "func (rb *RingBuffer[T]) Len() int {\n\tif rb == nil {\n\t\treturn 0\n\t}\n\trb.mu.Lock()\n\tdefer rb.mu.Unlock()\n\treturn len(rb.buf)\n}", "func (b *Buffer) Len() uint64 {\n\tb.mu.RLock()\n\tdefer b.mu.RUnlock()\n\treturn b.length\n}", "func (b *SafeBuffer) Len() int {\n\tb.m.Lock()\n\tdefer b.m.Unlock()\n\treturn b.b.Len()\n}", "func (p *Plan) GetSpace() int {\n\tif p == nil || p.Space == nil {\n\t\treturn 0\n\t}\n\treturn *p.Space\n}", "func (q *queue) Length() int {\n\treturn len(q.channel)\n}", "func (n *RNSNum) size() int {\n\treturn len(n.buffer)\n}", "func (s *safeBuffer) Len() int {\n\ts.Lock()\n\tdefer s.Unlock()\n\treturn s.buf.Len()\n}", "func (q *UnsafeQueue64) Capacity() uint64 {\n\treturn q.capacity\n}", "func (b *CompactableBuffer) RemovedCount() int {\n\treturn int(atomic.LoadInt64(&b.removedCount))\n}", "func (q *UnsafeQueue16) Capacity() uint32 {\n\treturn q.capacity\n}", "func (b *Buffer) size() int {\n\treturn len(b.data)\n}", "func (b *Buddy) SpaceSize() int {\n\treturn b.spaceSize\n}", "func (o *txSocketQueue) getFreeSize() uint32 {\n\treturn o.sb_hiwat - o.getSize()\n}", "func (c CountUnit) Space(s SpaceUnit, dimension int8) MetricUnit {\n\treturn (&metricUnit{uint32(c)}).Space(s, dimension)\n}", "func (p *Lexer) Space() (int, byte) {\n\n\t// Need a bufio that unreads many bytes for the Block case\n\n\tn := 0\n\tm := 0\n\n\tfor {\n\t\tc, _ := p.Byte()\n\t\tif c != '\\t' && c != ' ' {\n\t\t\tbreak\n\t\t}\n\t\tif c == ' ' {\n\t\t\tn++\n\t\t} else {\n\t\t\tm++\n\t\t}\n\n\t}\n\n\tp.UnreadByte()\n\n\tvar r byte\n\tif m == 0 {\n\t\tr = ' '\n\t} else if n == 0 {\n\t\tr = '\\t'\n\t}\n\n\treturn n + m, r\n}", "func (cb *Buffer) Size() int {\n\treturn len(cb.buffer)\n}", "func (g *Graph) MissingLen() int {\n\tg.RLock()\n\tnum := len(g.missing)\n\tg.RUnlock()\n\n\treturn num\n}", "func BufferSize() uint {\n\treturn uint(globalBufferSize)\n}", "func (s *f64) Capacity() int {\n\tif s.channels == 0 {\n\t\treturn 0\n\t}\n\treturn cap(s.buffer) / int(s.channels)\n}", "func (q *Queue) Len() int {\n\tlength := 0\n\n\tfor _, current := range q.Items {\n\t\tif !current.IsReserved() {\n\t\t\tlength++\n\t\t}\n\t}\n\n\treturn length\n}", "func (p *Packet) Size() int {\n\treturn len(p.Buf)\n}", "func (jbobject *JavaNioCharBuffer) Length() int {\n\tjret, err := jbobject.CallMethod(javabind.GetEnv(), \"length\", javabind.Int)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn jret.(int)\n}", "func (h *MaxHeap) Used() int {\n\treturn h.used\n}", "func (r *RingT[T]) Len() int {\n\treturn int((r.head - r.tail) & r.mask)\n}", "func updateRemainingSpace(ctx *downloaderContext) {\n\n\tctx.globalStatus.RemainingSpace = ctx.globalConfig.MaxSpace -\n\t\tctx.globalStatus.UsedSpace - ctx.globalStatus.ReservedSpace\n\n\tlog.Infof(\"RemainingSpace %d, maxspace %d, usedspace %d, reserved 
%d\\n\",\n\t\tctx.globalStatus.RemainingSpace, ctx.globalConfig.MaxSpace,\n\t\tctx.globalStatus.UsedSpace, ctx.globalStatus.ReservedSpace)\n}", "func (n Nil) Length() int {\n\treturn 0\n}", "func (b *Buffer) Len() int { return b.length }", "func (t Fixed) Capacity() int {\n\treturn cap(t)\n}", "func (pb *PacketBuffer) PacketCap() int {\n\treturn len(pb.offsets)\n}", "func (q *Queue) Length() uint64 {\n\tq.RLock()\n\tdefer q.RUnlock()\n\treturn q.length()\n}", "func (self *bipbuf_t) Size() uint32{\n\treturn self.size\n}", "func (e *Encoder) UnusedBuffer() []byte {\n\t// NOTE: We don't return e.buf[len(e.buf):cap(e.buf)] since WriteValue would\n\t// need to take special care to avoid mangling the data while reformatting.\n\t// WriteValue can't easily identify whether the input RawValue aliases e.buf\n\t// without using unsafe.Pointer. Thus, we just return a different buffer.\n\t// Should this ever alias e.buf, we need to consider how it operates with\n\t// the specialized performance optimization for bytes.Buffer.\n\tn := 1 << bits.Len(uint(e.maxValue|63)) // fast approximation for max length\n\tif cap(e.unusedCache) < n {\n\t\te.unusedCache = make([]byte, 0, n)\n\t}\n\treturn e.unusedCache\n}", "func (q *UnsafeQueue16) Quantity() uint16 {\n\tif q.putPos >= q.getPos {\n\t\treturn q.putPos - q.getPos\n\t}\n\treturn q.capMod + (q.putPos - q.getPos)\n}", "func (t *freeSpaceManager) totalFreeSpaceSize() uint64 {\n\tt.Lock()\n\tdefer t.Unlock()\n\tt.waitForExtractedFreeSpaces()\n\treturn atomic.LoadUint64(&t.totalFreeSpace)\n}", "func (h *MinHeap) Used() int {\n\treturn h.used\n}", "func (h ReqHeap) Len() int { return len(h) }", "func (q *UnsafeQueue64) Quantity() uint64 {\n\tif q.putPos >= q.getPos {\n\t\treturn q.putPos - q.getPos\n\t}\n\treturn q.capMod + (q.putPos - q.getPos)\n}", "func (r *RlogcHeap) Len() int { return r.queue.Len() }", "func (queue *Queue) Length() int {\n\treturn len(queue.data)\n}", "func (this *byteBuffer) SlotCount() int {\n\treturn this.slotCount\n}", "func (b *Buffer) Cap() int { return len(b.buf) }", "func (b *buffer) GetBytesConsumed() int {\n\treturn b.position\n}", "func (q *BytesQueue) Len() int {\n\treturn q.count\n}", "func (sc *simpleLRUStatsCache) BytesConsumed() int64 {\n\tsc.mu.Lock()\n\tdefer sc.mu.Unlock()\n\treturn sc.memTracker.BytesConsumed()\n}", "func (bin *Bin) Remaining() Size {\n\treturn bin.Capacity - bin.Usage\n}", "func (l *Limiter) remaining() int {\n\tif remaining := int(l.bucket.Available()); remaining > 0 {\n\t\treturn remaining\n\t}\n\treturn 0\n}", "func (q *Queue) Count() int {\n\treturn q.front - q.back + 1\n}", "func (p *Pool) Free() int {\n\treturn p.Cap() - p.Running()\n}" ]
[ "0.60508096", "0.60107887", "0.599063", "0.59441715", "0.5918014", "0.5911479", "0.5804458", "0.57233024", "0.5705884", "0.5685238", "0.56844336", "0.56709677", "0.5629115", "0.5553519", "0.5534923", "0.5512265", "0.55108804", "0.55047554", "0.54945487", "0.5472052", "0.54632306", "0.54413015", "0.54329324", "0.53770596", "0.5363905", "0.53527635", "0.5351922", "0.5349648", "0.53449047", "0.53398114", "0.53236014", "0.5302799", "0.5300461", "0.5284968", "0.5278776", "0.5276175", "0.52616256", "0.5260082", "0.52566594", "0.52523786", "0.52406335", "0.5238155", "0.5237123", "0.52101123", "0.52046824", "0.52040315", "0.5195538", "0.51910716", "0.5181482", "0.51716304", "0.5158624", "0.5157958", "0.5147953", "0.51255155", "0.51216185", "0.5121525", "0.51187354", "0.5109585", "0.5095408", "0.5085893", "0.5078979", "0.50716615", "0.5069153", "0.5066351", "0.50522786", "0.50510603", "0.5041181", "0.504054", "0.5032906", "0.50280136", "0.5011102", "0.5008708", "0.50010425", "0.49992666", "0.49933895", "0.49932232", "0.49920306", "0.49906972", "0.49794123", "0.49752963", "0.49635962", "0.4962407", "0.4956098", "0.494761", "0.4943492", "0.49305236", "0.4929506", "0.49253818", "0.4924364", "0.4919211", "0.49185288", "0.49184963", "0.49158916", "0.49098173", "0.49055514", "0.49028364", "0.49022192", "0.48993057", "0.4893814", "0.48846605" ]
0.7629871
0
Count returns the number of used bytes in a circular buffer.
func (c *CircBuf) Count() int {
	return (c.head - c.tail) & (len(c.buf) - 1)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (b *CompactableBuffer) Count() int {\n\treturn int(atomic.LoadInt64(&b.count))\n}", "func (r *ringBufferRateLimiter) Count(ref time.Time) int {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\treturn r.countUnsynced(ref)\n}", "func (s *Stream) Count() int {\n\treturn len(s.b) + s.stream.count()\n}", "func (c *RingBuffer) Len() int {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\treturn c.len\n}", "func (rb *RingBuffer) Length() int {\n\treturn rb.count\n}", "func (rb *RingBuffer[T]) Len() int {\n\tif rb == nil {\n\t\treturn 0\n\t}\n\trb.mu.Lock()\n\tdefer rb.mu.Unlock()\n\treturn len(rb.buf)\n}", "func (l *ChannelList) Count() int {\n\tc := 0\n\tfor i := 0; i < Conf.ChannelBucketCount; i++ {\n\t\tc += len(l.channels[i].data)\n\t}\n\treturn c\n}", "func (b *Buffer) Used() int {\n\treturn b.used\n}", "func (sw *scanWrap) Count() (int64, error) {\n\treturn sw.scan.Count()\n}", "func (c *countHashReader) Count() int {\n\treturn c.n\n}", "func (c *RingBuffer) Capacity() int {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\treturn len(c.buf)\n}", "func (b *Array) Count() (c int) {\n\tfor _, v := range b.Bits() {\n\t\tc += nSetBits(uintptr(v))\n\t}\n\treturn\n}", "func (bA *CompactBitArray) Count() int {\n\tif bA == nil {\n\t\treturn 0\n\t} else if bA.ExtraBitsStored == 0 {\n\t\treturn len(bA.Elems) * 8\n\t}\n\n\treturn (len(bA.Elems)-1)*8 + int(bA.ExtraBitsStored)\n}", "func (self *nativePduBuffer) Size() int {\n\treturn self.count\n}", "func count(in *os.File) []int {\n\tin.Seek(0, 0)\n\tdefer func() {in.Seek(0, 0)}()\n\tcounts := make([]int, 256)\n\trd := bufio.NewReader(in)\n\tb, err := rd.ReadByte()\n\tfor err == nil {\n\t\tcounts[b]++\n\t\tb, err = rd.ReadByte()\n\t}\n\treturn counts\n}", "func (c *Counter) Count() int64 { return c.count }", "func (h *HashReader) Count() uint64 {\n\treturn h.count\n}", "func (cb *Buffer) Size() int {\n\treturn len(cb.buffer)\n}", "func (b *Ring) Capacity() int {\n\tb.lock.RLock()\n\tdefer b.lock.RUnlock()\n\treturn len(b.buf)\n}", "func (t *TrafficClones) GetCount() int {\n\tif t == nil || t.Count == nil {\n\t\treturn 0\n\t}\n\treturn *t.Count\n}", "func (llrb *LLRB) Count() int64 {\n\treturn atomic.LoadInt64(&llrb.n_count)\n}", "func (c Command) Count() int {\n\treturn int(uint32(c) >> 3)\n}", "func (c *Consistent) Size() int {\n\treturn int(c.count)\n}", "func (c ShmCounter) Count() int64 {\n\treturn atomic.LoadInt64((*int64)(unsafe.Pointer(c)))\n}", "func (m *minioStorage) Count(ctx context.Context, params FileFilterParams) (int64, error) {\n\treturn -1, nil\n}", "func (b *Ring) Size() int {\n\tb.lock.RLock()\n\tdefer b.lock.RUnlock()\n\treturn b.size\n}", "func (rb *RingBuffer) Len() int {\n\trb.lock.RLock()\n\tdefer rb.lock.RUnlock()\n\tif n := len(rb.data); rb.seq < uint64(n) {\n\t\treturn int(rb.seq)\n\t} else {\n\t\treturn n\n\t}\n}", "func (fm *FileMapMutex) Count() (size int) {\n\tfm.mu.RLock()\n\tdefer fm.mu.RUnlock()\n\treturn len(fm.Files)\n}", "func (b *Buffer) Size() int { return b.size }", "func (b *Buffer) Len() uint64 {\n\tb.mu.RLock()\n\tdefer b.mu.RUnlock()\n\treturn b.length\n}", "func (v *Bitmap256) Count() int {\n\treturn bits.OnesCount64(v[0]) +\n\t\tbits.OnesCount64(v[1]) +\n\t\tbits.OnesCount64(v[2]) +\n\t\tbits.OnesCount64(v[3])\n}", "func (q *queue) Count() int {\n\tq.Lock()\n\tdefer q.Unlock()\n\n\treturn q.count\n}", "func (s *safeBuffer) Len() int {\n\ts.Lock()\n\tdefer s.Unlock()\n\treturn s.buf.Len()\n}", "func (NilCounter) Count() int64 { return 0 }", "func (NilCounter) Count() int64 { return 0 }", "func (q *Queue) Count() 
int {\n\tq.m.Lock()\n\tdefer q.m.Unlock()\n\treturn q.j - q.i\n}", "func (ctx Context) Count(input chan float64) (n uint) {\n\tfor _ = range input {\n\t\tn++\n\t}\n\n\treturn n\n}", "func (b *SafeBuffer) Len() int {\n\tb.m.Lock()\n\tdefer b.m.Unlock()\n\treturn b.b.Len()\n}", "func (c *countHashWriter) Count() int {\n\treturn c.n\n}", "func (c *CounterReader) Count() int64 {\n\treturn c.c\n}", "func (c *CircBuf) Consume(nbytes int) int {\n\tvar count int\n\tvar num int\n\tfor {\n\t\tcount = c.countToEnd()\n\t\tif nbytes - num < count {\n\t\t\tcount = nbytes - num\n\t\t}\n\t\tif count <= 0 {\n\t\t\tbreak\n\t\t}\n\t\tc.tail = (c.tail + count) & (len(c.buf) - 1)\n\t\tnum += count\n\t}\n\treturn num\n}", "func (s *SafeCount) Count() int {\n\treturn s.i\n}", "func (b *CompactableBuffer) RemovedCount() int {\n\treturn int(atomic.LoadInt64(&b.removedCount))\n}", "func (r *RingBuffer) Length() int {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tif r.wPos == r.rPos {\n\t\tif r.isFull {\n\t\t\treturn r.size\n\t\t}\n\t\treturn 0\n\t}\n\n\tif r.wPos > r.rPos {\n\t\treturn r.wPos - r.rPos\n\t}\n\n\treturn r.size - r.rPos + r.wPos\n}", "func (r *Receiver) countBytes(length uint64, update bool) error {\n\tif length > MaxTransfer {\n\t\treturn fmt.Errorf(\"file too large, %d\", length)\n\t}\n\tif r.byteLimit != 0 && r.totalBytes > uint64(r.byteLimit)-length {\n\t\treturn fmt.Errorf(\"file too large, %d\", length)\n\t}\n\tif update {\n\t\tr.totalBytes += length\n\t}\n\treturn nil\n}", "func (s MemoryStorage) Count(q Query) (int, error) {\n\tfmt.Println(\"LEN\", len(s.bookmarks))\n\treturn len(s.bookmarks), nil\n}", "func BytesCounter(r io.Reader) (int64, error) {\n\tvar readSizeTmp int\n\tvar err error\n\tbuf := make([]byte, 1024)\n\tvar total int64\n\tfor {\n\t\treadSizeTmp, err = r.Read(buf)\n\t\ttotal = total + int64(readSizeTmp)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif err == io.EOF {\n\t\t\treturn total, nil\n\t\t}\n\t}\n\treturn total, err\n}", "func (b *Buffer) TotalSize() int64 {\n\treturn int64(len(b.bufs) * allocBufLen)\n}", "func (r *Resource[R, T]) Count() int64 {\n\treturn atomic.LoadInt64(&r.count)\n}", "func (cache *Cache) Count() int {\n\tcache.mutex.RLock()\n\tcount := len(cache.items)\n\tcache.mutex.RUnlock()\n\treturn count\n}", "func (ct *CountingTransport) Count() int32 {\n\treturn atomic.LoadInt32(&ct.count)\n}", "func (r *ringBufferRateLimiter) countUnsynced(ref time.Time) int {\n\tbeginningOfWindow := ref.Add(-r.window)\n\n\t// This loop is a little gnarly, I know. We start at one before the cursor because that's\n\t// the newest event, and we're trying to count how many events are in the window; so\n\t// iterating backwards from the cursor and wrapping around to the end of the ring is the\n\t// same as iterating events in reverse chronological order starting with most recent. 
When\n\t// we encounter the first element that's outside the window, then eventsInWindow has the\n\t// correct count of events within the window.\n\tfor eventsInWindow := 0; eventsInWindow < len(r.ring); eventsInWindow++ {\n\t\t// start at cursor, add difference between ring length and offset (eventsInWindow),\n\t\t// then subtract 1 because we want to start 1 before cursor (newest event), then\n\t\t// modulus the ring length to wrap around if necessary\n\t\ti := (r.cursor + (len(r.ring) - eventsInWindow - 1)) % len(r.ring)\n\t\tif r.ring[i].Before(beginningOfWindow) {\n\t\t\treturn eventsInWindow\n\t\t}\n\t}\n\n\t// if we looped the entire ring, all events are within the window\n\treturn len(r.ring)\n}", "func (s *ScanKeys) Count() int {\n\treturn len(s.skbs)\n}", "func (q *Queue) Count() int {\n\treturn q.front - q.back + 1\n}", "func (b *Buffer) Len() int { return b.length }", "func (r *Ring) Size() int {\n\treturn (r.in - r.out + r.size) % r.size\n}", "func (ps *peerStore) Count(infoHash InfoHash) int {\n\tset := ps.Set(infoHash)\n\tif set == nil {\n\t\treturn 0\n\t}\n\n\treturn set.Size()\n}", "func (l *Layer) Count(value int) int {\n\tcount := 0\n\tfor _, b := range l.Bytes {\n\t\tif b == value {\n\t\t\tcount++\n\t\t}\n\t}\n\n\treturn count\n}", "func (m ConcurrentMap[K, V]) Count() int {\n\tcount := 0\n\tfor i := 0; i < SHARD_COUNT; i++ {\n\t\tshard := m.shards[i]\n\t\tshard.RLock()\n\t\tcount += len(shard.items)\n\t\tshard.RUnlock()\n\t}\n\treturn count\n}", "func (s *Stat) GetCount() int {\n\treturn s.n\n}", "func (b *Buffer) Len() int {\n\treturn b.Width * b.Height\n}", "func CountBytes(b []byte) (int, error) {\n\tvar count int\n\tfor len(b) >= 8 {\n\t\tv := binary.BigEndian.Uint64(b[:8])\n\t\tb = b[8:]\n\n\t\tsel := v >> 60\n\t\tif sel >= 16 {\n\t\t\treturn 0, fmt.Errorf(\"invalid selector value: %v\", sel)\n\t\t}\n\t\tcount += selector[sel].n\n\t}\n\n\tif len(b) > 0 {\n\t\treturn 0, fmt.Errorf(\"invalid slice len remaining: %v\", len(b))\n\t}\n\treturn count, nil\n}", "func (c CounterSnapshot) Count() int64 { return int64(c) }", "func (r *RingT[T]) Len() int {\n\treturn int((r.head - r.tail) & r.mask)\n}", "func (this *FeedableBuffer) GetUsedByteCountOverMinimum() int {\n\treturn len(this.Data) - this.minByteCount\n}", "func (p *MemProvider) Count() int {\n\treturn p.list.Len()\n}", "func (r *room) count() int {\n\tr.RLock()\n\tdefer r.RUnlock()\n\treturn len(r.peers)\n}", "func (bits *BitArray) Count() int {\n\tlength := 0\n\n\tfor i := 0; i < bits.lenpad; i += _BytesPW {\n\t\tw := bytes2word(bits.bytes[i : i+_BytesPW])\n\t\tlength += countbits64(w)\n\t}\n\n\treturn length\n}", "func (c *standardResettingCounter) Count() int64 {\n\treturn atomic.LoadInt64(&c.count)\n}", "func (c *Counter) Len() int {\n\treturn hdrLen + c.bits.Len()\n}", "func (sb *SeekableBuffer) Len() int {\n\treturn len(sb.data)\n\n}", "func (this *byteBuffer) SlotCount() int {\n\treturn this.slotCount\n}", "func (mp *Mempool) CountInUse() int {\n\treturn int(C.rte_mempool_in_use_count(mp.ptr()))\n}", "func (data *Data) GetCount() float64 {\n\tdata.RLock()\n\tdefer data.RUnlock()\n\treturn data.count\n}", "func (r *Registry) Count() int {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\treturn len(r.rateLimiters)\n}", "func (h *heapTracker) Count() int {\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\treturn h.mu.rs.Len()\n}", "func (jbobject *JavaNioCharBuffer) Length() int {\n\tjret, err := jbobject.CallMethod(javabind.GetEnv(), \"length\", javabind.Int)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn jret.(int)\n}", 
"func (c *Signature) Count() int {\n\tif c == nil || c.CountValue == nil {\n\t\treturn 0\n\t}\n\treturn toolbox.AsInt(c.CountValue)\n}", "func (b *Buffer) size() int {\n\treturn len(b.data)\n}", "func (s *Pool) Len() int { return int(atomic.LoadUint32(&s.avail)) }", "func (m *Cmap) Count() int {\n\treturn int(atomic.LoadInt64(&m.count))\n}", "func (b *Buffer) Len() int {\n\treturn len(b.buf)\n}", "func (o GroupContainerGpuLimitPtrOutput) Count() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *GroupContainerGpuLimit) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Count\n\t}).(pulumi.IntPtrOutput)\n}", "func (g *GPIOControllerPCF8574T) Count() int {\n\treturn pinCount\n}", "func (c *StepLookbackAccumulator) BufferStepCount() int {\n\treturn len(c.unconsumed)\n}", "func (s *Stack) Count() uint {\n\treturn s.count\n}", "func (s Stream) Count() (int, error) {\n\tcounter := 0\n\n\terr := s.Iterate(func(r Record) error {\n\t\tcounter++\n\t\treturn nil\n\t})\n\n\treturn counter, err\n}", "func (c *Canvas) Count() int {\n\treturn len(c.paint)\n}", "func (r Ring) Capacity() int {\n\treturn len(r.buff)\n}", "func (pk PacketBufferPtr) Size() int {\n\treturn int(pk.buf.Size()) - pk.headerOffset()\n}", "func (s *Server) Count() int {\n\ts.cond.L.Lock()\n\tdefer s.cond.L.Unlock()\n\treturn len(s.points)\n}", "func (c *Cache) GetCount() int {\n\treturn len(c.items)\n}", "func (b *FixedBuffer) Len() int {\n\treturn b.w - b.r\n}", "func (d *Deck) Count() int {\n\treturn len(d.Cards)\n}", "func (b *Buffer) Len() (n int) {\n\tfor _, l := range b.lines {\n\t\tn += utf8.RuneCount(l.data)\n\t}\n\n\tif len(b.lines) > 1 {\n\t\tn += len(b.lines) - 1 // account for newlines\n\t}\n\n\treturn\n}", "func (this *List) Len() int {\n this.lock.RLock()\n this.lock.RUnlock()\n\n return len(this.counters)\n}", "func (r *PortAllocator) Used() int {\n\treturn r.portRange.Size - r.alloc.Free()\n}", "func CountOpenFiles() int {\n\tt.Lock()\n\tdefer t.Unlock()\n\treturn len(t.entries)\n}", "func (b Bits) Count() int {\n\treturn bits.OnesCount64(uint64(b))\n}", "func (buf *queueBuffer) Len() uint64 {\n\treturn buf.depth\n}" ]
[ "0.73884064", "0.70699453", "0.67749035", "0.64650244", "0.6427924", "0.6411939", "0.6388923", "0.6383929", "0.6334693", "0.63088864", "0.63073707", "0.6238066", "0.62366873", "0.62118644", "0.6146236", "0.6129091", "0.61287105", "0.61233836", "0.6118108", "0.6118099", "0.6108712", "0.60895914", "0.60883373", "0.6078632", "0.6076552", "0.6074999", "0.6063112", "0.605549", "0.60519624", "0.6049362", "0.60473263", "0.60215336", "0.6018773", "0.6014785", "0.6014785", "0.5994374", "0.5982092", "0.5927105", "0.59143907", "0.5912456", "0.5899797", "0.589476", "0.58880854", "0.5886575", "0.5886451", "0.58629614", "0.5861571", "0.5857954", "0.58559406", "0.5842871", "0.58223814", "0.58128864", "0.5811941", "0.580824", "0.5798193", "0.57925314", "0.5779598", "0.57750326", "0.57685876", "0.57637215", "0.5757524", "0.57532364", "0.57521504", "0.57512593", "0.5750888", "0.57430893", "0.5742609", "0.5732658", "0.5719326", "0.5699158", "0.5698671", "0.5691441", "0.5687038", "0.5684977", "0.56841815", "0.56802136", "0.5676337", "0.56746083", "0.5673959", "0.5663692", "0.5660936", "0.5655775", "0.5655734", "0.56517375", "0.56409246", "0.56394666", "0.5638454", "0.5633845", "0.56332713", "0.56309146", "0.5629511", "0.56222236", "0.56201845", "0.56134546", "0.56114626", "0.56072366", "0.5597539", "0.5594816", "0.55819345", "0.55720055" ]
0.77011263
0
Read reads data from a circular buffer. The number of bytes read is returned.
func (c *CircBuf) Read(buf []byte) int {
	var count int
	var num int
	for {
		count = c.countToEnd()
		if len(buf) - num < count {
			count = len(buf) - num
		}
		if count <= 0 {
			break
		}
		copy(buf[num : num + count], c.buf[c.tail : c.tail + count])
		c.tail = (c.tail + count) & (len(c.buf) - 1)
		num += count
	}
	return num
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (rb *RingBuffer) Read(p []byte) (int, error) {\n\texpected := len(p)\n\tn := 0\n\n\tif rb.writeOff > rb.readOff {\n\t\tn += copy(p, rb.data[rb.readOff:rb.writeOff])\n\t} else {\n\t\tpos := copy(p, rb.data[rb.readOff:])\n\t\tn += pos\n\n\t\tif n < expected {\n\t\t\tn += copy(p[pos:], rb.data[:rb.writeOff])\n\t\t}\n\t}\n\n\trb.count -= n\n\trb.readOff += (rb.readOff + n) % rb.cap\n\n\treturn n, nil\n}", "func (cr *CountingReader) Read(dst []byte) (int, error) {\n\n\tread, err := cr.R.Read(dst)\n\tcr.bytesRead += int64(read)\n\treturn read, err\n}", "func (b *Buffer) Read(out []byte) (n int, err error) {\n\tif b.readCursor >= b.Size() {\n\t\t// we read the entire buffer, let's loop back to the beginning\n\t\tb.readCursor = 0\n\t} else if b.readCursor+int64(len(out)) > b.Size() {\n\t\t// we don't have enough data in our buffer to fill the passed buffer\n\t\t// we need to do multiple passes\n\t\tn := copy(out, b.data[b.offset+b.readCursor:])\n\t\tb.readCursor += int64(n)\n\t\t// TMP check, should remove\n\t\tif b.readCursor != b.Size() {\n\t\t\tpanic(fmt.Sprintf(\"off by one much? %d - %d\", b.readCursor, b.Size()))\n\t\t}\n\t\tn2, _ := b.Read(out[n:])\n\t\tb.readCursor += int64(n2)\n\t\treturn int(n + n2), nil\n\t}\n\tn = copy(out, b.data[b.offset+b.readCursor:])\n\treturn\n}", "func (b *Buffer) Read(data []byte, c Cursor) (n int, next Cursor, err error) {\n\tb.mu.RLock()\n\tdefer b.mu.RUnlock()\n\n\tseq, offset := c.seq, c.offset\n\n\tif seq >= b.nextSeq || offset > b.last {\n\t\treturn 0, next, ErrNotArrived\n\t}\n\n\tf := b.frame(offset)\n\tif f.size() == 0 || f.seq() != seq {\n\t\treturn b.readFirst(data)\n\t}\n\n\treturn b.readOffset(data, offset)\n}", "func (bb *BytesBuffer) Read(p []byte) (n int, err error) {\n\treturn bb.reader.Read(p)\n}", "func (d *Decoder) NRead() int64 {\n\tr := d.dec.Buffered().(*bytes.Reader)\n\treturn d.r.n - int64(r.Len())\n}", "func (c *RingBuffer) Read(p []byte) (int, error) {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\tn, err := c.peek(p)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\treturn c.consume(n), nil\n}", "func (r *RingBuffer) Read(p []byte) (n int, err error) {\n\tif len(p) == 0 {\n\t\treturn 0, nil\n\t}\n\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tn, err = r.read(p)\n\treturn\n}", "func (sb *SeekableBuffer) Read(p []byte) (n int, err error) {\n\tdefer func() {\n\t\tif state := recover(); state != nil {\n\t\t\terr = state.(error)\n\t\t}\n\t}()\n\n\tif sb.position >= len64(sb.data) {\n\t\treturn 0, io.EOF\n\t}\n\n\tn = copy(p, sb.data[sb.position:])\n\tsb.position += int64(n)\n\n\treturn n, nil\n\n}", "func (cb *Buffer) Read(buf []byte) (int, error) {\n\tif buf == nil || len(buf) == 0 {\n\t\treturn 0, fmt.Errorf(\"Target buffer is null or empty\")\n\t}\n\n\ttoRead := min(len(buf), cb.ReadAvailability())\n\n\tlBytes := min(cb.rpos, toRead)\n\tcopy(buf[toRead-lBytes:toRead], cb.buffer[cb.rpos-lBytes:cb.rpos])\n\n\tif toRead > lBytes {\n\t\trBytes := toRead - lBytes\n\t\tcopy(buf[:rBytes], cb.buffer[len(cb.buffer)-rBytes:len(cb.buffer)])\n\t\tcb.rpos = len(cb.buffer) - rBytes\n\t} else {\n\t\tcb.rpos -= lBytes\n\t}\n\n\tcb.full = false\n\treturn toRead, nil\n}", "func (pipe *slimPipe) Read(buffer []byte) (int, error) {\n\terrChannel := make(chan error)\n\tcountChannel := make(chan int)\n\tgo func() {\n\t\treadBytes, err := io.ReadAtLeast(pipe.reader, buffer, 1)\n\t\tif err != nil {\n\t\t\terrChannel <- err\n\t\t} else {\n\t\t\tcountChannel <- readBytes\n\t\t}\n\t\tclose(errChannel)\n\t\tclose(countChannel)\n\t}()\n\tselect {\n\tcase count := 
<-countChannel:\n\t\treturn count, nil\n\tcase err := <-errChannel:\n\t\treturn 0, err\n\tcase <-time.After(pipe.timeout):\n\t\treturn 0, fmt.Errorf(\"Timeout (%v)\", pipe.timeout)\n\t}\n}", "func (r *wrappedReader) Read(p []byte) (n int, err error) {\n\tdefer func() {\n\t\tr.totalBytesRead += n\n\t}()\n\n\tif !r.useBuf {\n\t\tif r.bufR != nil {\n\t\t\treturn r.bufR.Read(p)\n\t\t}\n\t\treturn r.r.Read(p)\n\t}\n\n\tn = copy(p, r.buf)\n\tr.useBuf = false\n\tr.buf = []byte{}\n\n\tif n < len(p) {\n\t\tl := io.LimitReader(r.bufR, int64(len(p)-n))\n\t\tb, err := l.Read(p[n:])\n\t\tn += b\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\t}\n\n\treturn n, nil\n}", "func (s *Reader) Read(p []byte) (int, error) {\n\tlimiter := s.getRateLimit()\n\tif limiter == nil {\n\t\treturn s.r.Read(p)\n\t}\n\tn, err := s.r.Read(p)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\t// log.Printf(\"read: %d\", n)\n\tif err := limiter.WaitN(s.ctx, n); err != nil {\n\t\treturn n, err\n\t}\n\treturn n, nil\n}", "func (a *reader) Read(p []byte) (n int, err error) {\n\tif a.err != nil {\n\t\treturn 0, a.err\n\t}\n\t// Swap buffer and maybe return error\n\terr = a.fill()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t// Copy what we can\n\tn = copy(p, a.cur.buffer())\n\ta.cur.inc(n)\n\n\tif a.cur.isEmpty() {\n\t\t// Return current, so a fetch can start.\n\t\tif a.cur != nil {\n\t\t\t// If at end of buffer, return any error, if present\n\t\t\ta.err = a.cur.err\n\t\t\ta.reuse <- a.cur\n\t\t\ta.cur = nil\n\t\t}\n\t\treturn n, a.err\n\t}\n\treturn n, nil\n}", "func (r *Reader) Read(data []byte) (int, error) {\n\tvar totalRead int\n\tdefer func() {\n\t\tr.len -= int64(totalRead)\n\t}()\n\tfor len(data) > 0 {\n\t\tn, err := r.r.Read(data)\n\t\tdata = data[n:]\n\t\ttotalRead += n\n\t\tif err != nil {\n\t\t\t// If all DataRefs have been read, then io.EOF.\n\t\t\tif len(r.dataRefs) == 0 {\n\t\t\t\treturn totalRead, io.EOF\n\t\t\t}\n\t\t\tif err := r.nextDataRef(); err != nil {\n\t\t\t\treturn totalRead, err\n\t\t\t}\n\t\t}\n\t}\n\treturn totalRead, nil\n\n}", "func (bb *blockingBuffer) Read(p []byte) (int, error) {\n\tfor {\n\t\tbb.mu.Lock()\n\t\tn, err := bb.buf.Read(p)\n\t\tif n > 0 || err != io.EOF {\n\t\t\tif bb.buf.Len() == 0 {\n\t\t\t\t// Reset the readCh.\n\t\t\t\tselect {\n\t\t\t\tcase <-bb.readCh:\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t}\n\t\t\tbb.mu.Unlock()\n\t\t\treturn n, err\n\t\t}\n\t\tbb.mu.Unlock()\n\n\t\t// Wait for new data.\n\t\t<-bb.readCh\n\t}\n}", "func (b *FixedBuffer) Read(p []byte) (n int, err error) {\n\tif b.r == b.w {\n\t\treturn 0, errReadEmpty\n\t}\n\tn = copy(p, b.buf[b.r:b.w])\n\tb.r += n\n\tif b.r == b.w {\n\t\tb.r = 0\n\t\tb.w = 0\n\t}\n\treturn n, nil\n}", "func (c *countingReader) Read(p []byte) (int, error) {\n\tn, err := c.reader.Read(p)\n\tc.count += int64(n)\n\treturn n, err\n}", "func (o *ODirectReader) Read(buf []byte) (n int, err error) {\n\tif o.err != nil && (len(o.buf) == 0 || !o.seenRead) {\n\t\treturn 0, o.err\n\t}\n\tif o.buf == nil {\n\t\tif o.SmallFile {\n\t\t\to.bufp = ODirectPoolSmall.Get().(*[]byte)\n\t\t} else {\n\t\t\to.bufp = ODirectPoolLarge.Get().(*[]byte)\n\t\t}\n\t}\n\tif !o.seenRead {\n\t\to.buf = *o.bufp\n\t\tn, err = o.File.Read(o.buf)\n\t\tif err != nil && err != io.EOF {\n\t\t\tif isSysErrInvalidArg(err) {\n\t\t\t\tif err = disk.DisableDirectIO(o.File); err != nil {\n\t\t\t\t\to.err = err\n\t\t\t\t\treturn n, err\n\t\t\t\t}\n\t\t\t\tn, err = o.File.Read(o.buf)\n\t\t\t}\n\t\t\tif err != nil && err != io.EOF {\n\t\t\t\to.err = err\n\t\t\t\treturn n, 
err\n\t\t\t}\n\t\t}\n\t\tif n == 0 {\n\t\t\t// err is likely io.EOF\n\t\t\to.err = err\n\t\t\treturn n, err\n\t\t}\n\t\to.err = err\n\t\to.buf = o.buf[:n]\n\t\to.seenRead = true\n\t}\n\tif len(buf) >= len(o.buf) {\n\t\tn = copy(buf, o.buf)\n\t\to.seenRead = false\n\t\treturn n, o.err\n\t}\n\tn = copy(buf, o.buf)\n\to.buf = o.buf[n:]\n\t// There is more left in buffer, do not return any EOF yet.\n\treturn n, nil\n}", "func (download *Download) Read(data []byte) (n int, err error) {\n\tif download.closed {\n\t\treturn 0, Error.New(\"already closed\")\n\t}\n\n\tif download.reader == nil {\n\t\terr = download.resetReader(download.offset)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tif download.limit == 0 {\n\t\treturn 0, io.EOF\n\t}\n\tif download.limit > 0 && download.limit < int64(len(data)) {\n\t\tdata = data[:download.limit]\n\t}\n\tn, err = download.reader.Read(data)\n\tif download.limit >= 0 {\n\t\tdownload.limit -= int64(n)\n\t}\n\tdownload.offset += int64(n)\n\n\treturn n, err\n}", "func (b *Buffer) Read(reader io.Reader) (error) {\n\tif b.isCompacted {\n\t\tb.isCompacted = false\n\n\t\t// we want to read into the buffer from where it last was,\n\t\tvar slice = b.internal[b.index:]\n\t\tvar length, err = reader.Read(slice)\n\t\tb.index = 0 // start the index over, so reading starts from beginning again\n\t\tb.length += uint32(length) // increment the number of bytes read\n\t\treturn err\n\t}\n\tvar length, err = reader.Read(b.internal)\n\tb.index = 0\n\tb.length = uint32(length)\n\treturn err\n}", "func (c *ByteBuffer) ReadN(n int) (r []byte, err error) {\n\tif n > 0 {\n\t\tif c.Len() >= n { // optimistic branching\n\t\t\tr = make([]byte, n)\n\t\t\t_, _ = c.Read(r)\n\t\t} else {\n\t\t\terr = ErrBufferNotEnoughByteToRead\n\t\t}\n\t}\n\treturn\n}", "func (b *SafeBuffer) Read(p []byte) (n int, err error) {\n\tb.m.RLock()\n\tdefer b.m.RUnlock()\n\treturn b.b.Read(p)\n}", "func (cr *CountingReader) Read(p []byte) (n int, err error) {\n\tn, err = cr.Reader.Read(p)\n\tcr.Total += int64(n)\n\treturn\n}", "func (r *trackingreader) Read(b []byte) (int, error) {\n\tn, err := r.Reader.Read(b)\n\tr.pos += int64(n)\n\treturn n, err\n}", "func (r *progressReader) Read(p []byte) (n int, err error) {\n\tr.lastRead = time.Now()\n\tlei, err := (*(r.reader)).Read(p)\n\tr.pos += int64(lei)\n\treturn lei, err\n}", "func (b *Buffer) Read(p []byte) (n int, err error) {\n\tb.m.RLock()\n\tdefer b.m.RUnlock()\n\treturn b.b.Read(p)\n}", "func (ch *Chunk) Read(p []byte) (n int, err error) {\n\tif ch == nil || ch.R == nil {\n\t\treturn 0, errors.New(\"nil chunk/reader pointer\")\n\t}\n\tn, err = ch.R.Read(p)\n\tch.Pos += n\n\treturn n, err\n}", "func BufferedRead(reader *bufio.Reader, msgBuf []byte) (uint64, error) {\n\tlen := uint64(0)\n\tvar lengthBuf [8]byte\n\t_, err := io.ReadFull(reader, lengthBuf[:])\n\tlength := binary.BigEndian.Uint64(lengthBuf[:])\n\tif err != nil {\n\t\treturn len, err\n\t}\n\tfor bytesRead := uint64(0); bytesRead < length; {\n\t\treadLen, err := reader.Read(msgBuf[bytesRead:])\n\t\tif err != nil {\n\t\t\treturn len, err\n\t\t}\n\t\tbytesRead += uint64(readLen)\n\t\tlen += uint64(readLen)\n\t}\n\treturn len, nil\n}", "func (c *TestConnection) Read(b []byte) (n int, err error) {\n toRet := 0\n if b == nil {\n return 0, errors.New(\"b cannot be nil\")\n }\n\n if c.ReadError != nil && c.TimesReadCalled == c.ThrowReadErrorAfter {\n return 0, c.ReadError\n }\n\n if len(c.ToRead) == 0 {\n return 0, nil\n } \n \n dataToRet := c.ToRead[0]\n buffLength := len(b)\n \n // b is big 
enough to hold dataToRet\n if buffLength >= len(dataToRet) {\n copy(b, []byte(dataToRet))\n c.ToRead = append(c.ToRead[:0], c.ToRead[1:]...) // remove the first element \n toRet = len(dataToRet)\n } else {\n // need to only return the maximum we can\n remains := dataToRet[buffLength:len(dataToRet)]\n c.ToRead[0] = remains // keep the remainder of the data\n copy(b, dataToRet[0:buffLength])\n toRet = buffLength\n }\n \n c.TimesReadCalled++\n return toRet, nil\n}", "func Read(b []byte) (n int, err error) {\n\treturn io.ReadFull(r, b)\n}", "func (s *Stream) Read(byteCount int) ([]byte, error) {\n\tdata := make([]byte, byteCount)\n\tif _, err := io.ReadFull(s.buffer, data); err != nil {\n\t\treturn []byte{}, err\n\t}\n\treturn data, nil\n}", "func (file *Remote) Read(buf []byte) (int, error) {\n\tfile.m.Lock()\n\tdefer file.m.Unlock()\n\n\tn, err := file.ReadAt(buf, int64(file.pos))\n\tfile.pos += uint64(n)\n\treturn n, err\n}", "func (s *DownloadStream) Read(buf []uint8) (int, error) {\n\t// acquire mutex\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\n\t// check if closed\n\tif s.closed {\n\t\treturn 0, gridfs.ErrStreamClosed\n\t}\n\n\t// ensure file is loaded\n\terr := s.load()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t// check position\n\tif s.position >= s.file.Length {\n\t\treturn 0, io.EOF\n\t}\n\n\t// fill buffer\n\tread := 0\n\tfor read < len(buf) {\n\t\t// check if buffer is empty\n\t\tif len(s.buffer) == 0 {\n\t\t\t// get next chunk\n\t\t\terr = s.next()\n\t\t\tif err == io.EOF {\n\t\t\t\t// only return EOF if no data has been read\n\t\t\t\tif read == 0 {\n\t\t\t\t\treturn 0, io.EOF\n\t\t\t\t}\n\n\t\t\t\treturn read, nil\n\t\t\t} else if err != nil {\n\t\t\t\treturn read, err\n\t\t\t}\n\t\t}\n\n\t\t// copy data\n\t\tn := copy(buf[read:], s.buffer)\n\n\t\t// resize buffer\n\t\ts.buffer = s.buffer[n:]\n\n\t\t// update position\n\t\ts.position += n\n\n\t\t// increment counter\n\t\tread += n\n\t}\n\n\treturn read, nil\n}", "func (r *Reader) Read(p []byte) (n int, err error) {\n\tr.ResetBuf(p)\n\tn, err = r.srcR.Read(r.buf)\n\treturn\n}", "func (w *WatchBuffer) Read(p []byte) (n int, err error) {\n\tif w.closed {\n\t\treturn 0, io.EOF\n\t}\n\tw.read <- p\n\tret := <-w.retc\n\treturn ret.n, ret.e\n}", "func (gc *gcsCache) Read(b []byte) (int, error) {\n\tif gc.closed {\n\t\treturn 0, os.ErrClosed\n\t} else if gc.offset >= gc.size {\n\t\treturn 0, io.EOF\n\t}\n\n\tr, err := gc.oh.NewRangeReader(gc.ctx, gc.offset, -1)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer r.Close()\n\n\tn, err := r.Read(b)\n\tgc.offset += int64(n)\n\n\treturn n, err\n}", "func (b *Buffer) Read(p []byte) (n int, err error) {\n\tbuf := b.Bytes()\n\tif len(buf) == 0 {\n\t\tif len(p) == 0 {\n\t\t\treturn 0, nil\n\t\t}\n\t\treturn 0, io.EOF\n\t}\n\n\tn = copy(p, buf)\n\treturn n, nil\n}", "func (b *Buffer) ReadNFrom(reader io.Reader, n int) (int, error) {\n\t// Loop until we've filled completed the read, run out of storage, or\n\t// encountered a read error.\n\tvar read, result int\n\tvar err error\n\tfor n > 0 && b.used != b.size && err == nil {\n\t\t// Compute the first available contiguous free storage segment.\n\t\tfreeStart := (b.start + b.used) % b.size\n\t\tfree := b.storage[freeStart:min(freeStart+(b.size-b.used), b.size)]\n\n\t\t// If the storage segment is larger than we need, then truncate it.\n\t\tif len(free) > n {\n\t\t\tfree = free[:n]\n\t\t}\n\n\t\t// Perform the read.\n\t\tread, err = reader.Read(free)\n\n\t\t// Update indices and tracking.\n\t\tresult += read\n\t\tb.used += read\n\t\tn -= 
read\n\t}\n\n\t// If we couldn't complete the read due to a lack of storage, then we need\n\t// to return an error. However, if a read error occurred simultaneously with\n\t// running out of storage, then we don't overwrite it.\n\tif n > 0 && b.used == b.size && err == nil {\n\t\terr = ErrBufferFull\n\t}\n\n\t// If we encountered io.EOF simultaneously with completing the read, then we\n\t// can clear the error.\n\tif err == io.EOF && n == 0 {\n\t\terr = nil\n\t}\n\n\t// Done.\n\treturn result, err\n}", "func (this *reader) ioRead(buffer []byte) (n int, err error) {\n\tn, err = this.ioReader.Read(buffer)\n\tif err != nil {\n\t\treturn\n\t}\n\tif n != len(buffer) {\n\t\terr = fmt.Errorf(\"Reading failed. Expected %v bytes but %v was read\",\n\t\t\tlen(buffer), n)\n\t}\n\treturn\n}", "func (p *ProgressReader) Read(b []byte) (n int, err error) {\n\tn, err = p.r.Read(b)\n\tp.updateProgress(int64(n))\n\treturn\n}", "func (s *Stream) Read(p []byte) (n int, err error) {\n\tn, err = s.rc.Read(p)\n\n\tif s.pos+n <= s.metaint {\n\t\ts.pos = s.pos + n\n\t\treturn n, err\n\t}\n\n\t// extract stream metadata\n\tmetadataStart := s.metaint - s.pos\n\tmetadataLength := int(p[metadataStart : metadataStart+1][0]) * 16\n\tif metadataLength > 0 {\n\t\tm := NewMetadata(p[metadataStart+1 : metadataStart+1+metadataLength])\n\t\tif !m.Equals(s.metadata) {\n\t\t\ts.metadata = m\n\t\t\tif s.MetadataCallbackFunc != nil {\n\t\t\t\ts.MetadataCallbackFunc(s.metadata)\n\t\t\t}\n\t\t}\n\t}\n\n\t// roll over position + metadata block\n\ts.pos = ((s.pos + n) - s.metaint) - metadataLength - 1\n\n\t// shift buffer data to account for metadata block\n\tcopy(p[metadataStart:], p[metadataStart+1+metadataLength:])\n\tn = n - 1 - metadataLength\n\n\treturn n, err\n}", "func (s *safeBuffer) Read(p []byte) (int, error) {\n\ts.Lock()\n\tdefer s.Unlock()\n\treturn s.buf.Read(p)\n}", "func (r *ReaderWithProgress) Read(p []byte) (n int, err error) {\n\tn, err = r.innerReadCloser.Read(p)\n\tif err == nil {\n\t\tr.progressStatus.ReportBytesProcessedCount(int64(n))\n\t}\n\treturn\n}", "func (framed *Reader) Read(buffer []byte) (n int, err error) {\n\tframed.mutex.Lock()\n\tdefer framed.mutex.Unlock()\n\n\tvar nb uint16\n\terr = binary.Read(framed.Stream, endianness, &nb)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tn = int(nb)\n\n\tbufferSize := len(buffer)\n\tif n > bufferSize {\n\t\treturn 0, fmt.Errorf(\"Buffer of size %d is too small to hold frame of size %d\", bufferSize, n)\n\t}\n\n\t// Read into buffer\n\tn, err = io.ReadFull(framed.Stream, buffer[:n])\n\treturn\n}", "func (mCn mockConn) Read(b []byte) (n int, err error) {\n\tfmt.Printf(\"reading: %d of %d.\\n\", *mCn.readCount, len(mockConnOutpBytes))\n\tif *mCn.readCount < len(mockConnOutpBytes) {\n\t\tcopy(b, mockConnOutpBytes[*mCn.readCount])\n\t\t*mCn.readCount = *mCn.readCount + 1\n\t}\n\treturn len(b), nil\n}", "func (r *MockReadWriteCloser) Read(p []byte) (n int, err error) {\n\n\tif err = r.ReadErr; err == nil {\n\t\tr.BytesRead = p\n\t\tn = len(p)\n\t}\n\treturn\n}", "func (br *BandwidthMeter) BytesRead() (bytes uint64) {\n bytes = br.bytesRead\n return\n}", "func (br *BufferedReader) Read(v Decoder) (n int, err error) {\n\nRetry:\n\tif br.mustFill {\n\t\t// The buffer needs to be filled before trying to decode\n\t\t// another record.\n\t\tif br.mode == ModeManual {\n\t\t\treturn 0, ErrMustFill\n\t\t}\n\n\t\terr = br.Fill()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tif br.eof && br.offset == br.buffered {\n\t\t// We've reached EOF on a previous Fill attempt and 
the\n\t\t// buffered data has been fully consumed.\n\t\treturn 0, io.EOF\n\t}\n\n\tn, err = v.Decode(br.buffer[br.offset:br.buffered])\n\n\tif err == ErrShortBuffer {\n\t\t// Unable to decode a full record.\n\n\t\tif br.offset == 0 && br.buffered == len(br.buffer) {\n\t\t\t// We've tried to decode from the start of a full\n\t\t\t// buffer, so it seems we won't be able to fit this\n\t\t\t// record in our buffer.\n\t\t\treturn 0, ErrTooLarge\n\t\t}\n\n\t\tif br.eof {\n\t\t\t// We won't be able to read more bytes yet there's\n\t\t\t// a partial record left to decode.\n\t\t\treturn 0, io.ErrUnexpectedEOF\n\t\t}\n\n\t\tbr.mustFill = true\n\n\t\tgoto Retry\n\t}\n\n\tbr.offset += n\n\n\tif err != nil {\n\t\treturn n, err\n\t}\n\n\treturn n, nil\n}", "func (r *reader) Read(p []byte) (n int, err error) {\n\tif r.sb == nil {\n\t\treturn 0, ErrClosedReader\n\t}\n\n\tr.sb.lock.Lock()\n\tdefer r.sb.lock.Unlock()\n\tn, err = 0, nil\n\n\t// Block until available data or error\n\tfor !r.availableData() && !r.sb.closed {\n\t\tr.sb.lock.Unlock()\n\t\t<-r.sb.newData\n\t\tr.sb.lock.Lock()\n\t}\n\tif !r.availableData() && r.sb.closed {\n\t\treturn 0, io.EOF\n\t}\n\n\t// Copy data and move the reader's position in the buffer\n\treadStart := r.at - r.sb.start\n\tn = copy(p, r.sb.buf[readStart:])\n\tr.at += n\n\n\t// Tell SharedBuffer to resort its readers\n\theap.Fix(&r.sb.readers, r.idx)\n\tr.sb.flush()\n\n\treturn\n}", "func (s *Stream) Read(p []byte) (n int, err error) {\n\tif s.i >= int64(len(s.data)) {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-s.done:\n\t\t\t\treturn 0, io.EOF\n\n\t\t\tcase data := <-s.recv:\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn 0, err\n\t\t\t\t}\n\t\t\t\ts.data = data\n\t\t\t\ts.i = 0\n\t\t\t\treturn s.Read(p)\n\t\t\t}\n\t\t}\n\t}\n\tn = copy(p, s.data[s.i:])\n\ts.i += int64(n)\n\treturn n, nil\n}", "func (rr *Reader) Read(dst []byte) (n int, err error) {\n\treturn rr.br.Read(dst)\n}", "func (d *Device) Read(b []byte) (n int, err error) {\n\t// TODO Check threading iomplication here\n\tfor !d.DataAvailable {\n\t\ttime.Sleep(3 * time.Millisecond)\n\t}\n\td.readLock.Lock()\n\n\tll := d.ReadLength\n\t//\tfmt.Printf(\"RL - %d\\n\", d.ReadLength)\n\tfor i := 0; i < d.ReadLength; i++ {\n\t\tb[i] = d.ReadBuffer[d.ReadPosition]\n\t\td.ReadPosition++\n\t\tif d.ReadPosition >= 1024 {\n\t\t\td.ReadPosition = 0\n\t\t}\n\t}\n\td.ReadLength = 0\n\td.DataAvailable = false\n\td.readLock.Unlock()\n\treturn ll, nil\n\n}", "func (r *sliceReader) Read(p []byte) (n int, err error) {\n\tif r.off >= len(r.buf) {\n\t\tif len(p) == 0 {\n\t\t\treturn\n\t\t}\n\t\treturn 0, io.EOF\n\t}\n\tn = copy(p, r.buf[r.off:])\n\tr.off += n\n\treturn\n}", "func (rwc *noPIReadWriteCloser) Read(p []byte) (n int, err error) {\n\tn, err = rwc.ReadWriteCloser.Read(rwc.rBuffer)\n\tif err == nil && n >= 4 {\n\t\tcopy(p, rwc.rBuffer[4:n])\n\t\tn -= 4\n\t}\n\treturn\n}", "func (dw downloadBuffer) ReadFrom(r io.Reader) (int64, error) {\n\tvar n int64\n\tfor len(dw.buf) > 0 {\n\t\tread, err := io.ReadFull(r, dw.buf[0])\n\n\t\tif err == io.ErrUnexpectedEOF || err == io.EOF {\n\t\t\tn += int64(read)\n\t\t\treturn n, nil\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\n\t\tdw.buf = dw.buf[1:]\n\t\tn += int64(read)\n\t}\n\treturn n, nil\n}", "func (r *bytesReader) Read(b []byte) (n int, err error) {\n\tif r.index >= int64(len(r.bs)) {\n\t\treturn 0, io.EOF\n\t}\n\tn = copy(b, r.bs[r.index:])\n\tr.index += int64(n)\n\treturn\n}", "func (d *Decoder) Read(p []byte) (int, error) {\n\tn, err := d.r.Read(p)\n\t// enforce an absolute 
maximum size limit\n\tif d.n += n; d.n > MaxObjectSize {\n\t\tbuild.Critical(ErrObjectTooLarge)\n\t}\n\treturn n, err\n}", "func Read(r io.Reader, data []byte) ([]byte, error) {\n\tj := 0\n\tfor {\n\t\tn, err := r.Read(data[j:])\n\t\tj = j + n\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn nil, errors.Wrap(err, \"Read failure\")\n\t\t}\n\n\t\tif (n == 0 && j == len(data)) || j > len(data) {\n\t\t\treturn nil, errors.New(\"Size of requested data is too large\")\n\t\t}\n\t}\n\n\treturn data[:j], nil\n}", "func (r *ThrottledReadCloser) Read(buf []byte) (int, error) {\n\tsubBuff, delay, err := getBufferAndDelay(r.pool, r.id, len(buf))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\ttime.Sleep(delay)\n\tif subBuff > len(buf) {\n\t\tsubBuff = len(buf)\n\t}\n\tn, err := r.origReadCloser.Read(buf[:subBuff])\n\treturn n, err\n}", "func (c *CircBuf) Consume(nbytes int) int {\n\tvar count int\n\tvar num int\n\tfor {\n\t\tcount = c.countToEnd()\n\t\tif nbytes - num < count {\n\t\t\tcount = nbytes - num\n\t\t}\n\t\tif count <= 0 {\n\t\t\tbreak\n\t\t}\n\t\tc.tail = (c.tail + count) & (len(c.buf) - 1)\n\t\tnum += count\n\t}\n\treturn num\n}", "func (r *ChannelReader) Read(b []byte) (sz int, err error) {\n\tif len(b) == 0 {\n\t\treturn 0, io.ErrShortBuffer\n\t}\n\n\tfor {\n\t\tif len(r.buf) > 0 {\n\t\t\tif len(r.buf) <= len(b) {\n\t\t\t\tsz = len(r.buf)\n\t\t\t\tcopy(b, r.buf)\n\t\t\t\tr.buf = nil\n\t\t\t} else {\n\t\t\t\tcopy(b, r.buf)\n\t\t\t\tr.buf = r.buf[len(b):]\n\t\t\t\tsz = len(b)\n\t\t\t}\n\t\t\treturn sz, nil\n\t\t}\n\n\t\tvar ok bool\n\t\tif r.deadline.IsZero() {\n\t\t\tr.buf, ok = <-r.c\n\t\t} else {\n\t\t\ttimer := time.NewTimer(r.deadline.Sub(time.Now()))\n\t\t\tdefer timer.Stop()\n\n\t\t\tselect {\n\t\t\tcase r.buf, ok = <-r.c:\n\t\t\tcase <-timer.C:\n\t\t\t\treturn 0, context.DeadlineExceeded\n\t\t\t}\n\t\t}\n\t\tif len(r.buf) == 0 && !ok {\n\t\t\treturn 0, io.EOF\n\t\t}\n\t}\n}", "func (jbobject *JavaNioCharBuffer) Read(a JavaNioCharBufferInterface) (int, error) {\n\tconv_a := javabind.NewGoToJavaCallable()\n\tif err := conv_a.Convert(a); err != nil {\n\t\tpanic(err)\n\t}\n\tjret, err := jbobject.CallMethod(javabind.GetEnv(), \"read\", javabind.Int, conv_a.Value().Cast(\"java/nio/CharBuffer\"))\n\tif err != nil {\n\t\tvar zero int\n\t\treturn zero, err\n\t}\n\tconv_a.CleanUp()\n\treturn jret.(int), nil\n}", "func Read(r io.Reader, n uint64) ([]byte, error) {\n\tread := make([]byte, n)\n\t_, err := r.Read(read)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not read from reader: %s\", err)\n\t}\n\n\treturn read, nil\n}", "func (c *ChanReader) Read(out []byte) (int, error) {\n\tif c.buffer == nil {\n\t\treturn 0, io.EOF\n\t}\n\tn := copy(out, c.buffer)\n\tc.buffer = c.buffer[n:]\n\tif len(out) <= len(c.buffer) {\n\t\treturn n, nil\n\t} else if n > 0 {\n\t\t// We have some data to return, so make the channel read optional\n\t\tselect {\n\t\tcase p := <-c.input:\n\t\t\tif p == nil { // Stream was closed\n\t\t\t\tc.buffer = nil\n\t\t\t\tif n > 0 {\n\t\t\t\t\treturn n, nil\n\t\t\t\t}\n\t\t\t\treturn 0, io.EOF\n\t\t\t}\n\t\t\tn2 := copy(out[n:], p.Data)\n\t\t\tc.buffer = p.Data[n2:]\n\t\t\treturn n + n2, nil\n\t\tdefault:\n\t\t\treturn n, nil\n\t\t}\n\t}\n\tvar p *StreamChunk\n\tselect {\n\tcase p = <-c.input:\n\tcase <-c.interrupt:\n\t\tc.buffer = c.buffer[:0]\n\t\treturn n, ErrInterrupted\n\t}\n\tif p == nil { // Stream was closed\n\t\tc.buffer = nil\n\t\treturn 0, io.EOF\n\t}\n\tn2 := copy(out[n:], p.Data)\n\tc.buffer = p.Data[n2:]\n\treturn n + n2, 
nil\n}", "func (c *CounterReader) Read(p []byte) (int, error) {\n\tread, err := c.r.Read(p)\n\n\tc.c += int64(read)\n\n\treturn read, err\n}", "func (r *testReader) Read(p []byte) (n int, err error) {\n\tl := len(p)\n\tif l < r.Size {\n\t\tn = l\n\t} else {\n\t\tn = r.Size\n\t\terr = io.EOF\n\t}\n\tr.Size -= n\n\treturn n, err\n}", "func (b *ByteArray) Read(p []byte) (n int, err error) {\n\tfor n = 0; n < len(p); {\n\t\tvar slice []byte\n\t\tslice, err = b.ReadSlice()\n\t\tif slice != nil {\n\t\t\tread := copy(p[n:], slice)\n\t\t\tb.readPos = b.seek(b.readPos, read, SEEK_CUR)\n\t\t\tn += read\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tif n < len(p) {\n\t\terr = io.EOF\n\t}\n\treturn n, err\n}", "func (r *Reader) ReadFull(n int) ([]byte, error) {\n\tunreadBytes := r.unreadBytes()\n\tif unreadBytes >= n {\n\t\tresult := r.buf[r.r : r.r+n]\n\t\tr.r += n\n\t\treturn result, nil\n\t}\n\n\tneedToRead := n - unreadBytes\n\tif r.capLeft() >= needToRead {\n\t\t// enough room to Read\n\t\tif err := r.readAtLeast(needToRead); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresult := r.buf[r.r : r.r+n]\n\t\tr.r += n\n\t\treturn result, nil\n\t}\n\n\t// not enough room\n\t// check if buf is large enough\n\tif n > len(r.buf) {\n\t\tif cap(r.buf) == 0 {\n\t\t\treturn nil, ErrBufReaderAlreadyClosed\n\t\t}\n\n\t\t// make a larger buf\n\t\tnewBuf := slabPool.Alloc(n + 128)\n\t\tr.w = copy(newBuf, r.buf[r.r:r.w])\n\t\tr.r = 0\n\t\tslabPool.Free(r.buf)\n\t\tr.buf = newBuf\n\t} else {\n\t\t// enough room, shift existing data to left\n\t\tr.w = copy(r.buf, r.buf[r.r:r.w])\n\t\tr.r = 0\n\t}\n\n\tif err := r.readAtLeast(needToRead); err != nil {\n\t\treturn nil, err\n\t}\n\n\tresult := r.buf[r.r : r.r+n]\n\tr.r += n\n\treturn result, nil\n}", "func (r *testReader) Read(p []byte) (n int, err error) {\n\tif len(r.data) == 0 {\n\t\treturn 0, io.EOF\n\t}\n\tchunkSize := r.chunkSize\n\tif chunkSize > len(r.data) {\n\t\tchunkSize = len(r.data)\n\t}\n\tn = copy(p, r.data[:chunkSize])\n\tr.data = r.data[n:]\n\treturn n, nil\n}", "func (r *RepeatReader) Read(data []byte) (int, error) {\n\tif r.read >= r.count {\n\t\treturn 0, io.EOF\n\t}\n\tvar copied int\n\tfor i := 0; i < len(data); i++ {\n\t\tdata[i] = r.repeat\n\t\tcopied++\n\t\tr.read++\n\t\tif r.read >= r.count {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn copied, nil\n}", "func (wpr *Wrapper) Read(p []byte) (n int, err error) {\n\tn, err = wpr.ReadAt(p, 0)\n\twpr.L -= int64(n)\n\twpr.O += int64(n)\n\twpr.O %= wpr.N\n\treturn n, err\n}", "func (b *Blush) Read(p []byte) (n int, err error) {\n\tif b.closed {\n\t\treturn 0, ErrClosed\n\t}\n\tb.once.Do(func() {\n\t\tb.buf = new(bytes.Buffer)\n\t\tif _, er := b.WriteTo(b.buf); er != nil {\n\t\t\terr = er\n\t\t}\n\t})\n\tif err != nil {\n\t\tb.closed = true\n\t\treturn\n\t}\n\treturn b.buf.Read(p)\n}", "func (b *QueueBuffer) Read(p []byte) (int, error) {\n\tif x := len(*b) - len(p); x >= 0 {\n\t\tn := copy(p, (*b)[x:])\n\t\t*b = (*b)[:x]\n\t\treturn n, nil\n\t}\n\tn := copy(p, *b)\n\t*b = nil\n\treturn n, io.EOF\n}", "func (r *fakeRandReader) Read(p []byte) (int, error) {\n\tn := r.n\n\tif n > len(p) {\n\t\tn = len(p)\n\t}\n\treturn n, r.err\n}", "func (r *LimiterReader) Read(p []byte) (int, error) {\n\ttc := time.Now()\n\twd, abc := r.lim.request(tc, len(p))\n\tif 0 < wd {\n\t\ttimer := time.NewTimer(wd)\n\t\tselect {\n\t\tcase <-timer.C:\n\t\tcase <-r.closedChan:\n\t\t\tif !timer.Stop() {\n\t\t\t\t<-timer.C\n\t\t\t}\n\t\t\treturn 0, ErrClosed\n\t\t}\n\t}\n\tn, err := r.rd.Read(p[:abc])\n\tif n < abc {\n\t\tr.lim.refund(abc - 
n)\n\t}\n\treturn n, err\n}", "func (*Client) BytesRead() uint64 {\n\tbytesRead.Write(m)\n\treturn uint64(*m.Counter.Value)\n}", "func (s *session) Read(data []byte) (int, error) {\n\ts.readLock.Lock()\n\tdefer s.readLock.Unlock()\n\t// TODO: check error?\n\tn := len(data)\n\t// If there is anything unread, it's part of a partially read message. Return it.\n\tnu := len(s.unread)\n\tif nu > 0 {\n\t\tcopy(data, s.unread)\n\t\tif nu > n {\n\t\t\ts.unread = s.unread[n:]\n\t\t\treturn n, nil\n\t\t} else {\n\t\t\ts.unread = nil\n\t\t\treturn nu, nil\n\t\t}\n\t}\n\n\tm, ok := <-s.readQueue\n\tif !ok {\n\t\t// We're closed\n\t\treturn 0, io.EOF\n\t}\n\tmbytes := m.bytes()\n\tcopy(data, mbytes)\n\tnm := len(mbytes)\n\tif nm > n {\n\t\ts.unread = mbytes[n:]\n\t\treturn n, nil\n\t}\n\treturn nm, nil\n}", "func (r *Reader) Read(bs []byte) (int, error) {\n\treturn r.R(0).Read(bs)\n}", "func (c *Conn) Read(p []byte) (n int, err error) {\n\treturn c.bufr.Read(p)\n}", "func (r *azblobObjectReader) Read(p []byte) (n int, err error) {\n\tmaxCnt := r.totalSize - r.pos\n\tif maxCnt > int64(len(p)) {\n\t\tmaxCnt = int64(len(p))\n\t}\n\tif maxCnt == 0 {\n\t\treturn 0, io.EOF\n\t}\n\tresp, err := r.blobClient.DownloadStream(r.ctx, &blob.DownloadStreamOptions{\n\t\tRange: blob.HTTPRange{\n\t\t\tOffset: r.pos,\n\t\t\tCount: maxCnt,\n\t\t},\n\n\t\tCPKInfo: r.cpkInfo,\n\t})\n\tif err != nil {\n\t\treturn 0, errors.Annotatef(err, \"Failed to read data from azure blob, data info: pos='%d', count='%d'\", r.pos, maxCnt)\n\t}\n\tbody := resp.NewRetryReader(r.ctx, &blob.RetryReaderOptions{\n\t\tMaxRetries: azblobRetryTimes,\n\t})\n\tn, err = body.Read(p)\n\tif err != nil && err != io.EOF {\n\t\treturn 0, errors.Annotatef(err, \"Failed to read data from azure blob response, data info: pos='%d', count='%d'\", r.pos, maxCnt)\n\t}\n\tr.pos += int64(n)\n\treturn n, body.Close()\n}", "func (d *Decoder) Read() uint64 {\n\tv := d.buf[d.i]\n\treturn v\n}", "func (reader MyReader) Read([]byte) (int, error) {\n\tvar b = make([]byte)\n\treturn 0, ErrRead(b)\n}", "func (c *poolConn) ReadBuffer(size int) ([]byte, error) {\n\tif c.mustRead == true {\n\t\terr := c.ReadTcpBlock()\n\t\tif err != nil {\n\t\t\tc.err = err\n\t\t\treturn nil, err\n\t\t}\n\t\tc.buffer.index = 0\n\t\tc.mustRead = false\n\t}\n\n\t//if size < c.buffer.size-c.buffer.index, normal stitching\n\t//if c.buffer.size-c.buffer.index < size < c.buffer.capacity-c.buffer.size+c.buffer.index, move usable data in buffer to front\n\t//if size > c.buffer.capacity, directly read the specified size\n\tif size+2 <= c.buffer.size-c.buffer.index {\n\n\t\tif c.buffer.realBuffer[c.buffer.index+size] == '\\r' && c.buffer.realBuffer[c.buffer.index+size+1] == '\\n' {\n\t\t\tcpy_index := c.buffer.index\n\t\t\tc.buffer.index = c.buffer.index + size + 2\n\t\t\tif c.buffer.index >= c.buffer.size {\n\t\t\t\tc.mustRead = true\n\t\t\t}\n\t\t\treturn c.buffer.realBuffer[cpy_index: cpy_index+size], nil\n\t\t} else {\n\t\t\treturn nil, errors.New(\"ReadBuffer is read wrong!\")\n\t\t}\n\t} else if size+2 <= c.buffer.capacity-c.buffer.size+c.buffer.index {\n\t\tc.ReadUnsafeBuffer()\n\t\tif c.buffer.realBuffer[c.buffer.index+size] == '\\r' && c.buffer.realBuffer[c.buffer.index+size+1] == '\\n' {\n\t\t\tc.buffer.index = c.buffer.index + size + 2\n\t\t\tif c.buffer.index >= c.buffer.size {\n\t\t\t\tc.mustRead = true\n\t\t\t}\n\t\t\treturn c.buffer.realBuffer[0:size], nil\n\t\t} else {\n\t\t\treturn nil, errors.New(\"ReadBuffer is read wrong!\")\n\t\t}\n\n\t} else {\n\t\tvar err error\n\t\tbigBuffer := 
make([]byte, size+2)\n\t\tcopy(bigBuffer, c.buffer.realBuffer[c.buffer.index:])\n\n\t\t//Make the results right , when the BigSize < buffer.capacity\n\t\tif len(bigBuffer) > c.buffer.size-c.buffer.index {\n\t\t\tbigBuffer, err = c.ReadTcpBigBlockLink(bigBuffer)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\t//judge weather the bigBuffer is right\n\t\tif bigBuffer[size] == '\\r' && bigBuffer[size+1] == '\\n' {\n\t\t\tc.buffer.index = c.buffer.index + size + 2\n\t\t\tif c.buffer.index >= c.buffer.size {\n\t\t\t\tc.mustRead = true\n\t\t\t}\n\t\t\treturn bigBuffer[:size], nil\n\t\t} else {\n\t\t\treturn nil, errors.New(\"bigBuffer is read wrong!\")\n\t\t}\n\t}\n}", "func (de *Decoder) Read(p []byte) (int, error) {\n\treturn de.buffer.Read(p)\n}", "func (p *progress) Read(b []byte) (n int, err error) {\n\tn, err = p.rc.Read(b)\n\tif err != nil && err != io.EOF {\n\t\treturn\n\t}\n\tp.offset += int64(n)\n\t// Invokes the user's callback method to report progress\n\tp.pr(p.offset)\n\treturn\n}", "func (r *Reader) Read(buf []byte) (int, error) {\n\tdefer func() {\n\t\tr.offset = r.h.Offset()\n\t\tr.frameInfo = r.h.FrameInfo()\n\n\t\tf := r.h.MetaCheck()\n\t\tswitch {\n\t\tcase f&MetaNewID3 != 0:\n\t\t\tid3v2, err := r.h.MetaID3()\n\t\t\tif id3v2 != nil && err == nil {\n\t\t\t\tr.meta.ID3v2 = id3v2\n\t\t\t}\n\t\t}\n\n\t}()\n\tif r.nextOffset > r.totalRead {\n\t\tn, err := io.CopyN(ioutil.Discard, r.input, r.nextOffset-r.totalRead)\n\t\tr.totalRead += n\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\tfor r.bytesSinceOk < r.maxBadBytes {\n\t\tvar feed []byte\n\t\tif r.needMore {\n\t\t\tr.needMore = false\n\t\t\tfeedLen, err := r.input.Read(r.feedBuf)\n\t\t\tr.totalRead += int64(feedLen)\n\t\t\tr.nextOffset = r.totalRead\n\t\t\tif feedLen == 0 && err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\tfeed = r.feedBuf[:feedLen]\n\t\t\tr.bytesSinceOk += feedLen\n\t\t}\n\n\t\tswitch n, err := r.h.Decode(feed, buf); err {\n\t\tcase ErrNewFormat:\n\t\t\tr.outputFormat = r.h.OutputFormat()\n\t\t\tr.bytesSinceOk = 0\n\t\t\tif len(buf) == 0 {\n\t\t\t\treturn n, nil\n\t\t\t}\n\t\tcase ErrNeedMore:\n\t\t\tr.needMore = true\n\t\t\tif n > 0 {\n\t\t\t\tr.bytesSinceOk = 0\n\t\t\t\treturn n, nil\n\t\t\t}\n\t\tcase ErrDone:\n\t\t\treturn n, io.EOF\n\t\tdefault:\n\t\t\tr.bytesSinceOk = 0\n\t\t\treturn n, nil\n\n\t\t}\n\n\t}\n\tr.bytesSinceOk = 0\n\treturn 0, errors.New(\"No valid data found\")\n}", "func (c *DecoderReadCloser) Read(b []byte) (int, error) {\n\treturn c.d.Read(b)\n}", "func (r *Reader) Read(p []byte) (n int, err error) {\n\treturn r.reader.Read(p)\n}", "func (r *copyReader) Read(b []byte) (int, error) {\n\tif r.rerr != nil {\n\t\treturn 0, r.rerr\n\t}\n\n\tr.once.Do(r.init)\n\treturn r.rbuf.Read(b)\n}", "func (c *Client) Read(path gfs.Path, offset gfs.Offset, data []byte) (n int, err error) {\n\tvar f gfs.GetFileInfoReply\n\terr = util.Call(c.master, \"Master.RPCGetFileInfo\", gfs.GetFileInfoArg{path}, &f)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\tif int64(offset/gfs.MaxChunkSize) > f.Chunks {\n\t\treturn -1, fmt.Errorf(\"read offset exceeds file size\")\n\t}\n\n\tpos := 0\n\tfor pos < len(data) {\n\t\tindex := gfs.ChunkIndex(offset / gfs.MaxChunkSize)\n\t\tchunkOffset := offset % gfs.MaxChunkSize\n\n\t\tif int64(index) >= f.Chunks {\n\t\t\terr = gfs.Error{gfs.ReadEOF, \"EOF over chunks\"}\n\t\t\tbreak\n\t\t}\n\n\t\tvar handle gfs.ChunkHandle\n\t\thandle, err = c.GetChunkHandle(path, index)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tvar n int\n\t\t//wait := 
time.NewTimer(gfs.ClientTryTimeout)\n\t\t//loop:\n\t\tfor {\n\t\t\t//select {\n\t\t\t//case <-wait.C:\n\t\t\t// err = gfs.Error{gfs.Timeout, \"Read Timeout\"}\n\t\t\t// break loop\n\t\t\t//default:\n\t\t\t//}\n\t\t\tn, err = c.ReadChunk(handle, chunkOffset, data[pos:])\n\t\t\tif err == nil || err.(gfs.Error).Code == gfs.ReadEOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlog.Warning(\"Read \", handle, \" connection error, try again: \", err)\n\t\t}\n\n\t\toffset += gfs.Offset(n)\n\t\tpos += n\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif err != nil && err.(gfs.Error).Code == gfs.ReadEOF {\n\t\treturn pos, io.EOF\n\t} else {\n\t\treturn pos, err\n\t}\n}", "func (s *Stream) Read(p []byte) (n int, err error) {\n\tif s.unread == nil {\n\t\tselect {\n\t\tcase <-s.closeChan:\n\t\t\treturn 0, io.EOF\n\t\tcase read, ok := <-s.dataChan:\n\t\t\tif !ok {\n\t\t\t\treturn 0, io.EOF\n\t\t\t}\n\t\t\ts.unread = read\n\t\t}\n\t}\n\tn = copy(p, s.unread)\n\tif n < len(s.unread) {\n\t\ts.unread = s.unread[n:]\n\t} else {\n\t\ts.unread = nil\n\t}\n\treturn\n}", "func Read(fd int32, p unsafe.Pointer, n int32) int32", "func (mc *MockConn) Read(b []byte) (int, error) {\n\tif mc.closed {\n\t\treturn 0, errors.New(\"Connection closed.\")\n\t}\n\n\ti := 0\n\tfor i < len(b) {\n\t\tif mc.outMessage == nil {\n\t\t\tselect {\n\t\t\tcase <-mc.done:\n\t\t\t\treturn 0, errors.New(\"Connection closed.\")\n\t\t\tcase mc.outMessage = <-mc.receiveChan:\n\t\t\t}\n\t\t\tmc.outPlace = 0\n\t\t}\n\n\t\tfor mc.outPlace < len(mc.outMessage) && i < len(b) {\n\t\t\tb[i] = mc.outMessage[mc.outPlace]\n\t\t\tmc.outPlace++\n\t\t\ti++\n\t\t}\n\n\t\tif mc.outPlace == len(mc.outMessage) {\n\t\t\tmc.outMessage = nil\n\t\t}\n\t}\n\n\treturn i, nil\n}", "func (f *ClientFD) Read(ctx context.Context, dst []byte, offset uint64) (uint64, error) {\n\tvar resp PReadResp\n\t// maxDataReadSize represents the maximum amount of data we can read at once\n\t// (maximum message size - metadata size present in resp). Uninitialized\n\t// resp.SizeBytes() correctly returns the metadata size only (since the read\n\t// buffer is empty).\n\tmaxDataReadSize := uint64(f.client.maxMessageSize) - uint64(resp.SizeBytes())\n\treturn chunkify(maxDataReadSize, dst, func(buf []byte, curOff uint64) (uint64, error) {\n\t\treq := PReadReq{\n\t\t\tOffset: offset + curOff,\n\t\t\tFD: f.fd,\n\t\t\tCount: uint32(len(buf)),\n\t\t}\n\n\t\t// This will be unmarshalled into. 
Already set Buf so that we don't need to\n\t\t// allocate a temporary buffer during unmarshalling.\n\t\t// PReadResp.UnmarshalBytes expects this to be set.\n\t\tresp.Buf = buf\n\t\tctx.UninterruptibleSleepStart(false)\n\t\terr := f.client.SndRcvMessage(PRead, uint32(req.SizeBytes()), req.MarshalUnsafe, resp.UnmarshalBytes, nil)\n\t\tctx.UninterruptibleSleepFinish(false)\n\t\treturn uint64(resp.NumBytes), err\n\t})\n}", "func (p *TBufferedReadTransport) Read(buf []byte) (int, error) {\n\tin, err := p.readBuf.Read(buf)\n\treturn in, thrift.NewTTransportExceptionFromError(err)\n}", "func (r *ReaderCloser) Read(p []byte) (int, error) {\n\tcount, err := unix.Read(r.fd, p)\n\tif count < 0 && err != nil {\n\t\tcount = 0\n\t}\n\treturn count, err\n}", "func (r *NaClReader) Read(p []byte) (n int, err error) {\n\tbuf := make([]byte, 1024*32)\n\tbuflen, err := r.Reader.Read(buf)\n\tif buflen > 24 {\n\t\t// parse first 24 bytes as nounce\n\t\tnounce := new([24]byte)\n\t\tcopy(nounce[:], buf[:24])\n\n\t\t// open the encrypted message\n\t\topened, ok := box.OpenAfterPrecomputation(nil, buf[24:buflen], nounce, r.skey)\n\t\tif !ok {\n\t\t\treturn 0, errors.New(\"unable to decrypt\")\n\t\t}\n\n\t\tif len(p) < len(opened) {\n\t\t\treturn 0, errors.New(\"read buffer exceeded\")\n\t\t}\n\n\t\tn = copy(p, opened)\n\t\treturn n, nil\n\n\t}\n\n\tif err != nil {\n\t\treturn n, err\n\t}\n\n\treturn 0, errors.New(\"unable to read enough data to decrypt\")\n}", "func (c *Conn) Read(b []byte) (n int, err error) {\n\tif len(b) == 0 {\n\t\treturn 0, nil\n\t}\n\terr = tryAgain\n\tfor err == tryAgain {\n\t\tn, errcb := c.read(b)\n\t\terr = c.handleError(errcb)\n\t\tif err == nil {\n\t\t\tgo c.flushOutputBuffer()\n\t\t\treturn n, nil\n\t\t}\n\t\tif err == io.ErrUnexpectedEOF {\n\t\t\terr = io.EOF\n\t\t}\n\t}\n\treturn 0, err\n}", "func (r *Reader) Read(p []byte) (n int, err error) {\n\tif r.frame == nil {\n\t\tif !r.fragmented() {\n\t\t\t// Every new Read() must be preceded by NextFrame() call.\n\t\t\treturn 0, ErrNoFrameAdvance\n\t\t}\n\t\t// Read next continuation or intermediate control frame.\n\t\t_, err := r.NextFrame()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tif r.frame == nil {\n\t\t\t// We handled intermediate control and now got nothing to read.\n\t\t\treturn 0, nil\n\t\t}\n\t}\n\n\tn, err = r.frame.Read(p)\n\tif err != nil && err != io.EOF {\n\t\treturn\n\t}\n\tif err == nil && r.raw.N != 0 {\n\t\treturn n, nil\n\t}\n\n\t// EOF condition (either err is io.EOF or r.raw.N is zero).\n\tswitch {\n\tcase r.raw.N != 0:\n\t\terr = io.ErrUnexpectedEOF\n\n\tcase r.fragmented():\n\t\terr = nil\n\t\tr.resetFragment()\n\n\tcase r.CheckUTF8 && !r.utf8.Valid():\n\t\t// NOTE: check utf8 only when full message received, since partial\n\t\t// reads may be invalid.\n\t\tn = r.utf8.Accepted()\n\t\terr = ErrInvalidUTF8\n\n\tdefault:\n\t\tr.reset()\n\t\terr = io.EOF\n\t}\n\n\treturn\n}" ]
[ "0.72553504", "0.70701253", "0.70154315", "0.6986585", "0.6971439", "0.6958848", "0.6937826", "0.6927884", "0.68971074", "0.68480355", "0.6798851", "0.67744625", "0.6758449", "0.67381537", "0.6736907", "0.6727699", "0.6715925", "0.6706231", "0.6691289", "0.66754776", "0.6643859", "0.6631805", "0.6629334", "0.6617672", "0.66172534", "0.6615125", "0.65897596", "0.65792924", "0.6550572", "0.6544778", "0.6538421", "0.65379846", "0.653482", "0.6521106", "0.6520539", "0.6513252", "0.64701563", "0.64476687", "0.642275", "0.64221746", "0.6414572", "0.6412825", "0.6404169", "0.6401675", "0.63988173", "0.6393626", "0.63907886", "0.638907", "0.63875973", "0.6384005", "0.63805395", "0.6380489", "0.63750017", "0.6371304", "0.6364844", "0.6343503", "0.6338212", "0.6336084", "0.6313787", "0.63130915", "0.6296592", "0.6285772", "0.62753946", "0.6262844", "0.62590086", "0.6251839", "0.6244374", "0.6238292", "0.6235076", "0.62231076", "0.6219506", "0.6208224", "0.6203755", "0.6202396", "0.6200969", "0.6199388", "0.6195824", "0.61913675", "0.6187105", "0.6186141", "0.61811745", "0.6170611", "0.6169977", "0.61595863", "0.61590993", "0.6156242", "0.6153538", "0.61385655", "0.61371064", "0.6134728", "0.6121585", "0.6120363", "0.6119555", "0.6117014", "0.61166686", "0.6116262", "0.6112153", "0.61085033", "0.6100814", "0.6094267" ]
0.717935
1
Write writes data to a circular buffer. The number of bytes written is returned.
func (c *CircBuf) Write(buf []byte) int {
	var space int
	var num int
	for {
		space = c.spaceToEnd()
		if len(buf)-num < space {
			space = len(buf) - num
		}
		if space <= 0 {
			break
		}
		copy(c.buf[c.head:c.head+space], buf[num:num+space])
		c.head = (c.head + space) & (len(c.buf) - 1)
		num += space
	}
	return num
}
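Two details in the row above are easy to miss in isolation: the index update (c.head + space) & (len(c.buf) - 1) only acts as a modulo when the backing slice has a power-of-two length, and the helper spaceToEnd is called but not included in the row. A minimal sketch of what it could look like, modeled on the Linux kernel's CIRC_SPACE_TO_END macro; the body below is an assumed reconstruction, not the source's actual implementation:

func (c *CircBuf) spaceToEnd() int {
	// Assumed convention: one slot is kept free so that head == tail can
	// unambiguously mean "empty". Returns the contiguous writable span
	// starting at head, ending at the wrap point or just before tail.
	end := len(c.buf) - 1 - c.head
	n := (end + c.tail) & (len(c.buf) - 1)
	if n <= end {
		return n
	}
	return end + 1
}

Under that convention a buffer backed by make([]byte, 8) holds at most 7 bytes, and Write returns a short count rather than blocking once the buffer fills.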
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *RingBuffer) Write(p []byte) (int, error) {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\tif c.closed {\n\t\treturn 0, io.EOF\n\t}\n\tif len(p) > len(c.buf)-c.len {\n\t\t_, err := c.resize(len(p) + c.len)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tstartWritingAt := (c.index + c.len) % len(c.buf)\n\tleftBeforeEnd := len(c.buf) - startWritingAt\n\tn := 0\n\tif len(p) <= leftBeforeEnd {\n\t\t// it all fits in before it's time to wrap\n\t\tn = copy(c.buf[startWritingAt:], p)\n\t} else {\n\t\t// it didn't all fit in before we have to wrap\n\t\tn = copy(c.buf[startWritingAt:], p[:leftBeforeEnd])\n\t\tn += copy(c.buf[:startWritingAt], p[leftBeforeEnd:])\n\t}\n\tc.addLen(len(p))\n\treturn n, nil\n}", "func (rb *RingBuffer) Write(p []byte) (int, error) {\n\tlen := len(p)\n\texpected := len\n\n\tif len > rb.cap-rb.count {\n\t\tif err := rb.grow(len + rb.count - rb.cap); err != nil {\n\t\t\texpected = rb.cap - rb.count\n\t\t}\n\t}\n\n\tn := copy(rb.data[rb.writeOff:], p[:expected])\n\tif n < expected {\n\t\tn += copy(rb.data[:rb.readOff], p[n:expected])\n\t}\n\n\trb.count += n\n\trb.writeOff = (rb.writeOff + n) % rb.cap\n\n\treturn n, nil\n}", "func (r *RingBuffer) Write(p []byte) (n int, err error) {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tn, err = r.write(p)\n\treturn n, err\n}", "func (c *concurrentWriter) Write(data []byte) (n int, err error) {\n\tc.Lock()\n\tdefer c.Unlock()\n\tif c.w == nil {\n\t\treturn 0, nil\n\t}\n\treturn c.w.Write(data)\n}", "func (cb *Buffer) Write(buf []byte) (n int, err error) {\n\tif buf == nil || len(buf) == 0 {\n\t\treturn 0, fmt.Errorf(\"Input buffer is null or empty\")\n\t}\n\n\ttoWrite := min(cb.WriteAvailability(), len(buf))\n\n\tif toWrite == 0 {\n\t\treturn 0, fmt.Errorf(\"Buffer is full\")\n\t}\n\n\tif cb.wpos <= cb.rpos {\n\t\tif toWrite < cb.wpos {\n\t\t\tcopy(cb.buffer[cb.wpos-toWrite:cb.wpos], buf[:toWrite])\n\t\t\tcb.wpos -= toWrite\n\t\t} else {\n\t\t\tcopy(cb.buffer[:cb.wpos], buf[:cb.wpos])\n\t\t\tcopy(cb.buffer[len(cb.buffer)-toWrite+cb.wpos:len(cb.buffer)], buf[cb.wpos:toWrite])\n\t\t\tcb.wpos = len(cb.buffer) - toWrite + cb.wpos\n\t\t}\n\t} else {\n\t\tcopy(cb.buffer[cb.wpos-toWrite:cb.wpos], buf[:toWrite])\n\t\tcb.wpos -= toWrite\n\t}\n\n\tcb.full = cb.wpos == cb.rpos\n\treturn toWrite, nil\n}", "func (fb *fakeBuffer) Write(p []byte) (int, error) {\n\tsize := len(p)\n\tfb.n += int64(size)\n\treturn size, nil\n}", "func (w *Worker) Write(data []byte) (n int, err error) {\n\tlength := len(data)\n\tw.lock.Lock()\n\tif (length + w.position) > capacity {\n\t\tn, err = w.save()\n\t\tif err != nil {\n\t\t\tw.errorCallback()\n\t\t\treturn n, err\n\t\t}\n\t}\n\tcopy(w.buffer[w.position:], data)\n\tw.position += length\n\tw.lock.Unlock()\n\treturn n, err\n}", "func (s *Buffer) Write(p []byte) (n int, err error) {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\treturn s.buffer.Write(p)\n}", "func (sb *SeekableBuffer) Write(p []byte) (n int, err error) {\n\tdefer func() {\n\t\tif state := recover(); state != nil {\n\t\t\terr = state.(error)\n\t\t}\n\t}()\n\n\t// The current position we're already at is past the end of the data we\n\t// actually have. 
Extend our buffer up to our current position.\n\tif sb.position > len64(sb.data) {\n\t\textra := make([]byte, sb.position-len64(sb.data))\n\t\tsb.data = append(sb.data, extra...)\n\t}\n\n\tpositionFromEnd := len64(sb.data) - sb.position\n\ttailCount := positionFromEnd - len64(p)\n\n\tvar tailBytes []byte\n\tif tailCount > 0 {\n\t\ttailBytes = sb.data[len64(sb.data)-tailCount:]\n\t\tsb.data = append(sb.data[:sb.position], p...)\n\t} else {\n\t\tsb.data = append(sb.data[:sb.position], p...)\n\t}\n\n\tif tailBytes != nil {\n\t\tsb.data = append(sb.data, tailBytes...)\n\t}\n\n\tdataSize := len64(p)\n\tsb.position += dataSize\n\n\treturn int(dataSize), nil\n\n}", "func (aio *AsyncIO) Write(b []byte) (int, error) {\n\tnw, err := aio.WriteAt(b, aio.offset)\n\taio.offset += int64(nw)\n\treturn nw, err\n}", "func (b *BufferedStream) Write(message []byte) (int, error) {\n\tvar oldBuffer [][]byte\n\tvar oldSize int\n\n\tb.bufferMutex.Lock()\n\n\t// If capped transmission is enabled and the set limit has been reached\n\t// then substitute the buffer and prepare the old one for transmission.\n\tif b.limit > 0 && b.bufferCount >= b.limit {\n\t\toldBuffer, oldSize = b.flush()\n\t}\n\n\t// If the buffer count has reached the total length of the buffer then we\n\t// need a new slot for the received message, allocate it empty.\n\tif b.bufferCount >= len(b.buffer) {\n\t\tb.buffer = append(b.buffer, []byte{})\n\t}\n\n\t// Set the message in the buffer and then increment the position counter.\n\tb.buffer[b.bufferCount] = message\n\tb.bufferCount++\n\t// Concurrently safe read the value to be returned.\n\tnewCount := b.bufferCount\n\tb.bufferMutex.Unlock()\n\n\t// If the buffer was full fire a transmission with provided data.\n\tif oldBuffer != nil && oldSize != 0 {\n\t\tgo func(buffer [][]byte, size int) {\n\t\t\tif err := b.fireTransmission(buffer, size); err != nil {\n\t\t\t\tif b.fatal != nil {\n\t\t\t\t\tb.fatal(fmt.Errorf(\"gonyan buffered stream failure during data transmission: %s\", err.Error()))\n\t\t\t\t}\n\t\t\t}\n\t\t}(oldBuffer, oldSize)\n\t}\n\n\treturn newCount, nil\n}", "func (w *ByteCountWriter) Write(data []byte) (int, error) {\n\tatomic.AddInt64(&w.written, int64(len(data)))\n\treturn len(data), nil\n}", "func (wc *WriteCounter) Write(p []byte) (int, error) {\n\t// Bytes written this cycle.\n\tn := len(p)\n\n\t// Add the written bytes to the total.\n\twc.Written += float32(n)\n\n\t// Calculate the percentage and send it on the channel.\n\twc.progress <- wc.Written / wc.Total\n\n\t// Return the length of the written bytes this cycle.\n\treturn n, nil\n}", "func (file *Remote) Write(data []byte) (int, error) {\n\tfile.m.Lock()\n\tdefer file.m.Unlock()\n\n\tn, err := file.WriteAt(data, int64(file.pos))\n\tfile.pos += uint64(n)\n\treturn n, err\n}", "func (cw *CountedWriter) Write(data []byte) (int, error) {\n\tcw.total += uint64(len(data))\n\treturn cw.w.Write(data)\n}", "func (b *Buffer) Write(buf []byte) (int, error) {\n\t// Account for total bytes written\n\tn := len(buf)\n\tb.written += int64(n)\n\n\t// If the buffer is larger than ours, then we only care\n\t// about the last size bytes anyways\n\tif int64(n) > b.size {\n\t\tbuf = buf[int64(n)-b.size:]\n\t}\n\n\t// Copy in place\n\tremain := b.size - b.writeCursor\n\tcopy(b.data[b.offset+b.writeCursor:], buf)\n\tif int64(len(buf)) > remain {\n\t\tcopy(b.data[b.offset:], buf[remain:])\n\t}\n\n\t// Update location of the cursor\n\tb.writeCursor = ((b.writeCursor + int64(len(buf))) % b.size)\n\treturn n, nil\n}", "func (b *SafeBuffer) Write(p 
[]byte) (n int, err error) {\n\tb.m.Lock()\n\tdefer b.m.Unlock()\n\treturn b.b.Write(p)\n}", "func (s *Stream) Write(buf []byte) (int, error) {\n\t// Trivial Case: len of buffer is less than capacity of remote buffer\n\t// No need to wait for an ack packet in this case\n\ts.capLock.Lock()\n\tdefer s.capLock.Unlock()\n\n\tif s.closed {\n\t\treturn 0, errBrokenPipe\n\t}\n\n\t// Length of buffer is greater than remoteCapacity\n\tl, written := len(buf), 0\n\tfor written != l {\n\t\t// If remote capacity is zero, wait for an ack packet\n\t\tif s.remoteCapacity == 0 {\n\t\t\tvar timeout <-chan time.Time\n\t\t\tif !s.writeDeadline.IsZero() {\n\t\t\t\ttimeout = time.After(s.writeDeadline.Sub(time.Now()))\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase <-timeout:\n\t\t\t\t// caplock would be unlocked by Wait in waitForAck\n\t\t\t\t// lock before returning\n\t\t\t\ts.capLock.Lock()\n\t\t\t\treturn written, errWriteTimeout\n\t\t\tcase <-s.waitForAck():\n\t\t\t}\n\t\t}\n\t\tcap := min(len(buf), s.remoteCapacity)\n\t\tframe := newDataFrame(s.id, buf[:cap])\n\t\tbuf = buf[cap:]\n\t\ts.writeChan <- frame\n\t\twritten += cap\n\t\ts.remoteCapacity -= cap\n\t}\n\treturn l, nil\n}", "func (r *MockReadWriteCloser) Write(p []byte) (n int, err error) {\n\n\tif err = r.WriteErr; err != nil {\n\t\tr.BytesWritten = p\n\t\tn = len(p)\n\t}\n\treturn\n}", "func ringBufferWrite(bytes []byte, n uint, rb *ringBuffer) {\n\tif rb.pos_ == 0 && uint32(n) < rb.tail_size_ {\n\t\t/* Special case for the first write: to process the first block, we don't\n\t\t need to allocate the whole ring-buffer and we don't need the tail\n\t\t either. However, we do this memory usage optimization only if the\n\t\t first write is less than the tail size, which is also the input block\n\t\t size, otherwise it is likely that other blocks will follow and we\n\t\t will need to reallocate to the full size anyway. */\n\t\trb.pos_ = uint32(n)\n\n\t\tringBufferInitBuffer(rb.pos_, rb)\n\t\tcopy(rb.buffer_, bytes[:n])\n\t\treturn\n\t}\n\n\tif rb.cur_size_ < rb.total_size_ {\n\t\t/* Lazily allocate the full buffer. */\n\t\tringBufferInitBuffer(rb.total_size_, rb)\n\n\t\t/* Initialize the last two bytes to zero, so that we don't have to worry\n\t\t later when we copy the last two bytes to the first two positions. */\n\t\trb.buffer_[rb.size_-2] = 0\n\n\t\trb.buffer_[rb.size_-1] = 0\n\t}\n\t{\n\t\tvar masked_pos uint = uint(rb.pos_ & rb.mask_)\n\n\t\t/* The length of the writes is limited so that we do not need to worry\n\t\t about a write */\n\t\tringBufferWriteTail(bytes, n, rb)\n\n\t\tif uint32(masked_pos+n) <= rb.size_ {\n\t\t\t/* A single write fits. */\n\t\t\tcopy(rb.buffer_[masked_pos:], bytes[:n])\n\t\t} else {\n\t\t\t/* Split into two writes.\n\t\t\t Copy into the end of the buffer, including the tail buffer. */\n\t\t\tcopy(rb.buffer_[masked_pos:], bytes[:brotli_min_size_t(n, uint(rb.total_size_-uint32(masked_pos)))])\n\n\t\t\t/* Copy into the beginning of the buffer */\n\t\t\tcopy(rb.buffer_, bytes[rb.size_-uint32(masked_pos):][:uint32(n)-(rb.size_-uint32(masked_pos))])\n\t\t}\n\t}\n\t{\n\t\tvar not_first_lap bool = rb.pos_&(1<<31) != 0\n\t\tvar rb_pos_mask uint32 = (1 << 31) - 1\n\t\trb.data_[0] = rb.buffer_[rb.size_-2]\n\t\trb.data_[1] = rb.buffer_[rb.size_-1]\n\t\trb.pos_ = (rb.pos_ & rb_pos_mask) + uint32(uint32(n)&rb_pos_mask)\n\t\tif not_first_lap {\n\t\t\t/* Wrap, but preserve not-a-first-lap feature. 
*/\n\t\t\trb.pos_ |= 1 << 31\n\t\t}\n\t}\n}", "func (s *safeBuffer) Write(p []byte) (int, error) {\n\ts.Lock()\n\tdefer s.Unlock()\n\treturn s.buf.Write(p)\n}", "func (r *WriteCounter) Write(b []byte) (n int, err error) {\n\tif nil == r {\n\t\treturn 0, errors.New(\"Invalid parameter, 'nil' value\")\n\t}\n\n\tr.numWrites++\n\tr.numBytes += uint(len(b))\n\treturn len(b), nil\n}", "func (b *Writer) Write(buf []byte) (n int, err error)", "func (c *LimitedConnection) Write(b []byte) (written int, err error) {\n\treturn c.rateLimitLoop(&c.writeNotBefore, &c.writeDeadline, c.inner.Write, b)\n}", "func (br *BandwidthMeter) Write(p []byte) (int, error) {\n // Always completes and never returns an error.\n br.lastRead = time.Now().UTC()\n n := len(p)\n br.bytesRead += uint64(n)\n if br.start.IsZero() {\n br.start = br.lastRead\n }\n\n return n, nil\n}", "func (d *Device) Write(p []byte) (n int, err error) {\n\td.writeLock.Lock()\n\tavailableBuffer := 1024 - d.WriteLength\n\tif len(p) < availableBuffer {\n\t\tavailableBuffer = len(p)\n\t}\n\tpos := d.WritePosition + d.WriteLength\n\tfor i := 0; i < availableBuffer; i++ {\n\t\tif pos >= 1024 {\n\t\t\tpos = 0\n\t\t}\n\t\td.WriteBuffer[pos] = p[i]\n\t\tpos++\n\t}\n\td.WriteLength = d.WriteLength + availableBuffer\n\td.writeLock.Unlock()\n\treturn availableBuffer, nil\n}", "func (b *Buffer) Write(p []byte) (n int, err error) {\n\tb.m.Lock()\n\tdefer b.m.Unlock()\n\treturn b.b.Write(p)\n}", "func (c *TestConnection) Write(b []byte) (n int, err error) {\n if c.WriteError != nil && c.ThrowWriteErrorAfter == c.TimesWriteCalled {\n return 0, c.WriteError\n }\n\n if c.WriteCount > -1 {\n return c.WriteCount, nil\n }\n\n c.TimesWriteCalled++\n c.Written = append(c.Written, string(b))\n return len(b), nil\n}", "func (d *Device) Write(data []byte) (int, error) {\n\tdl := len(data)\n\tbl := len(d.buf) / 2\n\tfor len(data) > 0 {\n\t\tn := bl\n\t\tif n > len(data) {\n\t\t\tn = len(data)\n\t\t}\n\t\tk := 0\n\t\tfor _, b := range data[:n] {\n\t\t\td.buf[k] = d.rs | b>>4\n\t\t\td.buf[k+1] = d.rs | b&0x0f\n\t\t\tk += 2\n\t\t}\n\t\tk, err := d.w.Write(d.buf[:k])\n\t\tif err != nil {\n\t\t\treturn dl - len(data) + k/2, err\n\t\t}\n\t\tdata = data[n:]\n\t}\n\treturn dl, nil\n}", "func (b *ByteCounter) Write(p []byte) (int, error) {\n\t*b += ByteCounter(len(p)) // b is pointer so *b is b value\n\treturn len(p), nil\n}", "func (m *pipeBuffer) Write(p []byte) (int, error) {\n\tselect {\n\tcase <-m.done:\n\t\tif m.closeError != nil {\n\t\t\treturn 0, m.closeError\n\t\t}\n\t\treturn 0, ErrBufferClosed\n\tdefault:\n\t}\n\n\tselect {\n\tcase <-m.stop:\n\t\treturn 0, ErrBufferStopped\n\tdefault:\n\t\tm.mtx.Lock()\n\t\tdefer m.mtx.Unlock()\n\t}\n\n\tif m.buf[m.wr].Len() == 0 {\n\t\tm.swap <- struct{}{} // prevent swap empty buffers\n\t}\n\n\treturn m.buf[m.wr].Write(p)\n}", "func (wc *WriteCounter) Write(p []byte) (int, error) {\n\tn := len(p)\n\twc.Total += int64(n)\n\t//fmt.Printf(\"\\rRead %d bytes for a total of %d\\n\", n, wc.Total)\n\treturn n, nil\n}", "func (d *Download) Write(b []byte) (int, error) {\n\tn := len(b)\n\tatomic.AddUint64(&d.size, uint64(n))\n\treturn n, nil\n}", "func (b *FixedBuffer) Write(p []byte) (n int, err error) {\n\t// Slide existing data to beginning.\n\tif b.r > 0 && len(p) > len(b.buf)-b.w {\n\t\tcopy(b.buf, b.buf[b.r:b.w])\n\t\tb.w -= b.r\n\t\tb.r = 0\n\t}\n\n\t// Write new data.\n\tn = copy(b.buf[b.w:], p)\n\tb.w += n\n\tif n < len(p) {\n\t\terr = errWriteFull\n\t}\n\treturn n, err\n}", "func (e EncoderV2) Write(p []byte) (n int, err error) 
{\n\t//log.Trace(\"in encode write\")\n\t//log.Trace(fmt.Sprintf(\"len writing is %v\", len(p)))\n\t//log.Trace(fmt.Sprintf(\"%x\", p))\n\n\tn, err = e.buf.Write(p)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"An error occurred writing to encoder temp buffer\")\n\t\treturn n, err\n\t}\n\n\tlength := e.buf.Len()\n\tfor length >= int(e.chunkSize) {\n\t\tif err := binary.Write(e.w, binary.BigEndian, e.chunkSize); err != nil {\n\t\t\treturn 0, errors.Wrap(err, \"An error occured writing chunksize\")\n\t\t}\n\n\t\tnumWritten, err := e.w.Write(e.buf.Next(int(e.chunkSize)))\n\t\tif err != nil {\n\t\t\terr = errors.Wrap(err, \"An error occured writing a chunk\")\n\t\t}\n\n\t\treturn numWritten, err\n\t}\n\n\treturn n, nil\n}", "func (c *StreamingCall) Write(p []byte) (n int, err error) {\n\tm, err := io.Copy(c.writer, bytes.NewReader(p))\n\tn = int(m)\n\tif f, ok := c.writer.(http.Flusher); ok {\n\t\tf.Flush()\n\t}\n\treturn\n}", "func (s *Stream) Write(data []byte) (n int, err error) {\n\terr = s.WriteData(data, false)\n\tif err == nil {\n\t\tn = len(data)\n\t}\n\treturn\n}", "func (f *BufioWriter) Write(b []byte) (int, error) {\n\tf.Lock()\n\tdefer f.Unlock()\n\treturn f.buf.Write(b)\n}", "func (d *Device) Write(p []byte) (n int, err error) {\n\td.mu.Lock()\n\tdefer d.mu.Unlock()\n\tif d.paused {\n\t\tdefer func() {\n\t\t\t// Recover from panic if the buffer becomes too large\n\t\t\t// nolint\n\t\t\trecover()\n\t\t}()\n\t\treturn d.buf.Write(p)\n\t}\n\treturn d.writer.Write(p)\n}", "func (p *Pipe) Write(buffer []byte) (int, error) {\n\tfor receiver := range p.receivers {\n\t\t// errors from one of the receivers shouldn't affect any others\n\t\treceiver.Write(buffer)\n\t}\n\tbytes := len(buffer)\n\tp.bytes += bytes\n\tp.written.WriteCompleted(bytes)\n\treturn bytes, nil\n}", "func (cw *chanWriter) Write(b []byte) (int, error) {\n cw.downstream <- b\n\n return len(b), nil\n}", "func (ch *IsaChannel) Write(b []byte) (int, error) {\n\treturn 0, nil\n}", "func (f *FixedBuffer) Write(data []byte) (int, error) {\n\treturn f.w.Write(data)\n}", "func (bc BufConn) Write(p []byte) (int, error) {\n\tif bc.IgnoreWrite {\n\t\treturn len(p), nil\n\t}\n\tif bc.OnWrite != nil {\n\t\tbc.OnWrite(p)\n\t\treturn len(p), nil\n\t}\n\treturn bc.Buf.Write(p)\n}", "func (upload *Upload) Write(data []byte) (n int, err error) {\n\tif upload.isClosed() {\n\t\treturn 0, Error.New(\"already closed\")\n\t}\n\treturn upload.writer.Write(data)\n}", "func (b *LimitedBuffer) Write(p []byte) (n int, err error) {\n\tb.writeMutex.Lock()\n\tdefer b.writeMutex.Unlock()\n\n\tgotLen := len(p)\n\tif gotLen >= b.limit {\n\t\tb.buf = p[gotLen-b.limit:]\n\t} else if gotLen > 0 {\n\t\tnewLength := len(b.buf) + gotLen\n\t\tif newLength <= b.limit {\n\t\t\tb.buf = append(b.buf, p...)\n\t\t} else {\n\t\t\ttruncateIndex := newLength - b.limit\n\t\t\tb.buf = append(b.buf[truncateIndex:], p...)\n\t\t}\n\t}\n\treturn gotLen, nil\n}", "func (w *ConcurrentFileWriter) Write(p []byte) (n int, err error) {\n\tshard := runtime_procPin()\n\truntime_procUnpin() // can't hold the lock for long\n\n\tw.locks[shard].Lock()\n\tn, err = w.buffers[shard].Write(p)\n\tw.locks[shard].Unlock()\n\treturn\n}", "func (w *testWriter) Write(p []byte) (n int, err error) {\n\tif w.buf == nil {\n\t\tw.buf = make([]byte, w.chunkSize)\n\t}\n\tn = copy(w.buf, p)\n\tassert.Equal(w.t, w.data[w.offset:w.offset+n], w.buf[:n])\n\tw.offset += n\n\treturn n, nil\n}", "func (b *ByteBuffer) Write(p []byte) (int, error) {\n\tb.B = append(b.B, p...)\n\treturn len(p), nil\n}", "func (w 
*Writer) Write(p []byte) (int, error) {\n\tif w.err != nil {\n\t\treturn 0, w.err\n\t}\n\tif w.bw.Buffered() != 0 {\n\t\tif err := w.Flush(); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\treturn w.cur.Write(p)\n}", "func (w *Writer) Write(buf []byte) (n int, err error) {\n\tw.mtx.Lock()\n\tdefer w.mtx.Unlock()\n\treturn w.buf.Write(buf)\n}", "func (ru *ResumableUpload) Write(p []byte) (int, error) {\n\treturn ru.writeCounter.Write(p)\n}", "func (w *counterWriter) Write(b []byte) (int, error) {\n\tw.n += int64(len(b))\n\treturn len(b), nil\n}", "func (th *Writer) Write(p []byte) (int, error) {\n\tif th.limiter == nil {\n\t\treturn th.w.Write(p)\n\t}\n\tn, err := th.w.Write(p)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\n\tif err := th.limiter.WaitN(th.ctx, n); err != nil {\n\t\treturn n, err\n\t}\n\n\treturn n, err\n}", "func (f *volatileFile) Write(buf unsafe.Pointer, n int, offset int) C.int {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\n\tif offset+n >= len(f.data) {\n\t\tf.data = append(f.data, make([]byte, offset+n-len(f.data))...)\n\t}\n\n\tsize := unsafe.Sizeof(byte(0))\n\n\tfor i := 0; i < n; i++ {\n\t\tj := i + offset\n\t\tf.data[j] = *(*byte)(unsafe.Pointer(uintptr(buf) + size*uintptr(i)))\n\t}\n\n\treturn C.SQLITE_OK\n}", "func (w *mutexWriter) Write(p []byte) (n int, err error) {\n\tw.mu.Lock()\n\tdefer w.mu.Unlock()\n\tn, err = w.w.Write(p)\n\treturn\n}", "func (rwc *noPIReadWriteCloser) Write(p []byte) (n int, err error) {\n\tcopy(rwc.wBuffer[4:], p)\n\tn, err = rwc.ReadWriteCloser.Write(rwc.wBuffer[:len(p)+4])\n\treturn n - 4, err\n}", "func (mock WriteCloser) Write(p []byte) (n int, err error) {\n\tmethodName := \"Write\" // nolint: goconst\n\tif mock.impl.Write != nil {\n\t\treturn mock.impl.Write(p)\n\t}\n\tif mock.callbackNotImplemented != nil {\n\t\tmock.callbackNotImplemented(mock.t, mock.name, methodName)\n\t} else {\n\t\tgomic.DefaultCallbackNotImplemented(mock.t, mock.name, methodName)\n\t}\n\treturn mock.fakeZeroWrite(p)\n}", "func (s *Stream) Write(b []byte) (n int, err error) {\n\ts.sendLock.Lock()\n\tdefer s.sendLock.Unlock()\n\ttotal := 0\n\tfor total < len(b) {\n\t\tn, err := s.write(b[total:])\n\t\ttotal += n\n\t\tif err != nil {\n\t\t\treturn total, err\n\t\t}\n\t}\n\treturn total, nil\n}", "func (s *BufferSink) Write(b []byte) (n int, err error) {\n\treturn s.buf.Write(b)\n}", "func (w *ReadWriter) Write(b []byte) (int, error) {\n\tdefer func() { w.done <- struct{}{} }()\n\tif w.withErr != nil {\n\t\treturn 0, w.withErr\n\t}\n\tw.b = b\n\treturn len(b), nil\n}", "func (npw *Writer) Write(buf []byte) (int, error) {\n\tif npw.closed {\n\t\treturn 0, fmt.Errorf(\"write to closed Writer\")\n\t}\n\n\tbufOffset := int64(0)\n\tbytesLeft := int64(len(buf))\n\n\tfor bytesLeft > 0 {\n\t\tblockIndex := npw.offset / BigBlockSize\n\t\tblockEnd := (blockIndex + 1) * BigBlockSize\n\n\t\twriteEnd := npw.offset + bytesLeft\n\t\tif writeEnd > blockEnd {\n\t\t\twriteEnd = blockEnd\n\t\t}\n\n\t\tbytesWritten := writeEnd - npw.offset\n\t\tblockBufOffset := npw.offset % BigBlockSize\n\t\tcopy(npw.blockBuf[blockBufOffset:], buf[bufOffset:bufOffset+bytesWritten])\n\n\t\tif writeEnd%BigBlockSize == 0 {\n\t\t\terr := npw.Pool.Downstream.Store(BlockLocation{FileIndex: npw.FileIndex, BlockIndex: blockIndex}, npw.blockBuf)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, errors.WithStack(err)\n\t\t\t}\n\t\t}\n\n\t\tbufOffset += bytesWritten\n\t\tnpw.offset += bytesWritten\n\t\tbytesLeft -= bytesWritten\n\t}\n\n\treturn len(buf), nil\n}", "func (w *BufferedFileWriter) Write(p []byte) (n int, 
err error) {\n\tw.lock.Lock()\n\tn, err = w.buffer.Write(p)\n\tif !w.updated && n > 0 && w.buffer.Buffered() > 0 { // checks w.updated to prevent notifying w.updateChan twice\n\t\tw.updated = true\n\t\tw.lock.Unlock()\n\n\t\tselect { // ignores if blocked\n\t\tcase w.updateChan <- struct{}{}:\n\t\tdefault:\n\t\t}\n\t} else {\n\t\tw.lock.Unlock()\n\t}\n\treturn\n}", "func (r *Redactor) Write(p []byte) (n int, err error) {\n\treturn r.redactFunc(p)\n}", "func (b *Buffer) Write(p []byte) (int, error) {\n\treturn b.Append(p), nil\n}", "func (file *Remote) WriteAt(data []byte, off int64) (int, error) {\n\tsize := len(data)\n\tif size > file.maxBufSize() {\n\t\tsize = file.maxBufSize()\n\t}\n\n\tvar total int\n\tfor start := 0; start < len(data); start += size {\n\t\tend := start + size\n\t\tif end > len(data) {\n\t\t\tend = len(data)\n\t\t}\n\n\t\tn, err := file.writePart(data[start:end], off+int64(start))\n\t\ttotal += n\n\t\tif err != nil {\n\t\t\treturn total, err\n\t\t}\n\t}\n\treturn total, nil\n}", "func (p *TBufferedReadTransport) Write(buf []byte) (int, error) {\n\tp.readBuf = bytes.NewBuffer(buf)\n\treturn len(buf), nil\n}", "func (rw *ReadWriter) Write(buffer []byte) (int, error) {\n\tn, err := rw.reader.Flush()\n\tif err != nil {\n\t\treturn n, err\n\t}\n\n\treturn rw.writer.Write(buffer)\n}", "func (cw *cafsWriterAt) Write(p []byte) (n int, err error) {\n\twritten, err := cw.w.WriteAt(p, cw.offset+cw.written) // io.WriteAt is expected to be thread safe\n\tcw.written += int64(written)\n\treturn written, err\n}", "func (r *reopenWriter) Write(p []byte) (n int, err error) {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\tif r.lastErr != nil {\n\t\terr = fmt.Errorf(\"unusable due to %v\", r.lastErr)\n\t\treturn\n\t}\n\treturn r.writer.Write(p)\n}", "func (dc *dummyConn) Write(p []byte) (int, error) { return len(p), nil }", "func (s *UploadStream) Write(data []uint8) (int, error) {\n\t// acquire mutex\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\n\t// check if stream has been closed\n\tif s.closed {\n\t\treturn 0, gridfs.ErrStreamClosed\n\t}\n\n\t// buffer and upload data in chunks\n\tvar written int\n\tfor {\n\t\t// check if done\n\t\tif len(data) == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\t// fill buffer\n\t\tn := copy(s.buffer[s.bufLen:], data)\n\t\ts.bufLen += n\n\n\t\t// resize data\n\t\tdata = data[n:]\n\n\t\t// increment counter\n\t\twritten += n\n\n\t\t// upload if buffer is full\n\t\tif s.bufLen == len(s.buffer) {\n\t\t\terr := s.upload(false)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn written, nil\n}", "func (d Device) Write(buf []byte) (n int, err error) {\n\tfor _, c := range buf {\n\t\td.WriteByte(c)\n\t}\n\treturn len(buf), nil\n}", "func (s *syncWriter) Write(p []byte) (n int, err error) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\treturn s.lw.Write(p)\n}", "func (bc *ByteCounter) Write(p []byte) (int, error) {\n\tn, err := bc.wr.Write(p)\n\tbc.count += int64(n)\n\treturn n, err\n}", "func (c *channelWriter) Write(p []byte) (int, error) {\n\tc.channel <- p\n\treturn len(p), nil\n}", "func (w *syncFileWriter) Write(b []byte) (n int, err error) {\n\tw.mux.Lock()\n\tdefer w.mux.Unlock()\n\treturn w.file.Write(b)\n}", "func (b *Buffer) Write(bt byte) int {\n\tb.bytes[b.length] = bt\n\tb.length++\n\treturn b.length\n}", "func (c *conn) Write(b []byte) (int, error) {\n\tc.wonce.Do(c.sleepLatency)\n\n\tvar total int64\n\tfor len(b) > 0 {\n\t\tvar max int64\n\n\t\tn, err := c.wb.FillThrottle(func(remaining int64) (int64, error) {\n\t\t\tmax = 
remaining\n\t\t\tif l := int64(len(b)); remaining >= l {\n\t\t\t\tmax = l\n\t\t\t}\n\n\t\t\tn, err := c.Conn.Write(b[:max])\n\t\t\treturn int64(n), err\n\t\t})\n\n\t\ttotal += n\n\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tlog.Errorf(\"trafficshape: failed write: %v\", err)\n\t\t\t}\n\t\t\treturn int(total), err\n\t\t}\n\n\t\tb = b[max:]\n\t}\n\n\treturn int(total), nil\n}", "func (n *NetConn) Write([]byte) (numBytes int, err error) {\n\treturn 1, n.errOnWrite\n}", "func (gb *MutexReadWriter) Write(p []byte) (int, error) {\n\tgb.Lock()\n\tdefer gb.Unlock()\n\treturn gb.rw.Write(p)\n}", "func Write(fd uintptr, p unsafe.Pointer, n int32) int32", "func (b *ByteArray) Write(p []byte) (n int, err error) {\n\tfor n = 0; n < len(p); {\n\t\tvar slice []byte\n\t\tslice = b.WriteSlice()\n\t\tif slice == nil {\n\t\t\tpanic(\"ASSERT\")\n\t\t}\n\n\t\twritten := copy(slice, p[n:])\n\t\tb.writePos = b.seek(b.writePos, written, SEEK_CUR)\n\t\tn += written\n\t}\n\treturn n, err\n}", "func (w *Writer) Write(p []byte) (n int, err error) {\n\treturn w.buf.Write(p)\n}", "func (f *ClientFD) Write(ctx context.Context, src []byte, offset uint64) (uint64, error) {\n\tvar req PWriteReq\n\t// maxDataWriteSize represents the maximum amount of data we can write at\n\t// once (maximum message size - metadata size present in req). Uninitialized\n\t// req.SizeBytes() correctly returns the metadata size only (since the write\n\t// buffer is empty).\n\tmaxDataWriteSize := uint64(f.client.maxMessageSize) - uint64(req.SizeBytes())\n\treturn chunkify(maxDataWriteSize, src, func(buf []byte, curOff uint64) (uint64, error) {\n\t\treq = PWriteReq{\n\t\t\tOffset: primitive.Uint64(offset + curOff),\n\t\t\tFD: f.fd,\n\t\t\tNumBytes: primitive.Uint32(len(buf)),\n\t\t\tBuf: buf,\n\t\t}\n\n\t\tvar resp PWriteResp\n\t\tctx.UninterruptibleSleepStart(false)\n\t\terr := f.client.SndRcvMessage(PWrite, uint32(req.SizeBytes()), req.MarshalBytes, resp.UnmarshalUnsafe, nil)\n\t\tctx.UninterruptibleSleepFinish(false)\n\t\treturn resp.Count, err\n\t})\n}", "func (c *Client) Write(path gfs.Path, offset gfs.Offset, data []byte) error {\n\tvar f gfs.GetFileInfoReply\n\terr := util.Call(c.master, \"Master.RPCGetFileInfo\", gfs.GetFileInfoArg{path}, &f)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif int64(offset/gfs.MaxChunkSize) > f.Chunks {\n\t\treturn fmt.Errorf(\"write offset exceeds file size\")\n\t}\n\n\tbegin := 0\n\tfor {\n\t\tindex := gfs.ChunkIndex(offset / gfs.MaxChunkSize)\n\t\tchunkOffset := offset % gfs.MaxChunkSize\n\n\t\thandle, err := c.GetChunkHandle(path, index)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\twriteMax := int(gfs.MaxChunkSize - chunkOffset)\n\t\tvar writeLen int\n\t\tif begin+writeMax > len(data) {\n\t\t\twriteLen = len(data) - begin\n\t\t} else {\n\t\t\twriteLen = writeMax\n\t\t}\n\n\t\t//wait := time.NewTimer(gfs.ClientTryTimeout)\n\t\t//loop:\n\t\tfor {\n\t\t\t//select {\n\t\t\t//case <-wait.C:\n\t\t\t// err = fmt.Errorf(\"Write Timeout\")\n\t\t\t// break loop\n\t\t\t//default:\n\t\t\t//}\n\t\t\terr = c.WriteChunk(handle, chunkOffset, data[begin:begin+writeLen])\n\t\t\tif err == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlog.Warning(\"Write \", handle, \" connection error, try again \", err)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\toffset += gfs.Offset(writeLen)\n\t\tbegin += writeLen\n\n\t\tif begin == len(data) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn nil\n}", "func (w *Writer) Write(p []byte) (n int, err error) {\n\tif w.closed {\n\t\terr = 
ErrWriteAfterClose\n\t\treturn\n\t}\n\toverwrite := false\n\tif int64(len(p)) > w.nb {\n\t\tp = p[0:w.nb]\n\t\toverwrite = true\n\t}\n\tn, err = w.w.Write(p)\n\tw.nb -= int64(n)\n\tif err == nil && overwrite {\n\t\terr = ErrWriteTooLong\n\t\treturn\n\t}\n\tw.err = err\n\treturn\n}", "func (b *buf) Write(p []byte) (n int, err error) {\n\tb.b = b.base[b.i : b.i+len(p)]\n\tcopy(b.b, p)\n\tb.i += len(p)\n\t//fmt.Printf(\"Write: len(b.b)=%d, len(p)=%d, % #X\\n\", len(b.b), len(p), p)\n\t//fmt.Printf(\"b=%#v\\n\", b)\n\treturn len(p), nil\n}", "func (b *QueueBuffer) Write(p []byte) (int, error) {\n\t// io.Writer shouldn't modify the buf it's handed (even temporarily)\n\tcp := make([]byte, len(p))\n\tif n := copy(cp, p); n != len(p) {\n\t\treturn 0, fmt.Errorf(\"failed to make copy of provided buf\")\n\t}\n\t(*b) = append(cp, (*b)...)\n\treturn len(p), nil\n}", "func (w *WrappedWriter) Write(data []byte) error {\n\tif len(data) == 0 {\n\t\treturn nil\n\t}\n\n\t_, err := w.bw.Write(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tn := len(data)\n\t// Increment file position pointer\n\tw.n += int64(n)\n\n\treturn nil\n}", "func (b *Buffer) Write(pkt []byte) (n int, err error) {\n\tb.Lock()\n\tdefer b.Unlock()\n\n\tif b.closed {\n\t\terr = io.EOF\n\t\treturn\n\t}\n\n\tif !b.bound {\n\t\tpacket := make([]byte, len(pkt))\n\t\tcopy(packet, pkt)\n\t\tb.pPackets = append(b.pPackets, pendingPackets{\n\t\t\tpacket: packet,\n\t\t\tarrivalTime: time.Now().UnixNano(),\n\t\t})\n\t\treturn\n\t}\n\n\tb.calc(pkt, time.Now().UnixNano())\n\n\treturn\n}", "func (spi *SPI) Write(data []byte) (n int, err error) {\n\treturn spi.file.Write(data)\n}", "func (r *ThrottledWriteCloser) Write(buf []byte) (int, error) {\n\tsubBuff, delay, err := getBufferAndDelay(r.pool, r.id, len(buf))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\ttime.Sleep(delay)\n\tif subBuff > len(buf) {\n\t\tsubBuff = len(buf)\n\t}\n\tn, err := r.origWriteCloser.Write(buf[:subBuff])\n\n\treturn n, err\n}", "func (mc *MockConn) Write(b []byte) (n int, err error) {\n\tif mc.closed {\n\t\treturn 0, errors.New(\"Connection closed.\")\n\t}\n\n\tdata := make([]byte, len(b))\n\tcopy(data, b)\n\tmc.sendChan <- data\n\treturn len(b), nil\n}", "func (b *blockWriter) Write(input []byte) (int, error) {\n\tif b.err != nil {\n\t\treturn 0, b.err\n\t}\n\n\t// fill buffer if possible\n\tn := b.buf[0] // n is block size\n\tcopied := copy(b.buf[n+1:], input)\n\tb.buf[0] = n + byte(copied)\n\n\tif n+byte(copied) < 255 {\n\t\t// buffer not full; don't write yet\n\t\treturn copied, nil\n\t}\n\n\t// loop precondition: buffer is full\n\tfor {\n\t\tvar n2 int\n\t\tn2, b.err = b.w.Write(b.buf[:])\n\t\tif n2 < 256 && b.err == nil {\n\t\t\tb.err = io.ErrShortWrite\n\t\t}\n\t\tif b.err != nil {\n\t\t\treturn copied, b.err\n\t\t}\n\n\t\tn := copy(b.buf[1:], input[copied:])\n\t\tb.buf[0] = byte(n)\n\t\tcopied += n\n\t\tif n < 255 {\n\t\t\t// buffer not full\n\t\t\treturn copied, nil\n\t\t}\n\t}\n\n\t// postcondition: b.buf contains a block with n < 255, or b.err is set\n}", "func (rws *MemoryRws) Write(p []byte) (n int, err error) {\n\tif p == nil {return 0, nil}\n\n\twriteEnd := len(p) + rws.pos\n\textra := writeEnd - len(rws.data)\n\tif extra > 0 {\n\t\trws.data = append(rws.data, make([]byte, extra)...)\n\t}\n\tn = 0\n\ti := rws.pos\n\tfor ; i < writeEnd; i++ {\n\t\trws.data[i] = p[n]\n\t\tn++\n\t}\n\trws.pos = i\n\treturn n, nil\n}", "func (w *wrapper) Write(path string, buff []byte, ofst int64, fd uint64) int {\n\tfh, unlock, ok := w.getFileDescriptorWithLock(fd)\n\tif !ok 
{\n\t\treturn -fuse.EINVAL\n\t}\n\tif wa, ok := fh.(io.WriterAt); ok {\n\t\tunlock()\n\t\tn, err := wa.WriteAt(buff, ofst)\n\t\tif err != nil {\n\t\t\treturn convertError(err)\n\t\t}\n\t\treturn n\n\t}\n\tdefer unlock()\n\tif _, err := fh.Seek(ofst, io.SeekStart); err != nil {\n\t\treturn convertError(err)\n\t}\n\tn, err := fh.Write(buff)\n\tif err != nil {\n\t\treturn convertError(err)\n\t}\n\treturn n\n}", "func (m *Memory) Write(b []byte) (n int, err error) {\n if m == nil {\n return 0, os.ErrInvalid\n }\n\n\t if len(b) > len(m.buf) {\n\t \tn = len(m.buf)\n\t } else {\n\t \tn = len(b)\n\t }\n\n\t\tfor i := 0; i < n; i++ {\n\t\t\tm.buf[i] = b[i]\n\t\t}\n\t m.buf = m.buf[n:]\n\n return n, nil\n}", "func (s *SeekerWrapper) WriteAt(p []byte, offset int64) (int, error) { return s.s.WriteAt(p, offset) }", "func (x *Writer) Write(p []byte) (int, error) {\n\tvar (\n\t\toriginalXferLen = len(p)\n\t\txferLen int\n\t)\n\n\t// Write caller's data to our internal FIFO\n\tx.fifo.Write(p)\n\n\t// Only write hex records 'width' wide. Residual will be\n\t// held in the FIFO until a follow-up write(), Flush() or\n\t// Close() operation.\n\tfor x.fifo.Len() >= x.width {\n\t\terr := x.emitDataRecord(x.fifo.Next(x.width))\n\t\tif err != nil {\n\t\t\treturn xferLen, err\n\t\t}\n\t\txferLen += x.width\n\t}\n\n\treturn originalXferLen, nil\n}" ]
[ "0.70134175", "0.68073744", "0.66656727", "0.6533387", "0.6487888", "0.64106476", "0.63894737", "0.6329203", "0.63271713", "0.6279886", "0.62738836", "0.6247574", "0.624438", "0.6208705", "0.61829495", "0.61191803", "0.6115804", "0.6082525", "0.60441333", "0.6040223", "0.602944", "0.6008687", "0.6004605", "0.598822", "0.5973278", "0.59321153", "0.59012616", "0.5886409", "0.58581996", "0.584971", "0.5849057", "0.5839228", "0.5827282", "0.5822035", "0.5820973", "0.5819899", "0.58088374", "0.5775378", "0.57702386", "0.5769391", "0.57653534", "0.576388", "0.5754385", "0.5752119", "0.5741374", "0.5740964", "0.5730288", "0.5721921", "0.5711888", "0.56757283", "0.56629694", "0.5658295", "0.5655664", "0.564769", "0.5644293", "0.564371", "0.56426567", "0.5639872", "0.56300056", "0.56288576", "0.56282663", "0.5626815", "0.56149924", "0.5614947", "0.56080127", "0.5601223", "0.55993605", "0.55972964", "0.55967265", "0.5591762", "0.55839145", "0.55831456", "0.5581521", "0.5577397", "0.5571328", "0.5569538", "0.5555414", "0.55503225", "0.5549775", "0.5545051", "0.55437595", "0.5539525", "0.5535983", "0.55344284", "0.55313766", "0.5528264", "0.55226237", "0.55225205", "0.55147433", "0.5513786", "0.55118346", "0.54873335", "0.54829264", "0.5478227", "0.5478179", "0.54717076", "0.54701686", "0.54683036", "0.5463598", "0.5453681" ]
0.6562005
3
Peek reads data from a circular buffer but does not update the tail index. Subsequent calls to Peek will produce the same results. The number of bytes read is returned.
func (c *CircBuf) Peek(buf []byte) int {
	var count int
	var tail int = c.tail // Use a local tail variable
	var num int
	for {
		count = c.countToEndArg(tail)
		if len(buf)-num < count {
			count = len(buf) - num
		}
		if count <= 0 {
			break
		}
		copy(buf[num:num+count], c.buf[tail:tail+count])
		tail = (tail + count) & (len(c.buf) - 1)
		num += count
	}
	return num
}
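Peek advances only its local tail copy and never writes it back to c.tail, which is exactly why repeated calls return the same bytes; the CircBuf.Read and Consume snippets that appear among this row's negatives are the mutating counterparts. A small same-package usage sketch under the assumptions above (power-of-two capacity, composite-literal construction invented for illustration, fmt imported):

func exampleCircBufPeek() {
	c := &CircBuf{buf: make([]byte, 8)} // hypothetical setup; capacity must be a power of two
	c.Write([]byte("abc"))

	p := make([]byte, 3)
	fmt.Println(c.Peek(p), string(p)) // 3 abc; tail untouched
	fmt.Println(c.Peek(p), string(p)) // 3 abc; same result on the repeat call

	c.Consume(3)           // the mutating counterpart finally advances c.tail
	fmt.Println(c.Peek(p)) // 0; nothing left to peek
}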
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *RingBuffer) Peek(p []byte) (int, error) {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\treturn c.peek(p)\n}", "func (c *RingBuffer) peek(p []byte) (int, error) {\n\tif c.len == 0 && c.closed {\n\t\treturn 0, io.EOF\n\t}\n\tl := len(p)\n\tif l > c.len {\n\t\tl = c.len\n\t}\n\tn := 0\n\tleftBeforeEnd := len(c.buf) - c.index\n\tif l < leftBeforeEnd {\n\t\tn = copy(p, c.buf[c.index:c.index+l])\n\t} else {\n\t\tn = copy(p, c.buf[c.index:])\n\t\tn += copy(p[n:], c.buf[:l-n])\n\t}\n\treturn n, nil\n}", "func (b *defaultByteBuffer) Peek(n int) (buf []byte, err error) {\n\tif b.status&BitReadable == 0 {\n\t\treturn nil, errors.New(\"unreadable buffer, cannot support Peek\")\n\t}\n\tif err = b.readableCheck(n); err != nil {\n\t\treturn nil, err\n\t}\n\treturn b.buff[b.readIdx : b.readIdx+n], nil\n}", "func (b *PeekableReader) Peek() (byte, error) {\n\tif !b.full {\n\t\tbuf := []byte{0}\n\t\tif _, err := b.rd.Read(buf); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tb.byt = buf[0]\n\t\tb.full = true\n\t}\n\treturn b.byt, nil\n}", "func (r *objReader) peek(n int) ([]byte, error) {\n\tif r.err != nil {\n\t\treturn nil, r.err\n\t}\n\tif r.offset >= r.limit {\n\t\tr.error(io.ErrUnexpectedEOF)\n\t\treturn nil, r.err\n\t}\n\tb, err := r.b.Peek(n)\n\tif err != nil {\n\t\tif err != bufio.ErrBufferFull {\n\t\t\tr.error(err)\n\t\t}\n\t}\n\treturn b, err\n}", "func (q *BytesQueue) Peek() ([]byte, error) {\n\tdata, _, err := q.peek(q.head)\n\treturn data, err\n}", "func (q *Queue) Peek() (int, error) {\r\n\tif len(q.data) == 0 {\r\n\t\treturn 0, fmt.Errorf(\"Queue is empty\")\r\n\t}\r\n\treturn q.data[0], nil\r\n}", "func (r *reader) peek() byte {\n\tif r.eof() {\n\t\treturn 0\n\t}\n\treturn r.s[r.p.Offset]\n}", "func (q *FileQueue) Peek() (int64, []byte, error) {\n\tif q.IsEmpty() {\n\t\treturn -1, nil, nil\n\t}\n\tindex := q.FrontIndex\n\n\tbb, err := q.peek(index)\n\treturn index, bb, err\n}", "func (self *bipbuf_t) Peek(size uint32) []byte{\n\tif self.size < self.a_start + size {\n\t\treturn nil\n\t}\n\n\tif self.IsEmpty() {\n\t\treturn nil\n\t}\n\n\treturn self.data[0:self.a_start]\n}", "func (kcp *KCP) PeekSize() (size int) {\n\tif len(kcp.recvQueue) <= 0 {\n\t\treturn -1\n\t}\n\n\tseg := kcp.recvQueue[0]\n\tif seg.frg == 0 {\n\t\treturn len(seg.dataBuffer)\n\t}\n\n\tif len(kcp.recvQueue) < int(seg.frg+1) {\n\t\treturn -1\n\t}\n\n\tfor idx := range kcp.recvQueue {\n\t\tseg := kcp.recvQueue[idx]\n\t\tsize += len(seg.dataBuffer)\n\t\tif seg.frg == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn\n}", "func (q *Queue) Peek() int {\n\tif q.start.next != nil {\n\t\t_iteratePeek(q.start)\n\t}\n\treturn q.start.val\n}", "func (q *Queue) Peek() ([]byte, error) {\n\tq.RLock()\n\tdefer q.RUnlock()\n\titem, err := q.readItemByID(q.head + 1)\n\treturn item.Value, err\n}", "func (kcp *KCP) PeekSize() (length int) {\n\tif len(kcp.rcv_queue) == 0 {\n\t\treturn -1\n\t}\n\n\tseg := &kcp.rcv_queue[0]\n\tif seg.frg == 0 {\n\t\treturn seg.data.Len()\n\t}\n\n\tif len(kcp.rcv_queue) < int(seg.frg+1) {\n\t\treturn -1\n\t}\n\n\tfor k := range kcp.rcv_queue {\n\t\tseg := &kcp.rcv_queue[k]\n\t\tlength += seg.data.Len()\n\t\tif seg.frg == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}", "func (this *MyQueue) Peek() int {\n\tif len(this.b) == 0 {\n\t\tfor len(this.a) > 0 {\n\t\t\tthis.b = append(this.b, this.a[len(this.a)-1])\n\t\t\tthis.a = this.a[:len(this.a)-1]\n\t\t}\n\t}\n\treturn this.b[len(this.b)-1]\n}", "func (r *Ring) Peek() interface{} {\n\tr.checkInit()\n\tif r.head == -1 {\n\t\treturn nil\n\t}\n\treturn r.get(r.tail)\n}", 
"func (ring *ringBuffer) peek() []byte {\n\tring.mutex.Lock()\n\n\t/* wait for the data-ready variable to make us happy */\n\tfor !ring.dataReady {\n\t\tring.dataReadyCond.Wait()\n\t}\n\tring.mutex.Unlock()\n\t/* find the address we want */\n\taddress := ring.datagrams[ring.datagramSize+ring.baseData:]\n\treturn address\n}", "func (this *MyQueue) Peek() int {\n\tif len(this.outStack) == 0 {\n\t\tthis.inToOut()\n\t}\n\treturn this.outStack[len(this.outStack)-1]\n}", "func (s *Scanner) peek() byte {\n\tif s.rdOffset < len(s.src) {\n\t\treturn s.src[s.rdOffset]\n\t}\n\treturn 0\n}", "func (this *MyQueue) Peek() int {\n\tif len(this.out) != 0 {\n\t\treturn this.out[len(this.out)-1]\n\t}\n\tfor len(this.in) > 0 {\n\t\tthis.out.Push(this.in.Pop())\n\t}\n\treturn this.out[len(this.out)-1]\n}", "func (q *BytesQueue) peek(index int) ([]byte, int, error) {\n\terr := q.peekCheckErr(index)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\tblockSize, n := binary.Uvarint(q.array[index:])\n\treturn q.array[index+n : index+int(blockSize)], int(blockSize), nil\n}", "func (this *MyQueue) Peek() int {\n\tif this.sIn.isEmpty() && this.sOut.isEmpty() {\n\t\treturn -1\n\t} else if this.sOut.isEmpty() {\n\t\tthis.inToOut()\n\t}\n\n\treturn this.sOut.top()\n}", "func (c *RingBuffer) Read(p []byte) (int, error) {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\tn, err := c.peek(p)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\treturn c.consume(n), nil\n}", "func (c *CircBuf) Consume(nbytes int) int {\n\tvar count int\n\tvar num int\n\tfor {\n\t\tcount = c.countToEnd()\n\t\tif nbytes - num < count {\n\t\t\tcount = nbytes - num\n\t\t}\n\t\tif count <= 0 {\n\t\t\tbreak\n\t\t}\n\t\tc.tail = (c.tail + count) & (len(c.buf) - 1)\n\t\tnum += count\n\t}\n\treturn num\n}", "func (q *PriorityQueue) Peek() interface{} {\n\tif q.count <= 0 {\n\t\tpanic(\"queue: Peek() called on empty queue\")\n\t}\n\treturn q.buf[q.head]\n}", "func (this *MyQueue) Peek() int {\n\tif len(this.outStack) == 0 {\n\t\tthis.in2Out()\n\t}\n\n\treturn this.outStack[len(this.outStack)-1]\n}", "func (src *Source) PeekN(n int) []byte {\n\tif n <= len(src.current) {\n\t\treturn src.current[:n]\n\t}\n\tif src.reader == nil {\n\t\treturn src.current\n\t}\n\tsrc.StoreSavepoint()\n\tpeeked := src.ReadN(n)\n\tsrc.RollbackToSavepoint()\n\treturn peeked\n}", "func (bs *ByteScanner) Peek(offset int) (byte, error) {\n\t// [...]\n\treturn 0, io.EOF\n}", "func (this *MyQueue) Peek() int {\n\treturn this.q[len(this.q)-1]\n}", "func (s *BufferedFrameReader) Peek() (*Frame, error) {\n\tif s.Next != nil {\n\t\treturn s.Next, nil\n\t}\n\n\t_, _, err, _ := s.Reader.NextFrame(&s.TmpFrame)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts.Next = &s.TmpFrame\n\treturn s.Next, nil\n}", "func (s *Scanner) peek() (byte, error) {\n\tch, err := s.reader.Peek(1)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn ch[0], nil\n}", "func (q *MyQueue) Peek() int {\n\treturn q.list[0] // 对空数组取值时自动panic\n}", "func (q *Queue) Peek() (interface{}, error) {\n\tif q.size == 0 {\n\t\treturn nil, errors.New(\"cannot peek an empty queue\")\n\t}\n\treturn *q.head.item, nil\n}", "func (q *Queue) Peek() interface{} {\n\tif q.length == 0 {\n\t\treturn nil\n\t}\n\treturn q.start.value\n}", "func (q *Queue) Peek() interface{} {\n\tif q.length == 0 {\n\t\treturn nil\n\t}\n\treturn q.start.value\n}", "func (q *RingQueue) Peek() interface{} {\n\tif q.IsEmpty() {\n\t\treturn nil\n\t}\n\n\te := q.data[q.front]\n\treturn e\n}", "func (q *Queue) Peek() *Element {\n\tif q.Count <= 0 {\n\t\treturn 
nil\n\t}\n\treturn q.Tail\n}", "func (this *Queue) Peek() interface{} {\r\n\tif this.length == 0 {\r\n\t\treturn nil\r\n\t}\r\n\treturn this.start.value\r\n}", "func (s *lexStream) peek() byte {\r\n\tif s.c >= len(s.input) {\r\n\t\ts.pos[len(s.pos)-1].line = 0\r\n\t\treturn eof\r\n\t}\r\n\treturn s.input[s.c]\r\n}", "func (s *Stack) Peek() (int, error) {\n\tif s.Empty() {\n\t\treturn 0, fmt.Errorf(\"stack is empty\")\n\t}\n\n\treturn (*s)[len(*s)-1], nil\n}", "func (r *Ring) Peek(ctx context.Context) (string, error) {\n\tresponse, err := r.client.Get(ctx, r.Name, clientv3.WithFirstKey()...)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(response.Kvs) == 0 {\n\t\treturn \"\", ErrEmptyRing\n\t}\n\treturn string(response.Kvs[0].Value), nil\n}", "func (rb *RingBuffer) Read(p []byte) (int, error) {\n\texpected := len(p)\n\tn := 0\n\n\tif rb.writeOff > rb.readOff {\n\t\tn += copy(p, rb.data[rb.readOff:rb.writeOff])\n\t} else {\n\t\tpos := copy(p, rb.data[rb.readOff:])\n\t\tn += pos\n\n\t\tif n < expected {\n\t\t\tn += copy(p[pos:], rb.data[:rb.writeOff])\n\t\t}\n\t}\n\n\trb.count -= n\n\trb.readOff += (rb.readOff + n) % rb.cap\n\n\treturn n, nil\n}", "func (r *RingT[T]) Peek(i int) *T {\n\tlength := (r.head - r.tail) & r.mask\n\tui := uint(i)\n\tif ui >= length {\n\t\treturn nil\n\t}\n\tj := (r.tail + ui) & r.mask\n\treturn r.items[j]\n}", "func (s *FIFO) Peek() (T, bool) {\n\tif s.front == nil {\n\t\treturn nil, false\n\t}\n\treturn s.front.v, true\n}", "func (q *arrayQueue) Peek() interface{} {\n\tif q.listSize == 0 {\n\t\treturn nil\n\t}\n\n\treturn q.frontNode.block[q.frontNodeIndex]\n}", "func (stack *Stack) Peek() int {\n\tif stack.length == 0 {\n\t\treturn 0\n\t}\n\treturn stack.top.value\n}", "func (m *minifier) peek() int {\n\tm.theLookahead = m.get()\n\treturn m.theLookahead\n}", "func (queue *Queue) Peek() interface{} {\n\treturn queue.data[0]\n}", "func (this *MyQueue) Peek() int {\n\tif this.out.Len() == 0 {\n\t\tfor v := this.in.Pop(); v != nil; v = this.in.Pop() {\n\t\t\tthis.out.Push(v)\n\t\t}\n\t}\n\treturn this.out.Peek().(int)\n}", "func (q *Queue) Peek() interface{} {\n\tq.RLock()\n\tdefer q.RUnlock()\n\n\tif q.head == nil {\n\t\treturn nil\n\t}\n\titem := q.head\n\treturn item.value\n}", "func (p *PeekingSource) Peek() (interface{}, error) {\n\tp.mu.RLock()\n\tvar err error\n\n\tif p.rec != nil {\n\t\tdefer p.mu.RUnlock()\n\t\treturn p.rec, nil\n\t} else {\n\t\t// Exchange read lock for write lock and recheck p.rec\n\t\tp.mu.RUnlock()\n\t\tp.mu.Lock()\n\t\tif p.rec != nil {\n\t\t\tp.mu.Unlock()\n\t\t\treturn p.Peek()\n\t\t}\n\t\tp.rec, err = p.Source.Record()\n\t\tdefer p.mu.Unlock()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"getting next record for peeking\")\n\t\t} else {\n\t\t\treturn p.rec, nil\n\t\t}\n\t}\n}", "func (d *Decoder) Peek() Type {\n\tdefer func() { d.lastCall = peekCall }()\n\tif d.lastCall == readCall {\n\t\td.value, d.err = d.Read()\n\t}\n\treturn d.value.typ\n}", "func (b *QueueBuffer) Read(p []byte) (int, error) {\n\tif x := len(*b) - len(p); x >= 0 {\n\t\tn := copy(p, (*b)[x:])\n\t\t*b = (*b)[:x]\n\t\treturn n, nil\n\t}\n\tn := copy(p, *b)\n\t*b = nil\n\treturn n, io.EOF\n}", "func (q Queue) Peek() (interface{}, error) {\n\tif len(q) <= 0 {\n\t\treturn nil, errors.New(\"Can't peek() from empty queue!\")\n\t}\n\treturn q[0], nil\n}", "func (cr *ChainReader) Peek() *Record {\n\treturn cr.readers[cr.current].Peek()\n}", "func (src *Source) Peek() []byte {\n\treturn src.current\n}", "func (q *SensorStack) Peek() (top *SensorReading, 
err error) {\n\ttop = &errorReading\n\tx := q.Len() - 1\n\tif x < 0 {\n\t\terr = errors.New(\"Empty Stack\")\n\t\treturn\n\t}\n\ttop = (*q)[x]\n\treturn\n}", "func (p *Stream) peekSkip() (int, byte) {\n\top := p.readFrame[p.readIndex]\n\tif skip := readSkipArray[op]; skip > 0 && p.CanRead() {\n\t\treturn skip, op\n\t} else if skip < 0 {\n\t\treturn 0, op\n\t} else if p.isSafetyReadNBytesInCurrentFrame(5) {\n\t\tb := p.readFrame[p.readIndex:]\n\t\treturn int(uint32(b[1]) |\n\t\t\t(uint32(b[2]) << 8) |\n\t\t\t(uint32(b[3]) << 16) |\n\t\t\t(uint32(b[4]) << 24)), op\n\t} else if p.hasNBytesToRead(5) {\n\t\tb := p.peekNBytesCrossFrameUnsafe(5)\n\t\treturn int(uint32(b[1]) |\n\t\t\t(uint32(b[2]) << 8) |\n\t\t\t(uint32(b[3]) << 16) |\n\t\t\t(uint32(b[4]) << 24)), op\n\t} else {\n\t\treturn 0, op\n\t}\n}", "func (q *MyQueue) Peek() int {\n\tfront := q.list.Front()\n\tres := front.Value.(int)\n\treturn res\n}", "func (queue *LinkedQueue) Peek() interface{} {\n\toutObject := queue.head\n\tif outObject == nil {\n\t\treturn nil\n\t}\n\treturn outObject.value\n}", "func (sb *SeekableBuffer) Read(p []byte) (n int, err error) {\n\tdefer func() {\n\t\tif state := recover(); state != nil {\n\t\t\terr = state.(error)\n\t\t}\n\t}()\n\n\tif sb.position >= len64(sb.data) {\n\t\treturn 0, io.EOF\n\t}\n\n\tn = copy(p, sb.data[sb.position:])\n\tsb.position += int64(n)\n\n\treturn n, nil\n\n}", "func (this *MyQueue) Peek() int {\n\tif len(this.popStack) == 0 {\n\t\tfor len(this.pushStack) > 0 {\n\t\t\t// pop\n\t\t\ttop := this.pushStack[len(this.pushStack)-1]\n\t\t\tthis.pushStack = this.pushStack[:len(this.pushStack)-1]\n\n\t\t\tthis.popStack = append(this.popStack, top)\n\t\t}\n\t}\n\tif len(this.popStack) == 0 {\n\t\treturn -1\n\t}\n\ttop := this.popStack[len(this.popStack)-1]\n\treturn top\n}", "func (c *CircBuf) Read(buf []byte) int {\n\tvar count int\n\tvar num int\n\tfor {\n\t\tcount = c.countToEnd()\n\t\tif len(buf) - num < count {\n\t\t\tcount = len(buf) - num\n\t\t}\n\t\tif count <= 0 {\n\t\t\tbreak\n\t\t}\n\t\tcopy(buf[num : num + count], c.buf[c.tail : c.tail + count])\n\t\tc.tail = (c.tail + count) & (len(c.buf) - 1)\n\t\tnum += count\n\t}\n\treturn num\n}", "func (b *Buffer) Read(out []byte) (n int, err error) {\n\tif b.readCursor >= b.Size() {\n\t\t// we read the entire buffer, let's loop back to the beginning\n\t\tb.readCursor = 0\n\t} else if b.readCursor+int64(len(out)) > b.Size() {\n\t\t// we don't have enough data in our buffer to fill the passed buffer\n\t\t// we need to do multiple passes\n\t\tn := copy(out, b.data[b.offset+b.readCursor:])\n\t\tb.readCursor += int64(n)\n\t\t// TMP check, should remove\n\t\tif b.readCursor != b.Size() {\n\t\t\tpanic(fmt.Sprintf(\"off by one much? 
%d - %d\", b.readCursor, b.Size()))\n\t\t}\n\t\tn2, _ := b.Read(out[n:])\n\t\tb.readCursor += int64(n2)\n\t\treturn int(n + n2), nil\n\t}\n\tn = copy(out, b.data[b.offset+b.readCursor:])\n\treturn\n}", "func (this *MyQueue) Peek() int {\n\treturn this.Stack[0]\n}", "func (p *parametizer) peek() (byte, error) {\n\tif p.pos >= len(p.z) {\n\t\treturn 0, io.EOF\n\t}\n\treturn p.z[p.pos], nil\n}", "func (q *Queue) Peek(num uint) (operation.QueuedOperationsAtTime, error) {\n\tif q.State() != lifecycle.StateStarted {\n\t\treturn nil, lifecycle.ErrNotStarted\n\t}\n\n\tq.mutex.RLock()\n\tdefer q.mutex.RUnlock()\n\n\tn := int(num)\n\tif len(q.pending) < n {\n\t\tn = len(q.pending)\n\t}\n\n\treturn asQueuedOperations(q.pending[0:n]), nil\n}", "func (b *FixedBuffer) Read(p []byte) (n int, err error) {\n\tif b.r == b.w {\n\t\treturn 0, errReadEmpty\n\t}\n\tn = copy(p, b.buf[b.r:b.w])\n\tb.r += n\n\tif b.r == b.w {\n\t\tb.r = 0\n\t\tb.w = 0\n\t}\n\treturn n, nil\n}", "func (Q *Queue) Peek() (interface{}, error) {\n\tif Q.IsEmpty() {\n\t\treturn nil, errors.New(\"queue is empty. You can't display element :(\")\n\t}\n\treturn Q.head.value, nil\n}", "func (s *Stack) Peek() interface{} {\n\tif s.head == nil {\n\t\treturn nil\n\t}\n\treturn s.head.data\n}", "func (this *MyCircularDeque) GetRear() int {\n if this.IsEmpty() {\n return -1\n }\n return this.data[(this.tail - 1 + this.capacity) % this.capacity]\n}", "func (rr *Reader) Peek() (Type, error) {\n\tb, err := rr.br.Peek(1)\n\tif err != nil {\n\t\treturn TypeInvalid, err\n\t}\n\n\treturn types[b[0]], nil\n}", "func (stack *Stack) Peek() (int, bool) {\n\tif stack.top == nil {\n\t\treturn 0, false\n\t}\n\n\treturn stack.top.data, true\n}", "func (q *Queue) Peek() interface{} {\n\treturn q.data.Front().Value\n}", "func (q *Queue) Peek() (interface{}, error) {\n\tif q.IsEmpty() {\n\t\treturn nil, fmt.Errorf(\"the queue is empty\")\n\t}\n\treturn q.head.key, nil\n}", "func peekNext(in *bufio.Reader) (rune, error) {\n\tif err := skipSpaces(in); err != nil {\n\t\treturn rune(0), err\n\t}\n\tr, _, err := in.ReadRune()\n\tif err != nil {\n\t\treturn rune(0), err\n\t}\n\tin.UnreadRune()\n\treturn r, nil\n}", "func (s *Stack) Peek() int {\n\treturn s.x[len(s.x)-1]\n}", "func (s *SliceQueue) Peek() (val string, ok bool) {\n\tif s.Len() == 0 {\n\t\treturn \"\", false\n\t}\n\n\treturn s.elements[0], true\n}", "func (a *reader) Read(p []byte) (n int, err error) {\n\tif a.err != nil {\n\t\treturn 0, a.err\n\t}\n\t// Swap buffer and maybe return error\n\terr = a.fill()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t// Copy what we can\n\tn = copy(p, a.cur.buffer())\n\ta.cur.inc(n)\n\n\tif a.cur.isEmpty() {\n\t\t// Return current, so a fetch can start.\n\t\tif a.cur != nil {\n\t\t\t// If at end of buffer, return any error, if present\n\t\t\ta.err = a.cur.err\n\t\t\ta.reuse <- a.cur\n\t\t\ta.cur = nil\n\t\t}\n\t\treturn n, a.err\n\t}\n\treturn n, nil\n}", "func (r *RingBuffer) Read(p []byte) (n int, err error) {\n\tif len(p) == 0 {\n\t\treturn 0, nil\n\t}\n\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tn, err = r.read(p)\n\treturn\n}", "func (q *Queue) Peek() *linkedlist.Node {\n\treturn q.list.Tail\n}", "func (s *Stack) Peek() int {\n\tif s.length == 0 {\n\t\treturn MIN\n\t}\n\treturn s.top.value\n}", "func (b *Buffer) Read(data []byte, c Cursor) (n int, next Cursor, err error) {\n\tb.mu.RLock()\n\tdefer b.mu.RUnlock()\n\n\tseq, offset := c.seq, c.offset\n\n\tif seq >= b.nextSeq || offset > b.last {\n\t\treturn 0, next, ErrNotArrived\n\t}\n\n\tf := b.frame(offset)\n\tif 
f.size() == 0 || f.seq() != seq {\n\t\treturn b.readFirst(data)\n\t}\n\n\treturn b.readOffset(data, offset)\n}", "func (this *MyQueue) Peek() int {\n\treturn this.stack.Top()\n}", "func (q *Stack) Peek() interface{} {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\tn := q.top\n\tif n == nil {\n\t\treturn nil\n\t}\n\n\treturn n.data\n}", "func (cb *Buffer) Read(buf []byte) (int, error) {\n\tif buf == nil || len(buf) == 0 {\n\t\treturn 0, fmt.Errorf(\"Target buffer is null or empty\")\n\t}\n\n\ttoRead := min(len(buf), cb.ReadAvailability())\n\n\tlBytes := min(cb.rpos, toRead)\n\tcopy(buf[toRead-lBytes:toRead], cb.buffer[cb.rpos-lBytes:cb.rpos])\n\n\tif toRead > lBytes {\n\t\trBytes := toRead - lBytes\n\t\tcopy(buf[:rBytes], cb.buffer[len(cb.buffer)-rBytes:len(cb.buffer)])\n\t\tcb.rpos = len(cb.buffer) - rBytes\n\t} else {\n\t\tcb.rpos -= lBytes\n\t}\n\n\tcb.full = false\n\treturn toRead, nil\n}", "func (q *BytesQueue) Get(index int) ([]byte, error) {\n\tdata, _, err := q.peek(index)\n\treturn data, err\n}", "func (c *RingBuffer) consume(n int) int {\n\tif n > c.len {\n\t\tn = c.len\n\t}\n\tc.index = (c.index + n) % len(c.buf)\n\tc.addLen(-n)\n\treturn n\n}", "func (q *Queue) Peek(n int) (item interface{}) {\n item = q.queue[n]\n return\n}", "func (s *Stack[T]) Peek() T {\n\treturn s.array[len(s.array)-1]\n}", "func TestMockReadSeekerReads(t *testing.T) {\n\tvar reader = NewMockReadSeeker(&[]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07})\n\n\t// Buffer to read into.\n\tvar data []byte = []byte{0x00, 0x00, 0x00}\n\tvar count = 0\n\n\t// Start with empty data buffer\n\tassertBytesEqual(data, []byte{0x00, 0x00, 0x00}, t)\n\n\t// Now read into the 3-length buffer\n\tcount, err := reader.Read(data)\n\tif count != 3 {\n\t\tt.Fatal(\"Count not 3 was \", count)\n\t}\n\n\tif err != nil {\n\t\tt.Fatal(\"Error not nil, was \", err)\n\t}\n\n\tassertBytesEqual(data, []byte{0x01, 0x02, 0x03}, t)\n\n\t// Read into it again to get the next 3\n\tcount, err = reader.Read(data)\n\tif count != 3 {\n\t\tt.Fatal(\"Count not 3 was \", count)\n\t}\n\n\tif err != nil {\n\t\tt.Fatal(\"Error not nil, was \", err)\n\t}\n\tassertBytesEqual(data, []byte{0x04, 0x05, 0x06}, t)\n\n\t// Read again to get the last one.\n\tcount, err = reader.Read(data)\n\tif count != 1 {\n\t\tt.Fatal(\"Count not 1 was \", count)\n\t}\n\n\tif err != nil {\n\t\tt.Fatal(\"Error not nil, was \", err)\n\t}\n\n\t// Data will still have the old data remaining\n\tassertBytesEqual(data, []byte{0x07, 0x05, 0x06}, t)\n\n\t// One more time, should be empty\n\tcount, err = reader.Read(data)\n\tif count != 0 {\n\t\tt.Fatal(\"Count not 0 was \", count)\n\t}\n\n\tif err != nil {\n\t\tt.Fatal(\"Error not nil, was \", err)\n\t}\n}", "func (c *CircularQueue) Get() interface{} {\n\tc.mtx.Lock()\n\tdefer c.mtx.Unlock()\n\n\tfor c.head == c.tail {\n\t\tc.cond.Wait()\n\t}\n\n\tret := c.buffer[c.head]\n\tc.head = c.idxInc(c.head)\n\n\treturn ret\n}", "func (s *Stack) Peek() interface{} {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\n\tn := s.top\n\tif n == nil || n.data == nil {\n\t\treturn nil\n\t}\n\n\treturn n.data\n}", "func (s *Stack) Peek() interface{} {\n\tif s.size > 0 {\n\t\tpeek := s.top.value\n\t\treturn peek\n\t}\n\treturn nil\n}", "func (sc *SrcCursor) Peek() Char {\n\treturn sc.PeekN(1)\n}", "func (h *data) Peek() interface{} {\n\tif len(h.queue) > 0 {\n\t\treturn h.items[h.queue[0]].obj\n\t}\n\treturn nil\n}", "func (c *CircularBuffer[T]) Dequeue() (T, bool) {\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\tvar msg T\n\t// record that we have accessed 
the buffer\n\tc.lastAccess = time.Now()\n\t// if our head and tail are equal there is nothing in our buffer to return\n\tif c.head == c.tail {\n\t\treturn msg, false\n\t}\n\n\tmsg = c.buffer[c.tail]\n\tc.tail = (c.tail + 1) % BUFLEN\n\treturn msg, true\n}", "func (this *MyCircularDeque) GetRear() int {\n\tif this.IsEmpty() {\n\t\treturn -1\n\t}\n\n\tthis.mutex.RLock()\n\tdefer this.mutex.RUnlock()\n\treturn this.data[(this.rear-1+this.capacity)%this.capacity]\n}", "func (q *Queue) Peek() (string, error) {\n\tif q.IsEmpty() {\n\t\treturn \"\", errors.New(\"the stack is empty, nothing to pop\")\n\t}\n\n\treturn q.First.Value, nil\n}", "func (s *SeekerWrapper) ReadAt(p []byte, offset int64) (int, error) { return s.s.ReadAt(p, offset) }" ]
[ "0.72824013", "0.72690505", "0.68026304", "0.6735998", "0.6696234", "0.6670475", "0.658596", "0.6543717", "0.6538083", "0.653098", "0.6524286", "0.6496917", "0.64100665", "0.63664496", "0.6301166", "0.62646484", "0.6258624", "0.62238336", "0.62153476", "0.6190447", "0.6183512", "0.6182573", "0.61646307", "0.61579764", "0.61509603", "0.6148841", "0.6101659", "0.608782", "0.6079247", "0.6076103", "0.6044071", "0.60415745", "0.6011115", "0.5990374", "0.5990374", "0.59767777", "0.5929406", "0.59282494", "0.59084785", "0.5889929", "0.58870894", "0.5881303", "0.5866232", "0.5830458", "0.5828816", "0.5811576", "0.58094466", "0.5809343", "0.58016515", "0.5791894", "0.5786365", "0.5765286", "0.576438", "0.5754071", "0.5737629", "0.57209325", "0.57192445", "0.57104695", "0.5706403", "0.57048035", "0.5703655", "0.56932336", "0.56873876", "0.564754", "0.56383777", "0.56381947", "0.56349725", "0.56340027", "0.56320363", "0.5623079", "0.5620901", "0.56200945", "0.56141436", "0.5614032", "0.56134564", "0.5607653", "0.5602438", "0.5600974", "0.5594429", "0.55900204", "0.5586501", "0.5586074", "0.5566106", "0.55655104", "0.55453366", "0.55429834", "0.5539874", "0.55264825", "0.5525618", "0.5504519", "0.5495932", "0.5463381", "0.54600793", "0.5457509", "0.5453116", "0.54479796", "0.54462886", "0.5444741", "0.5442469", "0.544007" ]
0.7324721
0
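The negatives above all orbit the same Peek contract: report the front element of a queue, stack, or ring buffer without removing it, and signal emptiness rather than panicking. As a minimal sketch (not an entry in this dataset; the queue type and its fields are invented for illustration), the (value, ok) shape several of those negatives use looks like this in runnable form:

```go
package main

import "fmt"

// queue is a minimal slice-backed FIFO, invented for illustration. It shows
// the Peek contract the negatives above keep circling: return the front
// element without removing it, and report emptiness instead of panicking.
type queue struct {
	items []int
}

func (q *queue) Enqueue(v int) { q.items = append(q.items, v) }

// Peek uses the (value, ok) shape several of the negative snippets adopt.
func (q *queue) Peek() (int, bool) {
	if len(q.items) == 0 {
		return 0, false
	}
	return q.items[0], true
}

func main() {
	q := &queue{}
	if _, ok := q.Peek(); !ok {
		fmt.Println("empty queue: nothing to peek")
	}
	q.Enqueue(42)
	v, _ := q.Peek()
	fmt.Println("front:", v, "len still:", len(q.items)) // Peek never dequeues
}
```

Returning (zero, false) instead of panicking keeps Peek safe to call speculatively, which is why so many of the negative snippets adopt that shape.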
Consume advances the tail index to remove data from a circular buffer. A call to Consume usually follows a call to Peek. The number of bytes consumed is returned.
func (c *CircBuf) Consume(nbytes int) int {
	var count int
	var num int
	for {
		count = c.countToEnd()
		if nbytes-num < count {
			count = nbytes - num
		}
		if count <= 0 {
			break
		}
		c.tail = (c.tail + count) & (len(c.buf) - 1)
		num += count
	}
	return num
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *RingBuffer) Consume(n int) int {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\treturn c.consume(n)\n}", "func (c *RingBuffer) consume(n int) int {\n\tif n > c.len {\n\t\tn = c.len\n\t}\n\tc.index = (c.index + n) % len(c.buf)\n\tc.addLen(-n)\n\treturn n\n}", "func (b *Buffer) Consume(s functional.Stream) (err error) {\n b.idx, err = readStreamIntoSlice(s, b.buffer, b.handler)\n if err == functional.Done {\n err = nil\n }\n return\n}", "func (g *GrowingBuffer) Consume(s functional.Stream) error {\n buffer := g.bufferPtr.Elem()\n buffer.Set(buffer.Slice(0, 0))\n if g.isPtrBuffer {\n return AppendPtrsTo(g.bufferPtr.Interface(), g.creater).Consume(s)\n }\n return AppendTo(g.bufferPtr.Interface()).Consume(s)\n}", "func consume(in []byte, n int) (out, data []byte, err error) {\n\tif n < 0 || len(in) < n {\n\t\treturn nil, nil, bufferTooSmall\n\t}\n\treturn in[n:], in[:n], nil\n}", "func (sk *Seek) Consume() <-chan *sarama.ConsumerMessage {\n\treturn sk.oc.Consume()\n}", "func (c *Consumer) Consume() ([]byte, bool) {\n\tfound := true\n\tif b := c.getItem(); b != nil {\n\t\tif !c.cache.Has(b, c.readIndex) {\n\t\t\tc.cache.Put(b, c.readIndex)\n\t\t\tfound = false\n\t\t}\n\n\t\tincrementIndex(&c.readIndex, len(c.ring.items)-1)\n\t\treturn b, found\n\t}\n\n\truntime.Gosched()\n\treturn nil, true\n}", "func (src *Source) ConsumeN(n int) {\n\tfor {\n\t\tif n <= len(src.current) {\n\t\t\tsrc.current = src.current[n:]\n\t\t\tif len(src.current) == 0 {\n\t\t\t\tsrc.Consume()\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tif src.err != nil {\n\t\t\tif src.err == io.EOF {\n\t\t\t\tsrc.err = io.ErrUnexpectedEOF\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tn -= len(src.current)\n\t\tsrc.Consume()\n\t}\n}", "func (src *Source) Consume() {\n\tif src.reader == nil {\n\t\tsrc.current = nil\n\t\tsrc.ReportError(io.EOF)\n\t\treturn\n\t}\n\tif len(src.nextList) != 0 {\n\t\tsrc.current = src.nextList[0]\n\t\tsrc.nextList = src.nextList[1:]\n\t\treturn\n\t}\n\tif len(src.savepointStack) > 0 {\n\t\tsrc.buf = make([]byte, len(src.buf))\n\t}\n\tn, err := src.reader.Read(src.buf)\n\tif err != nil {\n\t\tsrc.ReportError(err)\n\t}\n\tsrc.current = src.buf[:n]\n\tfor _, savepoint := range src.savepointStack {\n\t\tsavepoint.nextList = append(savepoint.nextList, src.current)\n\t}\n}", "func (c *CircBuf) Peek(buf []byte) int {\n\tvar count int\n\tvar tail int = c.tail // Use a local tail variable\n\tvar num int\n\tfor {\n\t\tcount = c.countToEndArg(tail)\n\t\tif len(buf) - num < count {\n\t\t\tcount = len(buf) - num\n\t\t}\n\t\tif count <= 0 {\n\t\t\tbreak\n\t\t}\n\t\tcopy(buf[num : num + count], c.buf[tail : tail + count])\n\t\ttail = (tail + count) & (len(c.buf) - 1)\n\t\tnum += count\n\t}\n\treturn num\n}", "func (c *CircularBuffer[T]) Dequeue() (T, bool) {\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\tvar msg T\n\t// record that we have accessed the buffer\n\tc.lastAccess = time.Now()\n\t// if our head and tail are equal there is nothing in our buffer to return\n\tif c.head == c.tail {\n\t\treturn msg, false\n\t}\n\n\tmsg = c.buffer[c.tail]\n\tc.tail = (c.tail + 1) % BUFLEN\n\treturn msg, true\n}", "func (d PacketData) Consume(size int) ([]byte, bool) {\n\tv, ok := d.PullUp(size)\n\tif ok {\n\t\td.pk.consumed += size\n\t}\n\treturn v, ok\n}", "func (c *RingBuffer) Read(p []byte) (int, error) {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\tn, err := c.peek(p)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\treturn c.consume(n), nil\n}", "func (c *CircBuf) Read(buf []byte) int {\n\tvar count int\n\tvar num int\n\tfor {\n\t\tcount = 
c.countToEnd()\n\t\tif len(buf) - num < count {\n\t\t\tcount = len(buf) - num\n\t\t}\n\t\tif count <= 0 {\n\t\t\tbreak\n\t\t}\n\t\tcopy(buf[num : num + count], c.buf[c.tail : c.tail + count])\n\t\tc.tail = (c.tail + count) & (len(c.buf) - 1)\n\t\tnum += count\n\t}\n\treturn num\n}", "func (mcq *MyCircularQueue) Rear() int {\n\tif mcq.length == 0 {\n\t\treturn -1\n\t}\n\treturn mcq.dummyTail.Pre.Val\n}", "func (t *tFile) consumeSeek() int32 {\n\treturn atomic.AddInt32(&t.seekLeft, -1)\n}", "func (c *Consumer) Consume() (msg *kafka.Message, err error) {\n\tmsg, err = c.kc.ReadMessage(-1)\n\treturn msg, err\n}", "func (r *Reader) Remaining() int {\n\treturn len(r.buf)\n}", "func (this *MyCircularQueue) Rear() int {\n\tif this.Count == 0 {\n\t\treturn -1\n\t}\n\tvar index int\n\tif this.Tail == 0 {\n\t\tindex = len(this.Queue) - 1\n\t} else {\n\t\tindex = this.Tail - 1\n\t}\n\treturn this.Queue[index]\n}", "func (w *WrapperClient) Consume(v int) error {\n\tw.mux.Lock()\n\tdefer w.mux.Unlock()\n\n\tif w.streamQuota < v {\n\t\treturn fmt.Errorf(\"quota is %d, less than %d, can not acquire\", w.streamQuota, v)\n\t}\n\n\tw.streamQuota -= v\n\treturn nil\n}", "func (c *apiConsumers) StopConsume() {\n\tif c.consuming {\n\t\tclose(c.inputQueueStop)\n\t\tc.consuming = false\n\t\tclose(c.queue)\n\t\tclose(c.errors)\n\t}\n}", "func (b *buffer) GetBytesConsumed() int {\n\treturn b.position\n}", "func (d *Decoder) consume(n int) {\n\td.in = d.in[n:]\n\tfor len(d.in) > 0 {\n\t\tswitch d.in[0] {\n\t\tcase ' ', '\\n', '\\r', '\\t':\n\t\t\td.in = d.in[1:]\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (p *Pool) Consume() <-chan interface{} {\n\treturn p.c\n}", "func (this *MyCircularDeque) GetRear() int {\n if this.IsEmpty() {\n return -1\n }\n return this.data[(this.tail - 1 + this.capacity) % this.capacity]\n}", "func (n *Node) Consume() {\n\tn.consumed = true\n}", "func (this *MyCircularQueue) Rear() int {\n if this.IsEmpty() {\n return -1\n }\n return this.Items[this.TailIndex]\n}", "func (c *RingBuffer) peek(p []byte) (int, error) {\n\tif c.len == 0 && c.closed {\n\t\treturn 0, io.EOF\n\t}\n\tl := len(p)\n\tif l > c.len {\n\t\tl = c.len\n\t}\n\tn := 0\n\tleftBeforeEnd := len(c.buf) - c.index\n\tif l < leftBeforeEnd {\n\t\tn = copy(p, c.buf[c.index:c.index+l])\n\t} else {\n\t\tn = copy(p, c.buf[c.index:])\n\t\tn += copy(p[n:], c.buf[:l-n])\n\t}\n\treturn n, nil\n}", "func (b *QueueBuffer) Read(p []byte) (int, error) {\n\tif x := len(*b) - len(p); x >= 0 {\n\t\tn := copy(p, (*b)[x:])\n\t\t*b = (*b)[:x]\n\t\treturn n, nil\n\t}\n\tn := copy(p, *b)\n\t*b = nil\n\treturn n, io.EOF\n}", "func (this *MyCircularQueue) Rear() int {\n\treturn this.CircularQueue[this.Tail]\n}", "func (src *Source) Consume1() {\n\tsrc.current = src.current[1:]\n\tif len(src.current) == 0 {\n\t\tsrc.Consume()\n\t}\n}", "func (this *Consumer) consuming() {\n\t// record the consumed sequence\n\thaveConsumed := this.ReadSequence.Get()\n\n\tfor this.IsRunning {\n\t\tnext := haveConsumed + 1\n\n\t\t// Attention:\n\t\t// this.BarrierSequence may include the producer's cursor and\n\t\t// the pre-group consumer's sequences\n\t\thigh := this.BarrierSequence.GetBarrier(next)\n\n\t\t// have data to consume\n\t\tif next <= high {\n\t\t\t// consume\n\t\t\tthis.ReadHandler.Consume(next, high)\n\t\t\t// change the read sequence\n\t\t\tthis.ReadSequence.Set(high)\n\t\t\thaveConsumed = high\n\t\t}\n\n\t\truntime.Gosched()\n\t\t//time.Sleep(time.Millisecond)\n\t}\n}", "func (h PacketHeader) Consume(size int) (v []byte, consumed bool) {\n\treturn 
h.pk.consume(h.typ, size)\n}", "func (b *Buffer) Remaining(from Cursor) uint64 {\n\tb.mu.RLock()\n\tdefer b.mu.RUnlock()\n\n\tif from.offset > b.last {\n\t\treturn 0\n\t}\n\n\toff := from.offset\n\tif off < b.first {\n\t\toff = b.first\n\t}\n\tremaining := b.last - off\n\tremaining += uint64(b.frameSize(b.last))\n\treturn remaining\n}", "func (b *Kafka) Consume(ctx context.Context, topic string, offset int64, imm bool, max int64) ([]string, error) {\n\n\tb.lockForTopic(topic)\n\n\tdefer b.unlockForTopic(topic)\n\t// Fetch offsets\n\tnewOff, err := b.Client.GetOffset(topic, 0, sarama.OffsetNewest)\n\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t}\n\n\toldOff, err := b.Client.GetOffset(topic, 0, sarama.OffsetOldest)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t}\n\n\tlog.Infof(\"Consuming topic: %v, min_offset: %v, max_offset: %v, current_offset: %v\", topic, oldOff, newOff, offset)\n\n\t// If tracked offset is equal or bigger than topic offset means no new messages\n\tif offset >= newOff {\n\t\treturn []string{}, nil\n\t}\n\n\t// If tracked offset is left behind increment it to topic's min. offset\n\tif offset < oldOff {\n\t\tlog.Infof(\"Tracked offset is off for topic: %v, broker_offset %v, tracked_offset: %v\", topic, offset, oldOff)\n\t\treturn []string{}, errors.New(\"offset is off\")\n\t}\n\n\tpartitionConsumer, err := b.Consumer.ConsumePartition(topic, 0, offset)\n\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to consume topic %v, %v, min_offset: %v, max_offset: %v, current_offset: %v\", topic, err.Error(), newOff, oldOff, offset)\n\t\treturn []string{}, err\n\n\t}\n\n\tdefer func() {\n\t\tif err := partitionConsumer.Close(); err != nil {\n\t\t\tlog.Error(err)\n\t\t}\n\t}()\n\n\tmessages := make([]string, 0)\n\tvar consumed int64\n\ttimeout := time.After(300 * time.Second)\n\n\tif imm {\n\t\ttimeout = time.After(100 * time.Millisecond)\n\t}\n\nConsumerLoop:\n\tfor {\n\t\tselect {\n\t\t// If the http client cancels the http request break consume loop\n\t\tcase <-ctx.Done():\n\t\t\t{\n\t\t\t\tbreak ConsumerLoop\n\t\t\t}\n\t\tcase <-timeout:\n\t\t\t{\n\t\t\t\tbreak ConsumerLoop\n\t\t\t}\n\t\tcase msg := <-partitionConsumer.Messages():\n\n\t\t\tmessages = append(messages, string(msg.Value[:]))\n\n\t\t\tconsumed++\n\n\t\t\tlog.Infof(\"Consumed: %v, Max: %v, Latest Message: %v\", consumed, max, string(msg.Value[:]))\n\n\t\t\t// if we pass over the available messages and still want more\n\t\t\tif consumed >= max {\n\t\t\t\tbreak ConsumerLoop\n\t\t\t}\n\n\t\t\tif offset+consumed > newOff-1 {\n\t\t\t\t// if returnImmediately is set don't wait for more\n\t\t\t\tif imm {\n\t\t\t\t\tbreak ConsumerLoop\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\t}\n\n\treturn messages, nil\n}", "func (b *Buffer) Retrieve(length int) {\n\tif length < b.ReadableBytes() {\n\t\tb.readerIndex += length\n\t} else {\n\t\tb.RetrieveAll()\n\t}\n}", "func (b *Buffer) Read(out []byte) (n int, err error) {\n\tif b.readCursor >= b.Size() {\n\t\t// we read the entire buffer, let's loop back to the beginning\n\t\tb.readCursor = 0\n\t} else if b.readCursor+int64(len(out)) > b.Size() {\n\t\t// we don't have enough data in our buffer to fill the passed buffer\n\t\t// we need to do multiple passes\n\t\tn := copy(out, b.data[b.offset+b.readCursor:])\n\t\tb.readCursor += int64(n)\n\t\t// TMP check, should remove\n\t\tif b.readCursor != b.Size() {\n\t\t\tpanic(fmt.Sprintf(\"off by one much? 
%d - %d\", b.readCursor, b.Size()))\n\t\t}\n\t\tn2, _ := b.Read(out[n:])\n\t\tb.readCursor += int64(n2)\n\t\treturn int(n + n2), nil\n\t}\n\tn = copy(out, b.data[b.offset+b.readCursor:])\n\treturn\n}", "func (pb *PageBuffer) Consume(s functional.Stream) (err error) {\n pb.page_no = 0\n pb.idx = 0\n pb.is_end = false\n for err == nil && !pb.isDesiredPageRead() {\n if pb.idx > 0 {\n pb.page_no++\n }\n offset := pb.pageOffset(pb.page_no)\n pb.idx, err = readStreamIntoSlice(\n s, pb.buffer.Slice(offset, offset + pb.pageLen), pb.handler)\n }\n if err == nil {\n anElement := pb.buffer.Index(pb.pageOffset(pb.page_no + 1))\n pb.handler.ensureValid(anElement)\n pb.is_end = s.Next(pb.handler.toInterface(anElement)) == functional.Done\n } else if err == functional.Done {\n pb.is_end = true\n err = nil\n if pb.page_no > 0 && pb.idx == 0 {\n pb.page_no--\n pb.idx = pb.pageLen\n }\n }\n return\n}", "func (this *MyCircularDeque) GetRear() int {\n\tif this.len == 0 {\n\t\treturn -1\n\t}\n\treturn this.queue[(this.tail+1)%this.size]\n}", "func (e *a1Receiver) Consume(rp *xapp.RMRParams) (err error) {\n\te.msgChan <- rp\n\treturn\n}", "func (c *Consumer) Consume() error {\n\tfor {\n\t\tkey, err := c.Dec.Pop()\n\t\tswitch err {\n\t\tcase nil:\n\t\tcase io.EOF:\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn err\n\t\t}\n\t\tswitch key {\n\t\tcase \"atom\":\n\t\t\terr = c.HandleAtom()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase \"template\":\n\t\t\terr = c.CreateTemplateEnc()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase \"json\":\n\t\t\tc.CreateJsonEnc()\n\t\tdefault:\n\t\t\treturn &ConsumerError{\n\t\t\t\t\"Unexpected key\",\n\t\t\t}\n\t\t}\n\t}\n}", "func (r *Retry) Consume(ts <-chan types.Transaction) error {\n\tif r.transactionsIn != nil {\n\t\treturn types.ErrAlreadyStarted\n\t}\n\tif err := r.wrapped.Consume(r.transactionsOut); err != nil {\n\t\treturn err\n\t}\n\tr.transactionsIn = ts\n\tgo r.loop()\n\treturn nil\n}", "func (kcp *KCP) PeekSize() (size int) {\n\tif len(kcp.recvQueue) <= 0 {\n\t\treturn -1\n\t}\n\n\tseg := kcp.recvQueue[0]\n\tif seg.frg == 0 {\n\t\treturn len(seg.dataBuffer)\n\t}\n\n\tif len(kcp.recvQueue) < int(seg.frg+1) {\n\t\treturn -1\n\t}\n\n\tfor idx := range kcp.recvQueue {\n\t\tseg := kcp.recvQueue[idx]\n\t\tsize += len(seg.dataBuffer)\n\t\tif seg.frg == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn\n}", "func (this *MyCircularDeque) GetRear() int {\n\tif this.IsEmpty() {\n\t\treturn -1\n\t}\n\n\tthis.mutex.RLock()\n\tdefer this.mutex.RUnlock()\n\treturn this.data[(this.rear-1+this.capacity)%this.capacity]\n}", "func (s *NSQSession) Consume() <-chan Message {\n\ts.combineConsume()\n\treturn s.receive\n}", "func (c *Connection) Consume(done chan bool) error {\n\tmsgs, err := c.Channel.Consume(\n\t\tc.Config.Queue,\n\t\tc.Config.ConsumerTag,\n\t\tc.Config.Options.Consume.AutoAck,\n\t\tc.Config.Options.Consume.Exclusive,\n\t\tc.Config.Options.Consume.NoLocal,\n\t\tc.Config.Options.Consume.NoWait,\n\t\tc.Config.Options.Consume.Args,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo c.HandleMsgs(msgs)\n\n\tlog.Println(\"Waiting for messages...\")\n\n\tfor {\n\t\tselect {\n\t\tcase <-done:\n\t\t\tc.Channel.Close()\n\t\t\tc.Conn.Close()\n\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func (this *MyCircularDeque) GetRear() int {\n\tif this.IsEmpty() {\n\t\treturn -1\n\t}\n\n\treturn this.data[(len(this.data)+this.end-1)%len(this.data)]\n}", "func (c *RingBuffer) Peek(p []byte) (int, error) {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\treturn c.peek(p)\n}", "func 
(c *consumer) Consume(ctx context.Context) (<-chan async.Message, <-chan error, error) {\n\tctx, cnl := context.WithCancel(ctx)\n\tc.cnl = cnl\n\n\tcg, err := sarama.NewConsumerGroup(c.config.Brokers, c.group, c.config.SaramaConfig)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to create consumer: %w\", err)\n\t}\n\tc.cg = cg\n\tslog.Debug(\"consuming messages\", slog.String(\"topics\", strings.Join(c.topics, \",\")), slog.String(\"group\", c.group))\n\n\tchMsg := make(chan async.Message, c.config.Buffer)\n\tchErr := make(chan error, c.config.Buffer)\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tslog.Info(\"canceling consuming messages requested\")\n\t\t\t\tcloseConsumer(c.cg)\n\t\t\t\treturn\n\t\t\tcase consumerError := <-c.cg.Errors():\n\t\t\t\tchErr <- consumerError\n\t\t\t\tcloseConsumer(c.cg)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\t// Iterate over consumer sessions.\n\tgo func() {\n\t\thnd := handler{consumer: c, messages: chMsg}\n\t\tfor {\n\t\t\terr := c.cg.Consume(ctx, c.topics, hnd)\n\t\t\tif err != nil {\n\t\t\t\tchErr <- err\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn chMsg, chErr, nil\n}", "func (cb *Buffer) Read(buf []byte) (int, error) {\n\tif buf == nil || len(buf) == 0 {\n\t\treturn 0, fmt.Errorf(\"Target buffer is null or empty\")\n\t}\n\n\ttoRead := min(len(buf), cb.ReadAvailability())\n\n\tlBytes := min(cb.rpos, toRead)\n\tcopy(buf[toRead-lBytes:toRead], cb.buffer[cb.rpos-lBytes:cb.rpos])\n\n\tif toRead > lBytes {\n\t\trBytes := toRead - lBytes\n\t\tcopy(buf[:rBytes], cb.buffer[len(cb.buffer)-rBytes:len(cb.buffer)])\n\t\tcb.rpos = len(cb.buffer) - rBytes\n\t} else {\n\t\tcb.rpos -= lBytes\n\t}\n\n\tcb.full = false\n\treturn toRead, nil\n}", "func (kcp *KCP) PeekSize() (length int) {\n\tif len(kcp.rcv_queue) == 0 {\n\t\treturn -1\n\t}\n\n\tseg := &kcp.rcv_queue[0]\n\tif seg.frg == 0 {\n\t\treturn seg.data.Len()\n\t}\n\n\tif len(kcp.rcv_queue) < int(seg.frg+1) {\n\t\treturn -1\n\t}\n\n\tfor k := range kcp.rcv_queue {\n\t\tseg := &kcp.rcv_queue[k]\n\t\tlength += seg.data.Len()\n\t\tif seg.frg == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}", "func (q *Queue) Peek() int {\n\tif q.start.next != nil {\n\t\t_iteratePeek(q.start)\n\t}\n\treturn q.start.val\n}", "func (r *ChannelReader) Read(b []byte) (sz int, err error) {\n\tif len(b) == 0 {\n\t\treturn 0, io.ErrShortBuffer\n\t}\n\n\tfor {\n\t\tif len(r.buf) > 0 {\n\t\t\tif len(r.buf) <= len(b) {\n\t\t\t\tsz = len(r.buf)\n\t\t\t\tcopy(b, r.buf)\n\t\t\t\tr.buf = nil\n\t\t\t} else {\n\t\t\t\tcopy(b, r.buf)\n\t\t\t\tr.buf = r.buf[len(b):]\n\t\t\t\tsz = len(b)\n\t\t\t}\n\t\t\treturn sz, nil\n\t\t}\n\n\t\tvar ok bool\n\t\tif r.deadline.IsZero() {\n\t\t\tr.buf, ok = <-r.c\n\t\t} else {\n\t\t\ttimer := time.NewTimer(r.deadline.Sub(time.Now()))\n\t\t\tdefer timer.Stop()\n\n\t\t\tselect {\n\t\t\tcase r.buf, ok = <-r.c:\n\t\t\tcase <-timer.C:\n\t\t\t\treturn 0, context.DeadlineExceeded\n\t\t\t}\n\t\t}\n\t\tif len(r.buf) == 0 && !ok {\n\t\t\treturn 0, io.EOF\n\t\t}\n\t}\n}", "func (this *FeedableBuffer) Feed(bytesToFeed []byte) (bytesRemaining []byte) {\n\tbyteCount := this.maxByteCount - len(this.Data)\n\tconsumedPortion, bytesRemaining := ConsumeBytes(byteCount, bytesToFeed)\n\tthis.Data = append(this.Data, consumedPortion...)\n\treturn bytesRemaining\n}", "func (this *MyQueue) Peek() int {\n\tif len(this.out) != 0 {\n\t\treturn this.out[len(this.out)-1]\n\t}\n\tfor len(this.in) > 0 {\n\t\tthis.out.Push(this.in.Pop())\n\t}\n\treturn this.out[len(this.out)-1]\n}", "func (b *Buffer) Read(reader 
io.Reader) (error) {\n\tif b.isCompacted {\n\t\tb.isCompacted = false\n\n\t\t// we want to read into the buffer from where it last was,\n\t\tvar slice = b.internal[b.index:]\n\t\tvar length, err = reader.Read(slice)\n\t\tb.index = 0 // start the index over, so reading starts from beginning again\n\t\tb.length += uint32(length) // increment the number of bytes read\n\t\treturn err\n\t}\n\tvar length, err = reader.Read(b.internal)\n\tb.index = 0\n\tb.length = uint32(length)\n\treturn err\n}", "func (consumer *Consumer) Consume(hub *hub.Hub) error {\n\tdeliveries, err := consumer.channel.Consume(\n\t\tconsumer.queue.Name, // queue name\n\t\t\"consumer-tag\", // consumer tag (should not be blank)\n\t\ttrue, // auto ack\n\t\tfalse, // exclusive\n\t\tfalse, // noLocal\n\t\tfalse, // noWait\n\t\tnil, // args\n\t)\n\tif err != nil {\n\t\tlog.Printf(\"Queue consume error: %s\", err.Error())\n\t\treturn fmt.Errorf(\"Queue Consume: %s\", err)\n\t}\n\n\tgo handleDeliveries(deliveries, hub, consumer.err)\n\n\treturn nil\n}", "func (_this *StreamingReadBuffer) Refill(position int) (positionOffset int) {\n\tif _this.isEOF {\n\t\treturn\n\t}\n\n\t_this.moveUnreadBytesToStart(position)\n\t_this.readFromReader(len(_this.Buffer))\n\tpositionOffset = -position\n\treturn\n}", "func (a *reader) Read(p []byte) (n int, err error) {\n\tif a.err != nil {\n\t\treturn 0, a.err\n\t}\n\t// Swap buffer and maybe return error\n\terr = a.fill()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t// Copy what we can\n\tn = copy(p, a.cur.buffer())\n\ta.cur.inc(n)\n\n\tif a.cur.isEmpty() {\n\t\t// Return current, so a fetch can start.\n\t\tif a.cur != nil {\n\t\t\t// If at end of buffer, return any error, if present\n\t\t\ta.err = a.cur.err\n\t\t\ta.reuse <- a.cur\n\t\t\ta.cur = nil\n\t\t}\n\t\treturn n, a.err\n\t}\n\treturn n, nil\n}", "func (c *CircularBuffer[T]) Enqueue(msg T) {\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\t// record that we have accessed the buffer\n\tc.lastAccess = time.Now()\n\t// put our message in\n\tc.buffer[c.head] = msg\n\t// get our new head pointer\n\tnewHead := (c.head + 1) % BUFLEN\n\tif newHead == c.tail {\n\t\t// maintain our circle if we have caught up to the tail, technically we could still read this tail value and\n\t\t// move on, but it greatly simplifies things to just move it\n\t\tc.tail = (c.tail + 1) % BUFLEN\n\t}\n\t// finally move the head forward\n\tc.head = newHead\n}", "func (this *MyQueue) Peek() int {\n\tif len(this.outStack) == 0 {\n\t\tthis.inToOut()\n\t}\n\treturn this.outStack[len(this.outStack)-1]\n}", "func (p *asyncProducerMock) consume() (cleanup func()) {\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\tcase m := <-p.inputCh:\n\t\t\t\tp.mu.Lock()\n\t\t\t\tp.mu.outstanding = append(p.mu.outstanding, m)\n\t\t\t\tp.mu.Unlock()\n\t\t\t}\n\t\t}\n\t}()\n\treturn func() {\n\t\tclose(done)\n\t\twg.Wait()\n\t}\n}", "func (this *MyCircularQueue) Rear() int {\n\t//\n\tif this.IsEmpty() {\n\t\treturn -1\n\t}\n\treturn this.items[(this.rear+this.size-1)%this.size]\n}", "func (this *MyQueue) Peek() int {\n\tif this.sIn.isEmpty() && this.sOut.isEmpty() {\n\t\treturn -1\n\t} else if this.sOut.isEmpty() {\n\t\tthis.inToOut()\n\t}\n\n\treturn this.sOut.top()\n}", "func (r *objReader) peek(n int) ([]byte, error) {\n\tif r.err != nil {\n\t\treturn nil, r.err\n\t}\n\tif r.offset >= r.limit {\n\t\tr.error(io.ErrUnexpectedEOF)\n\t\treturn nil, r.err\n\t}\n\tb, err := 
r.b.Peek(n)\n\tif err != nil {\n\t\tif err != bufio.ErrBufferFull {\n\t\t\tr.error(err)\n\t\t}\n\t}\n\treturn b, err\n}", "func (this *MyQueue) Peek() int {\n\tif len(this.b) == 0 {\n\t\tfor len(this.a) > 0 {\n\t\t\tthis.b = append(this.b, this.a[len(this.a)-1])\n\t\t\tthis.a = this.a[:len(this.a)-1]\n\t\t}\n\t}\n\treturn this.b[len(this.b)-1]\n}", "func (c *Client) Consume(topic string) (msgs <-chan *Delivery, errChan <-chan error, err error) {\n\tif err := c.ensureExchange(topic); err != nil {\n\t\treturn nil, nil, ErrorEnsureExchange\n\t}\n\tif err := c.ensureConsumerQueues(topic); err != nil {\n\t\treturn nil, nil, ErrorEnsureConsumerQueues\n\t}\n\n\tmultiplexer := make(chan *Delivery)\n\n\tfor i := 0; i != c.numConsumerQueues; i++ {\n\t\tqueue := c.getRk(topic, i)\n\t\tc.workerThreads[queue]++\n\t\tc.workerMultiplexer[queue] = multiplexer\n\t}\n\n\treturn multiplexer, make(chan error), nil\n}", "func (c *LocalResultConsumer) Consume() *comet.PredictResult {\n\treturn <- c.Pipe\n}", "func (q *Queue) Peek() (int, error) {\r\n\tif len(q.data) == 0 {\r\n\t\treturn 0, fmt.Errorf(\"Queue is empty\")\r\n\t}\r\n\treturn q.data[0], nil\r\n}", "func ConsumeBytes(byteCount int, buffer []byte) (consumedPortion []byte, remainingPortion []byte) {\n\tif byteCount > len(buffer) {\n\t\tbyteCount = len(buffer)\n\t}\n\n\tconsumedPortion = buffer[:byteCount]\n\tremainingPortion = buffer[byteCount:len(buffer)]\n\treturn consumedPortion, remainingPortion\n}", "func (cons *Console) Consume(workers *sync.WaitGroup) {\n\tgo cons.readPipe()\n\tcons.ControlLoop()\n}", "func (this *MyCircularDeque) GetRear() int {\n\tif this.IsEmpty() {\n\t\treturn -1\n\t}\n\treturn this.Last.Data\n}", "func (p *Parser) consume() {\n\tp.prev = p.next\n\tif p.next.Kind != ItemEof && p.next.Kind != ItemError {\n\t\tp.next = p.lexer.NextItem()\n\t}\n}", "func (rb *RingBuffer) Read(p []byte) (int, error) {\n\texpected := len(p)\n\tn := 0\n\n\tif rb.writeOff > rb.readOff {\n\t\tn += copy(p, rb.data[rb.readOff:rb.writeOff])\n\t} else {\n\t\tpos := copy(p, rb.data[rb.readOff:])\n\t\tn += pos\n\n\t\tif n < expected {\n\t\t\tn += copy(p[pos:], rb.data[:rb.writeOff])\n\t\t}\n\t}\n\n\trb.count -= n\n\trb.readOff += (rb.readOff + n) % rb.cap\n\n\treturn n, nil\n}", "func (q *testQueue) consume() *entry {\n\tq.muw.Lock()\n\tdefer q.muw.Unlock()\n\n\tselect {\n\tcase <-q.wait:\n\t\treturn nil\n\tcase e := <-q.jobs:\n\t\tq.wg.Add(1)\n\n\t\treturn e\n\t}\n}", "func (this *MyQueue) Peek() int {\n\tif len(this.outStack) == 0 {\n\t\tthis.in2Out()\n\t}\n\n\treturn this.outStack[len(this.outStack)-1]\n}", "func (this *MyCircularDeque) GetRear() int {\n\tif this.IsEmpty() {\n\t\treturn -1\n\t}\n\treturn this.tail.value\n}", "func (r *Reader) Discard(n int) (int, error) {\n\tif n <= 0 {\n\t\treturn 0, nil\n\t}\n\tunread := r.Unread()\n\tif n > unread {\n\t\treturn unread, io.ErrUnexpectedEOF\n\t}\n\tr.off += n\n\treturn n, nil\n}", "func (c *Consumer) Consume() {\n\tmsgs, err := c.channel.Consume(\n\t\tc.queue.Name, // queue\n\t\t\"\", // consumer\n\t\tfalse, // auto-ack\n\t\tfalse, // exclusive\n\t\tfalse, // no-local\n\t\tfalse, // no-wait\n\t\tnil, // args\n\t)\n\tutils.FailOnError(err, \"Failed to register a consumer\")\n\n\tforever := make(chan bool)\n\n\tgo func() {\n\t\tfor d := range msgs {\n\t\t\tc.process(d)\n\t\t}\n\t}()\n\n\tc.logger.Printf(\"Worker: %s waiting for messages. 
To exit press CTRL+C\", c.queue.Name)\n\t<-forever\n}", "func (m *ManagedConsumer) Receive(ctx context.Context) (Message, error) {\n\tfor {\n\t\tm.mu.RLock()\n\t\tconsumer := m.consumer\n\t\twait := m.waitc\n\t\tm.mu.RUnlock()\n\n\t\tif consumer == nil {\n\t\t\tselect {\n\t\t\tcase <-wait:\n\t\t\t\t// a new consumer was established.\n\t\t\t\t// Re-enter read-lock to obtain it.\n\t\t\t\tcontinue\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn Message{}, ctx.Err()\n\t\t\t}\n\t\t}\n\n\t\t// TODO: determine when, if ever, to call\n\t\t// consumer.RedeliverOverflow\n\n\t\tif err := consumer.Flow(1); err != nil {\n\t\t\treturn Message{}, err\n\t\t}\n\n\t\tselect {\n\t\tcase msg := <-m.queue:\n\t\t\treturn msg, nil\n\n\t\tcase <-ctx.Done():\n\t\t\treturn Message{}, ctx.Err()\n\n\t\tcase <-consumer.Closed():\n\t\t\treturn Message{}, errors.New(\"consumer closed\")\n\n\t\tcase <-consumer.ConnClosed():\n\t\t\treturn Message{}, errors.New(\"consumer connection closed\")\n\t\t}\n\t}\n}", "func (this *MyCircularQueue) Rear() int {\n if this.IsEmpty() { return -1 }\n return this.vals[(this.tail+this.n-1)%this.n] // Note\n}", "func (q *BytesQueue) Peek() ([]byte, error) {\n\tdata, _, err := q.peek(q.head)\n\treturn data, err\n}", "func (r *ReliableTransport) consume(data []byte, err error) {\n\tif err != nil {\n\t\tr.log.Errorf(\"failed consuming message at %s. %v\", r.partition, err)\n\t\treturn\n\t}\n\n\tif data == nil {\n\t\tr.log.Warnf(\"received empty message at %s\", r.partition)\n\t\treturn\n\t}\n\n\tvar m types.Message\n\tif err := json.Unmarshal(data, &m); err != nil {\n\t\tr.log.Errorf(\"failed unmarshalling message %#v. %v\", data, err)\n\t\treturn\n\t}\n\n\tctx, cancel := context.WithTimeout(r.context, r.timeout)\n\tdefer cancel()\n\tselect {\n\tcase <-ctx.Done():\n\t\tr.log.Warnf(\"%s took to long consuming. 
%#v\", r.partition, m)\n\t\treturn\n\tcase r.producer <- m:\n\t\treturn\n\t}\n}", "func (c *consumer) consume() ([]*proto.Message, error) {\n\tvar msgbuf []*proto.Message\n\tvar retry int\n\tfor len(msgbuf) == 0 {\n\t\tvar err error\n\t\tmsgbuf, err = c.fetch()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(msgbuf) == 0 {\n\t\t\tretry += 1\n\t\t\tif c.conf.RetryLimit != -1 && retry > c.conf.RetryLimit {\n\t\t\t\treturn nil, ErrNoData\n\t\t\t}\n\t\t\tif c.conf.RetryWait > 0 {\n\t\t\t\ttime.Sleep(c.conf.RetryWait)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn msgbuf, nil\n}", "func (this *MyCircularDeque) GetRear() int {\n\tif this.length == 0 {\n\t\treturn -1\n\t}\n\treturn this.tail.prev.val\n}", "func (c *Consumer) Consume(inOrder bool, timeWindow *time.Duration) {\n\tif err := c.Subscribe(c.Topic, nil); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Only messages will be pushed to this channel\n\tmsgChan := make(chan kafka.Message)\n\tdefer close(msgChan)\n\tgo c.filterMessages(msgChan)\n\n\t// This will block\n\tif inOrder {\n\t\tprocessMessagesInOrder(msgChan, *timeWindow)\n\t} else {\n\t\tprocessMessages(msgChan)\n\t}\n\n\tc.Close()\n}", "func (rmq *RabbitMQ) Consume(ctx context.Context, qInfo QueueInfo, prefetch int, consumer func(d amqp.Delivery) error) error {\n\n\tvar msgs <-chan amqp.Delivery\n\n\t//create ch and declare its topology\n\tch, err := rmq.Channel(prefetch, 0, false)\n\n\tif err != nil {\n\n\t\treturn err\n\n\t}\n\n\t//initialize consumer\n\tmsgs, err = ch.Consume(\n\t\tqInfo.Name, // queue\n\t\t\"\", // consumer\n\t\tfalse, // auto-ack\n\t\tfalse, // exclusive\n\t\tfalse, // no-local\n\t\tfalse, // no-wait\n\t\tnil, // args\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t//wait for messages and\n\tfor d := range msgs {\n\n\t\tselect {\n\t\tdefault:\n\n\t\t\tgo consumer(d)\n\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\t}\n\n\t}\n\n\treturn nil\n}", "func (buf *queueBuffer) Len() uint64 {\n\treturn buf.depth\n}", "func (this *Queue) Peek() interface{} {\r\n\tif this.length == 0 {\r\n\t\treturn nil\r\n\t}\r\n\treturn this.start.value\r\n}", "func (c *ConflictConsumer) Consume() {\n\tmsgs, err := c.channel.Consume(\n\t\tc.queue.Name, // queue\n\t\t\"\", // consumer\n\t\tfalse, // auto-ack\n\t\tfalse, // exclusive\n\t\tfalse, // no-local\n\t\tfalse, // no-wait\n\t\tnil, // args\n\t)\n\tutils.FailOnError(err, \"Failed to register a consumer\")\n\n\tforever := make(chan bool)\n\n\tgo func() {\n\t\tfor d := range msgs {\n\t\t\tc.process(d)\n\t\t}\n\t}()\n\n\tc.logger.Printf(\"Worker: %s waiting for messages. 
To exit press CTRL+C\", c.queue.Name)\n\t<-forever\n}", "func Consume(topicName string, groupID string) {\n\tfmt.Println(\"Consumer started.\")\n\n\t// make a new reader that consumes from topic-A\n\tr := kafka.NewReader(kafka.ReaderConfig{\n\t\tBrokers: []string{\"localhost:9092\"},\n\t\tGroupID: groupID,\n\t\tTopic: topicName,\n\t\tMinBytes: 10e3, // 10KB\n\t\tMaxBytes: 10e6, // 10MB\n\t})\n\n\tfor {\n\t\tm, err := r.ReadMessage(context.Background())\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tfmt.Printf(\"message at topic/partition/offset %v/%v/%v: %s = %s\\n\", m.Topic, m.Partition, m.Offset, string(m.Key), string(m.Value))\n\t}\n\n\tr.Close()\n\tfmt.Println(\"Consumer closed.\")\n}", "func (q *BytesQueue) Pop() ([]byte, error) {\n\tdata, blockSize, err := q.peek(q.head)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tq.head += blockSize\n\tq.count--\n\n\tif q.head == q.rightMargin {\n\t\tq.head = leftMarginIndex\n\t\tif q.tail == q.rightMargin {\n\t\t\tq.tail = leftMarginIndex\n\t\t}\n\t\tq.rightMargin = q.tail\n\t}\n\n\tq.full = false\n\n\treturn data, nil\n}", "func (m *ManagedConsumer) ReceiveAsync(ctx context.Context, msgs chan<- Message) error {\n\t// send flow request after 1/2 of the queue\n\t// has been consumed\n\thighwater := uint32(cap(m.queue)) / 2\n\n\tdrain := func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase msg := <-m.queue:\n\t\t\t\tmsgs <- msg\n\t\t\tdefault:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\nCONSUMER:\n\tfor {\n\t\t// ensure that the message queue is empty\n\t\tdrain()\n\n\t\t// gain lock on consumer\n\t\tm.mu.RLock()\n\t\tconsumer := m.consumer\n\t\twait := m.waitc\n\t\tm.mu.RUnlock()\n\n\t\tif consumer == nil {\n\t\t\tselect {\n\t\t\tcase <-wait:\n\t\t\t\t// a new consumer was established.\n\t\t\t\t// Re-enter read-lock to obtain it.\n\t\t\t\tcontinue\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn ctx.Err()\n\t\t\t}\n\t\t}\n\n\t\t// TODO: determine when, if ever, to call\n\t\t// consumer.RedeliverOverflow\n\n\t\t// request half the buffer's capacity\n\t\tif err := consumer.Flow(highwater); err != nil {\n\t\t\tm.asyncErrs.send(err)\n\t\t\tcontinue CONSUMER\n\t\t}\n\n\t\tvar receivedSinceFlow uint32\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase msg := <-m.queue:\n\t\t\t\tmsgs <- msg\n\n\t\t\t\tif receivedSinceFlow++; receivedSinceFlow >= highwater {\n\t\t\t\t\tif err := consumer.Flow(receivedSinceFlow); err != nil {\n\t\t\t\t\t\tm.asyncErrs.send(err)\n\t\t\t\t\t\tcontinue CONSUMER\n\t\t\t\t\t}\n\t\t\t\t\treceivedSinceFlow = 0\n\t\t\t\t}\n\t\t\t\tcontinue\n\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn ctx.Err()\n\n\t\t\tcase <-consumer.Closed():\n\t\t\t\tm.asyncErrs.send(errors.New(\"consumer closed\"))\n\t\t\t\tcontinue CONSUMER\n\n\t\t\tcase <-consumer.ConnClosed():\n\t\t\t\tm.asyncErrs.send(errors.New(\"consumer connection closed\"))\n\t\t\t\tcontinue CONSUMER\n\t\t\t}\n\t\t}\n\t}\n}", "func (c *RingBuffer) Capacity() int {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\treturn len(c.buf)\n}", "func (rb *RingBuffer) Pop() (*logger.LogMessage, error) {\n\trb.mu.Lock()\n\tfor rb.q.size() == 0 && !rb.closed {\n\t\trb.wait.Wait()\n\t}\n\n\tif rb.closed {\n\t\trb.mu.Unlock()\n\t\treturn nil, ErrClosed\n\t}\n\n\tval := rb.q.dequeue()\n\trb.currentBytes -= int64(len(val.Line))\n\trb.mu.Unlock()\n\treturn val, nil\n}", "func (c *Consumer) consume(ctx context.Context) {\n\t// We need to run startConsuming to make sure that we are okay and ready to start consuming. 
This is mainly to\n\t// avoid a race condition where Listen() will attempt to read the messages channel prior to consume()\n\t// initializing it. We can then launch a goroutine to handle the actual consume operation.\n\tif !c.startConsuming() {\n\t\treturn\n\t}\n\tgo func() {\n\t\tdefer c.stopConsuming()\n\n\t\tchildCtx, cancel := context.WithCancel(ctx)\n\t\tdefer cancel()\n\t\tfor {\n\t\t\t// The consume loop can be cancelled by a calling the cancellation function on the context or by\n\t\t\t// closing the pipe of death. Note that in the case of context cancellation, the getRecords\n\t\t\t// call below will be allowed to complete (as getRecords does not regard context cancellation).\n\t\t\t// In the case of cancellation by pipe of death, however, the getRecords will immediately abort\n\t\t\t// and allow the consume function to immediately abort as well.\n\t\t\tif ok, _ := c.shouldConsume(ctx); !ok {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tc.enqueueBatch(childCtx)\n\t\t}\n\t}()\n}", "func (p *parser) consume() {\n\tp.prev = p.next\n\tif p.next.k != lexKindEof {\n\t\tp.next = p.lex()\n\t}\n}", "func (q *FileQueue) Peek() (int64, []byte, error) {\n\tif q.IsEmpty() {\n\t\treturn -1, nil, nil\n\t}\n\tindex := q.FrontIndex\n\n\tbb, err := q.peek(index)\n\treturn index, bb, err\n}", "func (this *MyQueue) Peek() int {\n\treturn this.q[len(this.q)-1]\n}", "func (q *Queue) Peek() ([]byte, error) {\n\tq.RLock()\n\tdefer q.RUnlock()\n\titem, err := q.readItemByID(q.head + 1)\n\treturn item.Value, err\n}" ]
[ "0.7131728", "0.6850434", "0.64949995", "0.6380035", "0.6372471", "0.6267718", "0.61481977", "0.6093912", "0.60590017", "0.60086906", "0.59445274", "0.5875308", "0.5792995", "0.5783226", "0.5756069", "0.5718285", "0.5705474", "0.569782", "0.567035", "0.56534123", "0.5649446", "0.5643282", "0.563235", "0.5628244", "0.56216884", "0.5599033", "0.5595568", "0.5579174", "0.55579674", "0.5542946", "0.55194175", "0.5494287", "0.5489103", "0.5482435", "0.54706454", "0.5441346", "0.5438698", "0.54305893", "0.54226494", "0.5413794", "0.54083574", "0.5407892", "0.5386043", "0.537916", "0.53595203", "0.535501", "0.5341979", "0.5334378", "0.5333908", "0.533356", "0.5314795", "0.5314014", "0.5313405", "0.5306217", "0.53021276", "0.5299614", "0.52981615", "0.52933776", "0.52894825", "0.52874213", "0.5284327", "0.52829105", "0.52819103", "0.52647114", "0.5262236", "0.5259819", "0.5258613", "0.52461535", "0.5245898", "0.523109", "0.52299196", "0.5225585", "0.52212435", "0.5219656", "0.5212242", "0.52092046", "0.5207803", "0.5201952", "0.5194991", "0.5194632", "0.5190199", "0.51881516", "0.5183976", "0.5180155", "0.5179543", "0.5173841", "0.51657444", "0.5164327", "0.5157851", "0.5157478", "0.5156873", "0.51509523", "0.51442695", "0.51393354", "0.51358634", "0.5130744", "0.51304704", "0.51297086", "0.51142836", "0.51122427" ]
0.7995392
0
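This record pairs the Consume document with a CircBuf.Peek negative, and the two are meant to be used together: Peek copies bytes out without moving the tail, and Consume then retires them. Below is a hedged, self-contained sketch — countToEnd/countToEndArg and Write are assumptions, since the dataset never shows them; they follow the standard power-of-two (kfifo-style) masking the document itself uses:

```go
package main

import "fmt"

// CircBuf mirrors the type used by the Consume document and the Peek negative
// in this record: a ring buffer whose capacity is a power of two, indexed with
// head/tail and wrapped by masking with len(buf)-1.
type CircBuf struct {
	buf        []byte // len(buf) must be a power of two
	head, tail int
}

// countToEndArg reports how many unread bytes lie contiguously between the
// given tail position and the end of the backing slice (assumed helper).
func (c *CircBuf) countToEndArg(tail int) int {
	end := len(c.buf) - tail
	n := (c.head + end) & (len(c.buf) - 1)
	if n < end {
		return n
	}
	return end
}

// countToEnd is the same measurement from the buffer's own tail.
func (c *CircBuf) countToEnd() int { return c.countToEndArg(c.tail) }

// Write appends bytes for the demo; overflow handling is deliberately omitted.
func (c *CircBuf) Write(p []byte) {
	for _, b := range p {
		c.buf[c.head] = b
		c.head = (c.head + 1) & (len(c.buf) - 1)
	}
}

// Peek copies unread bytes into buf without moving c.tail — the shape of the
// CircBuf.Peek negative in this record.
func (c *CircBuf) Peek(buf []byte) int {
	tail, num := c.tail, 0
	for {
		count := c.countToEndArg(tail)
		if len(buf)-num < count {
			count = len(buf) - num
		}
		if count <= 0 {
			break
		}
		copy(buf[num:num+count], c.buf[tail:tail+count])
		tail = (tail + count) & (len(c.buf) - 1)
		num += count
	}
	return num
}

// Consume advances c.tail past nbytes, exactly as in the positive document.
func (c *CircBuf) Consume(nbytes int) int {
	num := 0
	for {
		count := c.countToEnd()
		if nbytes-num < count {
			count = nbytes - num
		}
		if count <= 0 {
			break
		}
		c.tail = (c.tail + count) & (len(c.buf) - 1)
		num += count
	}
	return num
}

func main() {
	c := &CircBuf{buf: make([]byte, 8)}
	c.Write([]byte("hello"))

	peeked := make([]byte, 3)
	n := c.Peek(peeked)
	fmt.Printf("peeked %d bytes: %q\n", n, peeked[:n]) // tail has not moved yet

	fmt.Println("consumed:", c.Consume(n)) // now retire exactly what was used
}
```

Splitting the read into Peek plus Consume lets a caller inspect data, decide how much it actually processed, and only then advance the tail by that amount.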
String converts a circular buffer to a string representation.
func (c *CircBuf) String() string {
	return fmt.Sprintf("CircBuf{len: %v, head: %v, tail: %v, space: %v, count: %v, buf: %v}",
		len(c.buf), c.head, c.tail, c.Space(), c.Count(), c.buf)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (b *Buffer) String() string {\n\treturn string(b.buf)\n}", "func (s *Buffer) String() string {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\treturn s.buffer.String()\n}", "func (b *Buf) String() string { return string(b.b) }", "func (buffer *Buffer) String() string {\n\tif buffer == nil {\n\t\treturn \"<nil>\"\n\t}\n\treturn buffer.B.String()\n}", "func (b *SafeBuffer) String() string {\n\tb.m.RLock()\n\tdefer b.m.RUnlock()\n\treturn b.b.String()\n}", "func bufferToString(buffer *bytes.Buffer, unsafePtr *bool) string {\n defer buffer.Reset()//ensure buffer is reset\n if !*unsafePtr {\n return buffer.String()\n }\n bb := buffer.Bytes()\n s := *(*string)(unsafe.Pointer(&bb))\n return s\n}", "func (b *LimitedBuffer) String() string {\n\treturn string(b.buf)\n}", "func (q *CircleQueue) ToString() string {\n\tresult := \"\"\n\ti := q.head\n\tfor i != q.tail {\n\t\tresult += fmt.Sprintf(\"%v\", q.data[i])\n\t\tif i != q.tail {\n\t\t\tresult += \", \"\n\t\t}\n\t\ti = (i + 1) % q.capacity\n\t}\n\treturn result\n}", "func (buff *Bytes) ToString() string {\r\n\treturn *(*string)(unsafe.Pointer(buff))\r\n}", "func (b *Bytes) String() string {\n\treturn fmt.Sprint(*b)\n}", "func (q *CircularQueue) String() string {\n\tif q.head == q.tail {\n\t\treturn \"empty queue\"\n\t}\n\tresult := \"head<-\"\n\ti := q.head\n\tfor true {\n\t\tresult += fmt.Sprintf(\"<-%+v\", q.q[i])\n\t\ti = (i + 1) % q.capacity\n\t\tif i == q.tail {\n\t\t\tbreak\n\t\t}\n\t}\n\tresult += \"<-tail\"\n\treturn result\n}", "func (b Bytes) ToString() string {\n\treturn string(b)\n}", "func (s *BufferSink) String() string {\n\treturn s.buf.String()\n}", "func String(b []byte) string {\n\treturn string(b)\n}", "func (ml *MemoryLogger) String() string {\n\treturn ml.RingBuffer.String()\n}", "func (b *LogBuffer) String() string {\n\tvar str strings.Builder\n\tstr.WriteString(\"{\")\n\tif b.header != nil && len(b.header) != 0 {\n\t\tb.headerMU.RLock()\n\t\thdr, err := json.Marshal(b.header)\n\t\tb.headerMU.RUnlock()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error encoding logBuffer JSON\")\n\t\t}\n\t\tstr.Write(hdr[1 : len(hdr)-1])\n\t\tstr.WriteString(\",\")\n\t}\n\tstr.WriteString(\"\\\"entries\\\":[\" + strings.TrimSuffix(b.Buff.String(), \",\") + \"]\")\n\tif b.AddBanner {\n\t\tstr.WriteString(b.banner)\n\t}\n\tstr.WriteString(\"}\\n\")\n\treturn str.String()\n}", "func (b *Builder) String() string {\n\tp := unsafe.Pointer(&b.buf)\n\tsp := (*string)(p)\n\ts := *sp\n\t// return *(*string)(unsafe.Pointer(&b.buf))\n\treturn s\n}", "func String(b []byte) (s string) {\n\treturn string(b)\n}", "func String(b []byte) string {\n\treturn *(*string)(unsafe.Pointer(&b))\n}", "func (e *ObservableEditableBuffer) String() string {\n\treturn e.f.String()\n}", "func (d *Decoder) String() string {\n\tdata := d.Bytes()\n\treturn unsafe.BytesToString(data)\n}", "func (enc *encoder) String() string {\n\treturn enc.buf.String()\n}", "func (buf Hex) String() string {\n\treturn hex.EncodeToString(buf)\n}", "func (w *Reflow) String() string {\n\treturn w.buf.String()\n}", "func (bs ByteSlice) String() string {\n\treturn hex.EncodeToString([]byte(bs))\n}", "func (r UintBoundedStrictly) String() string { return UintBounded(r).String() }", "func (c Chat) String() string {\n\tjc, _ := json.Marshal(c)\n\treturn string(jc)\n}", "func (duplex Duplex) ToString() string {\n\treturn duplexText[duplex]\n}", "func (b *AppendOnlyBufferedBatch) String() string {\n\t// String should not be used in the fast paths, so we will set the length on\n\t// the wrapped batch 
(which might be a bit expensive but is ok).\n\tb.batch.SetLength(b.length)\n\treturn b.batch.String()\n}", "func (m BytesOutProtocol) String() string {\n\treturn string(m)\n}", "func (vl BytesValue) String() string {\n\treturn Buffer.BytesToHexString(vl)\n}", "func (vl BytesValue) String() string {\n\treturn Buffer.BytesToHexString(vl)\n}", "func (i *Input) ToString() string {\n\treturn i.Buffer.ToString()\n}", "func (s SequencerData) String() string {\n\treturn fmt.Sprintf(\"%T len %v\", s, s.Len())\n}", "func (b *Builder) String() string {\n\tif b.buffer.Len() == 0 {\n\t\tb.first.WriteTo(b.buffer)\n\t}\n\n\treturn strings.TrimSpace(b.buffer.String())\n}", "func (b byteSlice) ToString() string {\n\tvar a []string\n\tfor _, by := range b {\n\t\ta = append(a, string(by))\n\t}\n\n\treturn strings.Join(a, \" \")\n}", "func (b *linkedQueue) String() string {\n\tret := \"[\"\n\tfor e := b.first; e != nil; e = e.next {\n\t\tret+= fmt.Sprintf(\"%v \", e.value)\n\t}\n\treturn strings.TrimSpace(ret) + \"]\"\n}", "func (file *File) ToString() string {\n\treturn file.buffer.ToString(file.newline)\n}", "func(q *Queue) String() string {\n\tvar buffer bytes.Buffer\n\tif q.IsEmpty() {\n\t\tbuffer.WriteString(\"queue is nil\")\n\t}\n\tbuffer.WriteString(\"front [\")\n\tfor i:=0;i<q.Size();i++ {\n\t\tbuffer.WriteString(fmt.Sprint(q.arr[i]))\n\t\tif i != q.Size()-1{\n\t\t\tbuffer.WriteString(\" \")\n\t\t}\n\t}\n\tbuffer.WriteString(\"] back\")\n\treturn buffer.String()\n}", "func (p *StringBuilder) String() string {\n\treturn string(p.buffer)\n}", "func (v atomicReference) String() string {\n\treturn fmt.Sprint(v.Get())\n}", "func (e Bytes) String() string {\n\treturn fmt.Sprintf(\"%v\", e)\n}", "func (r *InterRecord) String() string {\n\tbuf := r.Bytes()\n\tdefer ffjson.Pool(buf)\n\n\treturn string(buf)\n}", "func (tree *BTree) String() string {\n\treturn fmt.Sprintf(\"%v\", tree.ToSlice())\n}", "func (b *Bar) String() string {\n\treturn string(b.Bytes())\n}", "func (o *Waitlistposition) String() string {\n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (w *Wrap) String() string {\n\treturn w.buf.String()\n}", "func (bitmap *bitmap) String() string {\n\tdiv, mod := bitmap.Size/8, bitmap.Size%8\n\tbuff := make([]string, div+mod)\n\n\tfor i := 0; i < div; i++ {\n\t\tbuff[i] = fmt.Sprintf(\"%08b\", bitmap.data[i])\n\t}\n\n\tfor i := div; i < div+mod; i++ {\n\t\tbuff[i] = fmt.Sprintf(\"%1b\", bitmap.Bit(div*8+(i-div)))\n\t}\n\n\treturn strings.Join(buff, \"\")\n}", "func BytesToString(b []byte) string { return *(*string)(unsafe.Pointer(&b)) }", "func (v Bytes) String() string {\n\tif v >= EXABYTE {\n\t\treturn v.format(EXABYTE) + EXABYTES\n\t}\n\tif v >= PETABYTE {\n\t\treturn v.format(PETABYTE) + PETABYTES\n\t}\n\tif v >= TERABYTE {\n\t\treturn v.format(TERABYTE) + TERABYTES\n\t}\n\tif v >= GIGABYTE {\n\t\treturn v.format(GIGABYTE) + GIGABYTES\n\t}\n\tif v >= MEGABYTE {\n\t\treturn v.format(MEGABYTE) + MEGABYTES\n\t}\n\tif v >= KILOBYTE {\n\t\treturn v.format(KILOBYTE) + KILOBYTES\n\t}\n\treturn strconv.FormatUint(uint64(v), 10) + JUSTBYTES\n}", "func (q *UnsafeQueue16) String() string {\n\treturn fmt.Sprintf(\"Queue{capacity: %v, capMod: %v, putPos: %v, getPos: %v}\",\n\t\tq.capacity, q.capMod, q.getPos, q.getPos)\n}", "func (r ReceiveAll) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func BytesToString(data []byte) string 
{\n\treturn string(data[:])\n}", "func (s *Serial) String() string {\n\tvar ss []string\n\tfor _, t := range s.inner {\n\t\tss = append(ss, t.String())\n\t}\n\treturn strings.Join(ss, \"\\n\")\n}", "func (r Replacement) String() string {\n\treturn fmt.Sprintf(\"%s:%X\", r.Addr, r.Buf)\n}", "func (bbw *Writer) String() string {\n\treturn fmt.Sprintf(\"{len(buf)=%d, clsdPos=%d, offs=%d, noExt=%t}\", len(bbw.buf), bbw.clsdPos, bbw.offs, bbw.noExt)\n}", "func (d Deque) String() string {\n\tresult := \"\"\n\tsep := \" \"\n\tfor curr := d.back; curr != nil; curr = curr.next {\n\t\tif curr.next == nil {\n\t\t\tsep = \"\"\n\t\t}\n\t\tresult += fmt.Sprintf(\"%v%v\", curr.data, sep)\n\t}\n\treturn result\n}", "func (q *UnsafeQueue64) String() string {\n\treturn fmt.Sprintf(\"Queue{capacity: %v, capMod: %v, putPos: %v, getPos: %v}\",\n\t\tq.capacity, q.capMod, q.getPos, q.getPos)\n}", "func (ob *PyObject) String() string {\n\tif ob.rawptr == nil {\n\t\treturn \"\"\n\t}\n\trepr := C.PyObject_Str(ob.rawptr)\n\tif repr == nil {\n\t\treturn \"\"\n\t}\n\tdefer C.Py_DecRef(repr)\n\ts := C.PyUnicode_AsEncodedString(repr, encoding, codecErrors)\n\tif s == nil {\n\t\treturn \"invalid Unicode string\"\n\t}\n\tdefer C.Py_DecRef(s)\n\treturn C.GoString(C.PyBytes_AsString(s))\n}", "func (frame ProcessedFrame) ToString(delimiter string) string {\n\tvar frameString string\n\tframeString += frame.PacketHash + delimiter\n\tframeString += frame.Packet.ToString(delimiter)\n\treturn frameString\n}", "func (g *GameDataWithSent) ToString() string {\n\tb, _ := g.MarshalBinary()\n\n\treturn string(b)\n}", "func (b *Buffer) ToStringHex() string {\n\treturn hex.EncodeToString(b.b)\n}", "func (n Uint256) String() string {\n\treturn string(n.toDecimal())\n}", "func (q *LinkedListQueue) ToString() string {\n\tresult := \"\"\n\tcur := q.head\n\tfor cur != nil {\n\t\tresult += fmt.Sprintf(\"%v\", cur.value)\n\t\tif cur.next != nil {\n\t\t\tresult += \", \"\n\t\t}\n\t\tcur = cur.next\n\t}\n\treturn result\n}", "func (r Room) String() string {\n\tjr, _ := json.Marshal(r)\n\treturn string(jr)\n}", "func (space *Space) ToString() string {\n\treturn string(space.ToByte())\n}", "func toString(v value) string {\n\tvar b bytes.Buffer\n\twriteValue(&b, v)\n\treturn b.String()\n}", "func (q *ArrayQueue) ToString() string {\n\tresult := \"\"\n\tfor i := q.head; i < q.tail; i++ {\n\t\tresult += fmt.Sprintf(\"%v\", q.data[i])\n\t\tif i < q.tail-1 {\n\t\t\tresult += \", \"\n\t\t}\n\t}\n\treturn result\n}", "func (c Call) String() string {\n\treturn fmt.Sprint(c)\n}", "func (r *Rerror) String() string {\n\tif r == nil {\n\t\treturn \"<nil>\"\n\t}\n\tb, _ := r.MarshalJSON()\n\treturn goutil.BytesToString(b)\n}", "func (w *Writer) String() string {\n\treturn w.buf.String()\n}", "func byteSliceToString(b []byte) string {\n\treturn *(*string)(unsafe.Pointer(&b))\n}", "func (k *key) String() string {\n\treturn *(*string)(unsafe.Pointer(&k.b))\n}", "func (s SampleChannelDataOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func String(b []byte) (s string) {\n pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))\n pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))\n pstring.Data = pbytes.Data\n pstring.Len = pbytes.Len\n return\n}", "func (v Struct) String() string {\n\tdata, err := v.MarshalJSON()\n\tif err != nil {\n\t\treturn fmt.Sprintf(`{\"error\": %v}`, err)\n\t}\n\treturn string(data)\n}", "func (bw *BufWriter) String(val string) {\n\tif bw.Error != nil {\n\t\treturn\n\t}\n\tbw.stringBuf = String(val, bw.stringBuf[:0])\n\t_, 
bw.Error = bw.writer.Write(bw.stringBuf)\n}", "func (d *Deque[T]) String() string {\n\tstr := \"[\"\n\tfor i := 0; i < d.Size(); i++ {\n\t\tif str != \"[\" {\n\t\t\tstr += \" \"\n\t\t}\n\t\tstr += fmt.Sprintf(\"%v\", d.At(i))\n\t}\n\tstr += \"]\"\n\n\treturn str\n}", "func (x *BigUInt) String() string {\n\tif len(x.data) == 0 {\n\t\treturn \"0x0\"\n\t}\n\tstr := \"0x\"\n\tfor i := len(x.data) - 1; i >= 0; i-- {\n\t\tif x.data[i] > 0xF || i == len(x.data)-1 {\n\t\t\tstr += fmt.Sprintf(\"%x\", x.data[i])\n\t\t} else {\n\t\t\tstr += fmt.Sprintf(\"0%x\", x.data[i])\n\t\t}\n\t\tif i != 0 && i%4 == 0 {\n\t\t\tstr += \"_\"\n\t\t}\n\t}\n\treturn str\n}", "func (n *Node) String() string {\n\treturn fmt.Sprintf(\"%v\", n.data)\n}", "func (r *Response) String() string {\n\tif r.Error != nil {\n\t\treturn \"\"\n\t}\n\n\tr.populateResponseByteBuffer()\n\n\treturn r.internalByteBuffer.String()\n}", "func (n *Node) String() string {\n\treturn fmt.Sprintf(\"%v|%d\", n.addr, n.currentNumConnections)\n}", "func (ba *BitArray) String() string {\n\tbuf := bytes.Buffer{}\n\tfor _, b := range ba.buf {\n\t\tif b {\n\t\t\tbuf.WriteString(\"1\")\n\t\t} else {\n\t\t\tbuf.WriteString(\"0\")\n\t\t}\n\t}\n\treturn buf.String()\n}", "func (pos *Position) String() string {\n\tif *pos.file == \"\" {\n\t\treturn fmt.Sprintf(\"L:%v|C:%v\", pos.Line, pos.Column)\n\t}\n\treturn fmt.Sprintf(\"%v|L:%v|C:%v\", *pos.file, pos.Line, pos.Column)\n}", "func (c Chats) String() string {\n\tjc, _ := json.Marshal(c)\n\treturn string(jc)\n}", "func (j JSONMetaPrimitiveNode) String() string {\n\treturn fmt.Sprintf(\"(%q -> %s)\", j.Path(), j.ContentString(0))\n}", "func ByteArrayToString(buf []byte) string {\n\treturn *(*string)(unsafe.Pointer(&buf))\n}", "func (tag Tag) String() string {\n\treturn string([]byte{\n\t\tbyte(tag >> 24 & 0xFF),\n\t\tbyte(tag >> 16 & 0xFF),\n\t\tbyte(tag >> 8 & 0xFF),\n\t\tbyte(tag & 0xFF),\n\t})\n}", "func (s DescribeChannelOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s DescribeChannelOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (c *compress) String() string {\n\treturn fmt.Sprint(*c)\n}", "func (cfg frozenConfig) MarshalToString(val interface{}) (string, error) {\n buf, err := encoder.Encode(val, cfg.encoderOpts)\n return rt.Mem2Str(buf), err\n}", "func (v ByteVec) String() string { return string([]byte(v)) }", "func ToString(object CometObject) *CometStr {\n\tswitch n := object.(type) {\n\tcase *CometStr:\n\t\treturn n\n\tcase *CometBool:\n\t\treturn &CometStr{Value: strconv.FormatBool(n.Value), Size: 4}\n\tcase *CometInt:\n\t\t// TODO: updates should be made when we have numbers with different bases\n\t\tvalue := strconv.FormatInt(n.Value, 10)\n\t\treturn &CometStr{Value: value, Size: len(value)}\n\tcase *CometRange:\n\t\treturn &CometStr{Value: n.ToString(), Size: len(n.ToString())}\n\tcase *CometFunc:\n\t\tvalue := n.ToString()\n\t\treturn &CometStr{Value: value, Size: len(value)}\n\tcase *CometError:\n\t\tvalue := n.Message\n\t\treturn &CometStr{Value: value, Size: len(value)}\n\tcase *CometInstance:\n\t\treturn &CometStr{Value: n.ToString(), Size: len(n.ToString())}\n\tdefault:\n\t\tpanic(\"All types should have been exhausted!!\")\n\t}\n}", "func (g Grid) String() string {\n\tb := bytes.NewBuffer(nil)\n\tif _, err := g.WriteTo(b); err != nil {\n\t\treturn fmt.Sprintf(\"Error: %s\", err)\n\t}\n\treturn b.String()\n}", "func (c Contract) String() string {\n\tjc, _ := json.Marshal(c)\n\treturn string(jc)\n}", "func (binaryTree *BinaryTree) ToString() string 
{\n\tvar buffer bytes.Buffer\n\n\tif !binaryTree.Empty() {\n\t\tvar queue []*BinaryTreeNode = []*BinaryTreeNode{binaryTree.root}\n\n\t\tfor len(queue) > 0 {\n\t\t\tvar node *BinaryTreeNode = queue[0]\n\t\t\tbuffer.WriteString(strconv.Itoa(node.value) + \" \")\n\n\t\t\tif node.left != nil {\n\t\t\t\tqueue = append(queue, node.left)\n\t\t\t}\n\n\t\t\tif node.right != nil {\n\t\t\t\tqueue = append(queue, node.right)\n\t\t\t}\n\n\t\t\tqueue = queue[1:]\n\t\t}\n\t}\n\n\treturn buffer.String()\n}", "func (na *NArray) String() string {\n\n\treturn na.Sprint(func(na *NArray, k int) bool {\n\t\treturn true\n\t})\n}", "func ToString(byteStr []byte) string {\n\tn := bytes.IndexByte(byteStr, 0)\n\treturn string(byteStr[:n])\n}", "func (c FileDescriptor) String() string {\n\treturn fmt.Sprintf(\"%T(%v)\", c, capnp.Client(c))\n}" ]
[ "0.7288735", "0.71794194", "0.7084949", "0.69326824", "0.6912065", "0.6856904", "0.6845287", "0.6817113", "0.67771125", "0.660697", "0.65616924", "0.6472023", "0.6391958", "0.63303596", "0.63248277", "0.63169754", "0.62542444", "0.6195914", "0.6127785", "0.6124196", "0.60954326", "0.6086083", "0.608542", "0.60780764", "0.6046883", "0.6044375", "0.60377324", "0.6037374", "0.6035542", "0.6034596", "0.6029636", "0.6029636", "0.6019509", "0.60117525", "0.600104", "0.5965643", "0.59190756", "0.59036934", "0.58969915", "0.58919096", "0.58825815", "0.58615315", "0.585616", "0.5850515", "0.58450174", "0.5824416", "0.5807263", "0.580347", "0.58016515", "0.57927984", "0.5784774", "0.575744", "0.575449", "0.5745073", "0.57281274", "0.5727915", "0.5714741", "0.57124454", "0.5709111", "0.5702506", "0.5695244", "0.5687385", "0.56813896", "0.56773233", "0.5661272", "0.56587875", "0.56458163", "0.56457835", "0.56405085", "0.56256074", "0.5624697", "0.5624528", "0.5616208", "0.56055856", "0.5602756", "0.5602709", "0.5594861", "0.5594204", "0.55929536", "0.5589956", "0.55893576", "0.5588335", "0.55866474", "0.5584143", "0.5584063", "0.55839163", "0.55801433", "0.5576785", "0.5572505", "0.5572505", "0.5570932", "0.55691105", "0.5566119", "0.55641836", "0.5563977", "0.5563024", "0.55603987", "0.5550731", "0.55492485", "0.5546292" ]
0.74762917
0
GetID returns the security group id
func (e *EC2SG) GetID() string {
	return e.ID
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *Group) GetSecurityIdentifier()(*string) {\n return m.securityIdentifier\n}", "func (a *Adapter) SecurityGroupID() string {\n\treturn a.manifest.securityGroup.id\n}", "func (m *PrivilegedAccessGroupEligibilitySchedule) GetGroupId()(*string) {\n val, err := m.GetBackingStore().Get(\"groupId\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func (o GetServerGroupsGroupOutput) Id() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetServerGroupsGroup) string { return v.Id }).(pulumi.StringOutput)\n}", "func (o *SecurityGroup) GetSecurityGroupId() string {\n\tif o == nil || o.SecurityGroupId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.SecurityGroupId\n}", "func (o ClusterManagedPrivateEndpointOutput) GroupId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ClusterManagedPrivateEndpoint) pulumi.StringOutput { return v.GroupId }).(pulumi.StringOutput)\n}", "func (g Group) GetID() string {\n\tl := strings.SplitN(g.URI, \"/\", -1)\n\tid := l[len(l)-1]\n\treturn id\n}", "func (o *VersionedControllerService) GetGroupIdentifier() string {\n\tif o == nil || o.GroupIdentifier == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.GroupIdentifier\n}", "func Getgid() int", "func (Krypton) GetGroup() string {\n\tvar g groupType = a8\n\treturn g.get()\n}", "func (o SecurityGroupIngressOutput) SecurityGroupId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v SecurityGroupIngress) *string { return v.SecurityGroupId }).(pulumi.StringPtrOutput)\n}", "func getGroupSidString(sd []byte) (string, error) {\n\t// Make sure we have enough bytes to safely read the required fields.\n\tif len(sd) < int(unsafe.Sizeof(SECURITY_DESCRIPTOR_RELATIVE{})) {\n\t\treturn \"\", fmt.Errorf(\"Short Security Descriptor: %d bytes!\", len(sd))\n\t}\n\n\t// Only valid revision is 1, verify that.\n\trevision := getRevision(sd)\n\tif revision != 1 {\n\t\treturn \"\", fmt.Errorf(\"Invalid SD revision (%d), expected 1!\", revision)\n\t}\n\n\t// SECURITY_DESCRIPTOR_RELATIVE.OffsetGroup.\n\toffsetGroup := binary.LittleEndian.Uint32(sd[8:12])\n\tif offsetGroup >= uint32(len(sd)) {\n\t\treturn \"\", fmt.Errorf(\"offsetGroup (%d) points outside Security Descriptor of size %d bytes!\",\n\t\t\toffsetGroup, len(sd))\n\t}\n\n\tsidStr, err := sidToString(sd[offsetGroup:])\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn \"G:\" + sidStr, nil\n}", "func (o EciScalingConfigurationOutput) SecurityGroupId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *EciScalingConfiguration) pulumi.StringPtrOutput { return v.SecurityGroupId }).(pulumi.StringPtrOutput)\n}", "func (api *API) GetGroupID(appID, trackName string, arch Arch) (string, error) {\n\tvar cachedGroupsRef map[GroupDescriptor]string\n\tcachedGroupsLock.RLock()\n\tif cachedGroups != nil {\n\t\t// Keep a reference to the map that we found.\n\t\tcachedGroupsRef = cachedGroups\n\t}\n\tcachedGroupsLock.RUnlock()\n\t// Generate map on startup or if invalidated.\n\tif cachedGroupsRef == nil {\n\t\tcachedGroupsLock.Lock()\n\t\tcachedGroupsRef = cachedGroups\n\t\t// If a concurrent execution generated it inbetween our RUnlock() and Lock(),\n\t\t// we can use this because any invalidation inbetween must have happened\n\t\t// before the generation because all writes are sequential.\n\t\tif cachedGroupsRef == nil {\n\t\t\tcachedGroups = make(map[GroupDescriptor]string)\n\t\t\tquery, _, err := goqu.From(\"groups\").ToSQL()\n\t\t\tvar groups []*Group\n\t\t\tif err == nil {\n\t\t\t\tgroups, err = 
api.getGroupsFromQuery(query)\n\t\t\t}\n\t\t\t// Checks boths errors above.\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error().Err(err).Msg(\"GetGroupID error\")\n\t\t\t} else {\n\t\t\t\tfor _, group := range groups {\n\t\t\t\t\tif group.Channel != nil {\n\t\t\t\t\t\tdescriptor := GroupDescriptor{AppID: group.ApplicationID, Track: group.Track, Arch: group.Channel.Arch}\n\t\t\t\t\t\t// The groups are sorted descendingly by the creation time.\n\t\t\t\t\t\t// The newest group with the track name and arch wins.\n\t\t\t\t\t\tif otherID, ok := cachedGroups[descriptor]; ok {\n\t\t\t\t\t\t\t// Log a warning for others.\n\t\t\t\t\t\t\tlogger.Warn().Str(\"group\", group.ID).Str(\"group2\", otherID).Str(\"track\", group.Track).Msg(\"GetGroupID - another group already uses the same track name and architecture\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcachedGroups[descriptor] = group.ID\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlogger.Warn().Str(\"group\", group.ID).Msg(\"GetGroupID - no channel found for\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Keep a reference to the map we created.\n\t\t\tcachedGroupsRef = cachedGroups\n\t\t}\n\t\tcachedGroupsLock.Unlock()\n\t}\n\n\t// Trim space and the {} that may surround the ID\n\tappIDNoBrackets := strings.TrimSpace(appID)\n\tif len(appIDNoBrackets) > 1 && appIDNoBrackets[0] == '{' {\n\t\tappIDNoBrackets = strings.TrimSpace(appIDNoBrackets[1 : len(appIDNoBrackets)-1])\n\t}\n\n\tcachedGroupID, ok := cachedGroupsRef[GroupDescriptor{AppID: appIDNoBrackets, Track: trackName, Arch: arch}]\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"no group found for app %v, track %v, and architecture %v\", appID, trackName, arch)\n\t}\n\treturn cachedGroupID, nil\n}", "func (o *VersionedConnection) GetGroupIdentifier() string {\n\tif o == nil || o.GroupIdentifier == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.GroupIdentifier\n}", "func (c *client) GetID(ctx context.Context, groupName string) (string, error) {\n\tcli := c.getGroupClient()\n\n\tresp, err := cli.FindByID(ctx, &pb.GroupFindRequest{Names: []string{groupName}})\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn \"\", err\n\t}\n\n\tif len(resp.Groups) == 0 {\n\t\treturn \"\", ErrGroupNonExist\n\t}\n\n\treturn resp.Groups[0].GetGroupid(), nil\n}", "func (o EcsLaunchTemplateOutput) SecurityGroupId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *EcsLaunchTemplate) pulumi.StringPtrOutput { return v.SecurityGroupId }).(pulumi.StringPtrOutput)\n}", "func (group *NodeGroup) Id() string {\n\treturn group.nodeGroupID\n}", "func (g Group) GetID() string {\n\treturn strconv.FormatUint(uint64(g.ID), 10)\n}", "func (Lawrencium) GetGroup() string {\n\tvar g groupType = b3\n\treturn g.get()\n}", "func (o LookupClusterRoleTemplateBindingResultOutput) GroupId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupClusterRoleTemplateBindingResult) string { return v.GroupId }).(pulumi.StringOutput)\n}", "func (Sodium) GetGroup() string {\n\tvar g groupType = a1\n\treturn g.get()\n}", "func (g *Group) GetID() string {\n\treturn g.ID\n}", "func (m *PrivilegedAccessGroupEligibilitySchedule) GetGroup()(Groupable) {\n val, err := m.GetBackingStore().Get(\"group\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(Groupable)\n }\n return nil\n}", "func (o PowerBIOutputDataSourceOutput) GroupId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v PowerBIOutputDataSource) *string { return v.GroupId }).(pulumi.StringPtrOutput)\n}", "func (m CrossOrderCancelReplaceRequest) GetSecurityID() (v string, err 
quickfix.MessageRejectError) {\n\tvar f field.SecurityIDField\n\tif err = m.Get(&f); err == nil {\n\t\tv = f.Value()\n\t}\n\treturn\n}", "func (provider *ResourceProvider) SecurityGroup(id string) (*reachAWS.SecurityGroup, error) {\n\tinput := &ec2.DescribeSecurityGroupsInput{\n\t\tGroupIds: []*string{\n\t\t\taws.String(id),\n\t\t},\n\t}\n\tresult, err := provider.ec2.DescribeSecurityGroups(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = ensureSingleResult(len(result.SecurityGroups), \"security group\", id); err != nil {\n\t\treturn nil, err\n\t}\n\n\tsecurityGroup := newSecurityGroupFromAPI(result.SecurityGroups[0])\n\treturn &securityGroup, nil\n}", "func (m SecurityListRequest) GetSecurityID() (v string, err quickfix.MessageRejectError) {\n\tvar f field.SecurityIDField\n\tif err = m.Get(&f); err == nil {\n\t\tv = f.Value()\n\t}\n\treturn\n}", "func (o *VirtualMachineToAlternativeRestoreOptions) GetNetworkSecurityGroupId() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.NetworkSecurityGroupId\n}", "func (c *MockNetworkSecurityGroupsClient) Get(ctx context.Context, resourceGroupName string, nsgName string) (*network.SecurityGroup, error) {\n\tasg, ok := c.NSGs[nsgName]\n\tif !ok {\n\t\treturn nil, nil\n\t}\n\treturn &asg, nil\n}", "func (Samarium) GetGroup() string {\n\tvar g groupType = b3\n\treturn g.get()\n}", "func GetGroupId(e *Engine, groupName string) (int64, error){\n\tres, _, err := e.RawSelect(Filter(\"autoscope_groups\", map[string]interface{}{\n\t\t\"name\": groupName,\n\t}))\n\tif err != nil { return -1, err }\n\t\n\tif res.Next(){\n\t\tm, err := res.Get()\n\t\tif err != nil { return -1, err }\n\t\treturn m[\"id\"].(int64), nil\n\t}\n\t\n\treturn -1, errors.New(\"No group found\")\n}", "func (m *PrivilegedAccessGroupEligibilitySchedule) GetAccessId()(*PrivilegedAccessGroupRelationships) {\n val, err := m.GetBackingStore().Get(\"accessId\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*PrivilegedAccessGroupRelationships)\n }\n return nil\n}", "func (o PowerBIOutputDataSourceResponseOutput) GroupId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v PowerBIOutputDataSourceResponse) *string { return v.GroupId }).(pulumi.StringPtrOutput)\n}", "func (c *ConsumerGroupInfo) ID() string {\n\treturn c.id\n}", "func (Phosphorus) GetGroup() string {\n\tvar g groupType = a5\n\treturn g.get()\n}", "func libc_getgid() int32", "func (m MarketDataSnapshotFullRefresh) GetSecurityID() (v string, err quickfix.MessageRejectError) {\n\tvar f field.SecurityIDField\n\tif err = m.Get(&f); err == nil {\n\t\tv = f.Value()\n\t}\n\treturn\n}", "func (o *ControllerServiceReferencingComponentDTO) GetGroupId() string {\n\tif o == nil || o.GroupId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.GroupId\n}", "func (b *BasicGroup) GetID() (value int64) {\n\tif b == nil {\n\t\treturn\n\t}\n\treturn b.ID\n}", "func (client IdentityClient) getGroup(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/groups/{groupId}\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response GetGroupResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = 
common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func GetSecurityGroup(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *SecurityGroupState, opts ...pulumi.ResourceOption) (*SecurityGroup, error) {\n\tvar resource SecurityGroup\n\terr := ctx.ReadResource(\"aws:elasticache/securityGroup:SecurityGroup\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (Tellurium) GetGroup() string {\n\tvar g groupType = a6\n\treturn g.get()\n}", "func (o SecurityGroupRuleOutput) SecurityGroupId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *SecurityGroupRule) pulumi.StringOutput { return v.SecurityGroupId }).(pulumi.StringOutput)\n}", "func Group() int {\n\treturn group\n}", "func (c *Client) GetSecurityGroup(ctx context.Context, zone, id string) (*SecurityGroup, error) {\n\tresp, err := c.GetSecurityGroupWithResponse(ctx, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsecurityGroup := securityGroupFromAPI(resp.JSON200)\n\tsecurityGroup.c = c\n\tsecurityGroup.zone = zone\n\n\treturn securityGroup, nil\n}", "func (o ApplicationOutput) GroupId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Application) pulumi.StringPtrOutput { return v.GroupId }).(pulumi.StringPtrOutput)\n}", "func (m NoSecurityAltIDRepeatingGroup) Get(i int) NoSecurityAltID {\n\treturn NoSecurityAltID{m.RepeatingGroup.Get(i)}\n}", "func (m NoSecurityAltIDRepeatingGroup) Get(i int) NoSecurityAltID {\n\treturn NoSecurityAltID{m.RepeatingGroup.Get(i)}\n}", "func (o DnsDomainOutput) GroupId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DnsDomain) pulumi.StringPtrOutput { return v.GroupId }).(pulumi.StringPtrOutput)\n}", "func (m *Group) GetOnPremisesSecurityIdentifier()(*string) {\n return m.onPremisesSecurityIdentifier\n}", "func (Technetium) GetGroup() string {\n\tvar g groupType = b7\n\treturn g.get()\n}", "func (id NetworkGroupId) ID() string {\n\tfmtString := \"/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/networkManagers/%s/networkGroups/%s\"\n\treturn fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetworkManagerName, id.NetworkGroupName)\n}", "func (k *KeepassXDatabase) getGroup(id uint32) (*Group, error) {\n\tg, ok := k.groupIdx[id]\n\tif ok {\n\t\treturn g, nil\n\t}\n\treturn nil, errors.New(\"group not found\")\n}", "func (o *WafPolicyGroup) GetId() string {\n\tif o == nil || o.Id == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Id\n}", "func (n *hetznerNodeGroup) Id() string {\n\treturn n.id\n}", "func (ng *NodeGroup) Id() string {\n\treturn ng.Name\n}", "func (m *PrivilegedAccessGroupEligibilitySchedule) GetPrincipalId()(*string) {\n val, err := m.GetBackingStore().Get(\"principalId\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func (m *CalendarGroup) GetClassId()(*i561e97a8befe7661a44c8f54600992b4207a3a0cf6770e5559949bc276de2e22.UUID) {\n val, err := m.GetBackingStore().Get(\"classId\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*i561e97a8befe7661a44c8f54600992b4207a3a0cf6770e5559949bc276de2e22.UUID)\n }\n return nil\n}", "func (o ClusterNodeAttributeOutput) SecurityGroupId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ClusterNodeAttribute) string { return v.SecurityGroupId }).(pulumi.StringOutput)\n}", "func (m OrderStatusRequest) GetSecurityID() (v string, err quickfix.MessageRejectError) {\n\tvar f field.SecurityIDField\n\tif err = m.Get(&f); err == nil 
{\n\t\tv = f.Value()\n\t}\n\treturn\n}", "func (c *Context) GetGroup() string {\n\treturn c.Group\n}", "func (Molybdenum) GetGroup() string {\n\tvar g groupType = b6\n\treturn g.get()\n}", "func (r *ReorderSupergroupActiveUsernamesRequest) GetSupergroupID() (value int64) {\n\tif r == nil {\n\t\treturn\n\t}\n\treturn r.SupergroupID\n}", "func (o ProjectRoleTemplateBindingOutput) GroupId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ProjectRoleTemplateBinding) pulumi.StringOutput { return v.GroupId }).(pulumi.StringOutput)\n}", "func (g *GetGroupCallRequest) GetGroupCallID() (value int32) {\n\tif g == nil {\n\t\treturn\n\t}\n\treturn g.GroupCallID\n}", "func SecurityGroupByID(conn *ec2.EC2, id string) (*ec2.SecurityGroup, error) {\n\treq := &ec2.DescribeSecurityGroupsInput{\n\t\tGroupIds: aws.StringSlice([]string{id}),\n\t}\n\tresult, err := conn.DescribeSecurityGroups(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif result == nil || len(result.SecurityGroups) == 0 || result.SecurityGroups[0] == nil {\n\t\treturn nil, nil\n\t}\n\n\treturn result.SecurityGroups[0], nil\n}", "func (Cadmium) GetGroup() string {\n\tvar g groupType = b2\n\treturn g.get()\n}", "func (c *Canary) GetCanaryLoadBalancerSecurityGroup(region schemas.RegionConfig) (*string, error) {\n\tclient, err := selectClientFromList(c.AWSClients, region.Region)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnewLBName := c.GenerateCanaryLBSecurityGroupName(region.Region)\n\n\tgroupID, err := client.EC2Service.GetSecurityGroup(newLBName)\n\tif err != nil {\n\t\tc.Logger.Warn(err.Error())\n\t\treturn nil, nil\n\t}\n\n\tc.Logger.Debugf(\"Found existing lb security group id: %s\", *groupID)\n\n\treturn groupID, nil\n}", "func (m Message) GetSecurityID(f *field.SecurityIDField) quickfix.MessageRejectError {\n\treturn m.Body.Get(f)\n}", "func (m Message) GetSecurityID(f *field.SecurityIDField) quickfix.MessageRejectError {\n\treturn m.Body.Get(f)\n}", "func (m Message) GetSecurityID(f *field.SecurityIDField) quickfix.MessageRejectError {\n\treturn m.Body.Get(f)\n}", "func (m Message) GetSecurityID(f *field.SecurityIDField) quickfix.MessageRejectError {\n\treturn m.Body.Get(f)\n}", "func (t *ToggleSupergroupUsernameIsActiveRequest) GetSupergroupID() (value int64) {\n\tif t == nil {\n\t\treturn\n\t}\n\treturn t.SupergroupID\n}", "func (ctx *TestContext) getGroupGroupKey(parentGroupID, childGroupID int64) string {\n\treturn strconv.FormatInt(parentGroupID, 10) + \",\" + strconv.FormatInt(childGroupID, 10)\n}", "func (ec *executionContext) _Group_id(ctx context.Context, field graphql.CollectedField, obj *model.Group) (ret graphql.Marshaler) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tec.Error(ctx, ec.Recover(ctx, r))\n\t\t\tret = graphql.Null\n\t\t}\n\t}()\n\tfc := &graphql.FieldContext{\n\t\tObject: \"Group\",\n\t\tField: field,\n\t\tArgs: nil,\n\t\tIsMethod: false,\n\t\tIsResolver: false,\n\t}\n\n\tctx = graphql.WithFieldContext(ctx, fc)\n\tresTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {\n\t\tctx = rctx // use context from middleware stack in children\n\t\treturn obj.ID, nil\n\t})\n\tif err != nil {\n\t\tec.Error(ctx, err)\n\t\treturn graphql.Null\n\t}\n\tif resTmp == nil {\n\t\tif !graphql.HasFieldError(ctx, fc) {\n\t\t\tec.Errorf(ctx, \"must not be null\")\n\t\t}\n\t\treturn graphql.Null\n\t}\n\tres := resTmp.(string)\n\tfc.Result = res\n\treturn ec.marshalNID2string(ctx, field.Selections, res)\n}", "func (o GetServerGroupsGroupOutput) ResourceGroupId() 
pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetServerGroupsGroup) string { return v.ResourceGroupId }).(pulumi.StringOutput)\n}", "func (o StudioOutput) WorkspaceSecurityGroupId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Studio) pulumi.StringOutput { return v.WorkspaceSecurityGroupId }).(pulumi.StringOutput)\n}", "func getGroupIdString(id interface{}) (gid string, err error) {\r\n\tswitch t := id.(type) {\r\n\tcase string:\r\n\t\tif strings.Contains(id.(string), \"http\") {\r\n\t\t\treturn \"url=\" + url.QueryEscape(id.(string)), nil // group url\r\n\t\t}\r\n\t\treturn id.(string), nil // group id as a string\r\n\tcase uint64:\r\n\t\treturn strconv.FormatUint(id.(uint64), 10), nil // group id as an int\r\n\tdefault:\r\n\t\treturn gid, errors.New(fmt.Sprintf(\"Group ID type exception: Expecting string or uint64 got %T\", t))\r\n\t}\r\n}", "func (o *SecurityGroup) GetNetId() string {\n\tif o == nil || o.NetId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.NetId\n}", "func (r *LogGroup) ID() pulumi.IDOutput {\n\treturn r.s.ID()\n}", "func (*BasicGroup) TypeID() uint32 {\n\treturn BasicGroupTypeID\n}", "func getGroup(client *chef.Client, group string) chef.Group {\n\tgroupInfo, err := client.Groups.Get(group)\n\tif err != nil {\n\t\tfmt.Println(\"Issue getting: \"+group, err)\n\t\tpanic(err.Error()) // proper error handling instead of panic in your app\n\t}\n\treturn groupInfo\n}", "func (r Resource) Group() string {\n\treturn r.group\n}", "func (m *User) GetSecurityIdentifier()(*string) {\n return m.securityIdentifier\n}", "func (c *Consumer) Group() string { return c.group }", "func GetGroup(c *gin.Context) *group.Group {\n\treturn c.MustGet(\"group\").(*group.Group)\n}", "func (m SecurityListRequest) GetNoSecurityAltID() (NoSecurityAltIDRepeatingGroup, quickfix.MessageRejectError) {\n\tf := NewNoSecurityAltIDRepeatingGroup()\n\terr := m.GetGroup(f)\n\treturn f, err\n}", "func (s *Server) Group() string {\n\treturn s.group\n}", "func GetGroup() *Group {\n\treturn group\n}", "func (m *Group) SetSecurityIdentifier(value *string)() {\n m.securityIdentifier = value\n}", "func (o *BulletinDTO) GetGroupId() string {\n\tif o == nil || o.GroupId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.GroupId\n}", "func (r Virtual_Guest_Block_Device_Template_Group) GetGlobalIdentifier() (resp string, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Virtual_Guest_Block_Device_Template_Group\", \"getGlobalIdentifier\", nil, &r.Options, &resp)\n\treturn\n}", "func (_RandomBeacon *RandomBeaconCaller) GetGroup(opts *bind.CallOpts, groupId uint64) (GroupsGroup, error) {\n\tvar out []interface{}\n\terr := _RandomBeacon.contract.Call(opts, &out, \"getGroup\", groupId)\n\n\tif err != nil {\n\t\treturn *new(GroupsGroup), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(GroupsGroup)).(*GroupsGroup)\n\n\treturn out0, err\n\n}", "func (dao *SysConfigDao) Group() string {\n\treturn dao.group\n}", "func (m *Group) GetSecurityEnabled()(*bool) {\n return m.securityEnabled\n}", "func (o *SecurityGroup) GetSecurityGroupName() string {\n\tif o == nil || o.SecurityGroupName == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.SecurityGroupName\n}", "func (s *MultipassServer) Id(ctx context.Context, request *apigrpc.NodeGroupServiceRequest) (*apigrpc.IdReply, error) {\n\tglog.V(5).Infof(\"Call server Id: %v\", request)\n\n\tif request.GetProviderID() != s.Configuration.ProviderID {\n\t\tglog.Errorf(errMismatchingProvider)\n\t\treturn nil, 
fmt.Errorf(errMismatchingProvider)\n\t}\n\n\tnodeGroup := s.Groups[request.GetNodeGroupID()]\n\n\tif nodeGroup == nil {\n\t\tglog.Errorf(errNodeGroupNotFound, request.GetNodeGroupID())\n\n\t\treturn nil, fmt.Errorf(errNodeGroupNotFound, request.GetNodeGroupID())\n\t}\n\n\treturn &apigrpc.IdReply{\n\t\tResponse: nodeGroup.NodeGroupIdentifier,\n\t}, nil\n}", "func (Fermium) GetGroup() string {\n\tvar g groupType = b3\n\treturn g.get()\n}", "func (m SecurityListRequest) GetSecurityReqID() (v string, err quickfix.MessageRejectError) {\n\tvar f field.SecurityReqIDField\n\tif err = m.Get(&f); err == nil {\n\t\tv = f.Value()\n\t}\n\treturn\n}" ]
[ "0.7723133", "0.72929937", "0.7139533", "0.647312", "0.6435965", "0.6417648", "0.641732", "0.6278158", "0.6267658", "0.6250463", "0.6202925", "0.61886334", "0.6184663", "0.6147308", "0.61387384", "0.61131614", "0.6073874", "0.6066186", "0.6042517", "0.6022453", "0.6020448", "0.60183686", "0.6016537", "0.6015213", "0.60012126", "0.5993791", "0.5983211", "0.5968718", "0.5944754", "0.59443444", "0.59405184", "0.59388816", "0.59261554", "0.5917761", "0.5910618", "0.5887422", "0.5883137", "0.5876647", "0.5870781", "0.58567023", "0.58427024", "0.58417267", "0.583535", "0.5806308", "0.57989484", "0.57953906", "0.5792448", "0.5779646", "0.5779646", "0.5760357", "0.5755955", "0.5752822", "0.5747743", "0.57456195", "0.57438433", "0.5724105", "0.57240826", "0.57205796", "0.57012194", "0.56934667", "0.5689131", "0.56844395", "0.56833833", "0.56799126", "0.5657308", "0.5656765", "0.56494105", "0.56489426", "0.56437415", "0.56395376", "0.56395376", "0.56395376", "0.56395376", "0.563887", "0.5608406", "0.5606781", "0.5606634", "0.5602256", "0.55975544", "0.55966926", "0.55914605", "0.5590845", "0.55836797", "0.5581262", "0.557626", "0.5568699", "0.5567853", "0.5547644", "0.55456793", "0.5544576", "0.5539269", "0.55335975", "0.5528185", "0.55275303", "0.5521732", "0.5516987", "0.5507339", "0.5499271", "0.54853064", "0.5482111" ]
0.57060146
58
NewEC2SG returns a new ec2 security group
func NewEC2SG(sg *ec2.SecurityGroup, region string) *EC2SG {
	entity := &EC2SG{
		Entity: NewEntity(),
	}
	if sg == nil {
		log.Debug("nil sg")
		return entity
	}
	entity.Region = region
	if sg.GroupId != nil {
		entity.ID = *sg.GroupId
	}
	for _, tag := range sg.Tags {
		if tag == nil {
			continue
		}
		if tag.Key != nil && tag.Value != nil && *tag.Key == "Name" {
			entity.Name = *tag.Value
		}
		entity.AddTag(tag.Key, tag.Value)
	}
	entity.AddLabel(vpcID, sg.VpcId)
	for _, permission := range sg.IpPermissions {
		for _, rang := range permission.IpRanges {
			if rang.CidrIp != nil && util.ContainsPublicIps(*rang.CidrIp) {
				entity.AddLabel(publicIngress, aws.String("true"))
			}
		}
	}
	return entity
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func createSecurityGroup(client ec2iface.EC2API, vpcID, clusterName string) (string, error) {\n\tvar securityGroupID string\n\n\tnewSecurityGroupName := resourceNamePrefix + clusterName\n\tcsgOut, err := client.CreateSecurityGroup(&ec2.CreateSecurityGroupInput{\n\t\tVpcId: &vpcID,\n\t\tGroupName: aws.String(newSecurityGroupName),\n\t\tDescription: aws.String(fmt.Sprintf(\"Security group for the Kubernetes cluster %s\", clusterName)),\n\t})\n\tif err != nil {\n\t\tif awsErr, ok := err.(awserr.Error); !ok || awsErr.Code() != \"InvalidGroup.Duplicate\" {\n\t\t\treturn \"\", fmt.Errorf(\"failed to create security group %s: %v\", newSecurityGroupName, err)\n\t\t}\n\t\tdescribeOut, err := client.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{\n\t\t\tFilters: []*ec2.Filter{{\n\t\t\t\tName: aws.String(\"group-name\"),\n\t\t\t\tValues: []*string{aws.String(newSecurityGroupName)}}},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"failed to get security group after creation failed because the group already existed: %v\", err)\n\t\t}\n\t\tif n := len(describeOut.SecurityGroups); n != 1 {\n\t\t\treturn \"\", fmt.Errorf(\"expected to get exactly one security group after create failed because the group already existed, got %d\", n)\n\t\t}\n\n\t\tsecurityGroupID = aws.StringValue(describeOut.SecurityGroups[0].GroupId)\n\t}\n\tif csgOut != nil && csgOut.GroupId != nil {\n\t\tsecurityGroupID = *csgOut.GroupId\n\t}\n\tklog.V(2).Infof(\"Security group %s for cluster %s created with id %s.\", newSecurityGroupName, clusterName, securityGroupID)\n\n\t// Add permissions.\n\t_, err = client.AuthorizeSecurityGroupIngress(&ec2.AuthorizeSecurityGroupIngressInput{\n\t\tGroupId: aws.String(securityGroupID),\n\t\tIpPermissions: []*ec2.IpPermission{\n\t\t\t(&ec2.IpPermission{}).\n\t\t\t\t// all protocols from within the sg\n\t\t\t\tSetIpProtocol(\"-1\").\n\t\t\t\tSetUserIdGroupPairs([]*ec2.UserIdGroupPair{\n\t\t\t\t\t(&ec2.UserIdGroupPair{}).\n\t\t\t\t\t\tSetGroupId(securityGroupID),\n\t\t\t\t}),\n\t\t\t(&ec2.IpPermission{}).\n\t\t\t\t// tcp:22 from everywhere\n\t\t\t\tSetIpProtocol(\"tcp\").\n\t\t\t\tSetFromPort(provider.DefaultSSHPort).\n\t\t\t\tSetToPort(provider.DefaultSSHPort).\n\t\t\t\tSetIpRanges([]*ec2.IpRange{\n\t\t\t\t\t{CidrIp: aws.String(\"0.0.0.0/0\")},\n\t\t\t\t}),\n\t\t\t(&ec2.IpPermission{}).\n\t\t\t\t// ICMP from/to everywhere\n\t\t\t\tSetIpProtocol(\"icmp\").\n\t\t\t\tSetFromPort(-1). // any port\n\t\t\t\tSetToPort(-1). // any port\n\t\t\t\tSetIpRanges([]*ec2.IpRange{\n\t\t\t\t\t{CidrIp: aws.String(\"0.0.0.0/0\")},\n\t\t\t\t}),\n\t\t\t(&ec2.IpPermission{}).\n\t\t\t\t// ICMPv6 from/to everywhere\n\t\t\t\tSetIpProtocol(\"icmpv6\").\n\t\t\t\tSetFromPort(-1). // any port\n\t\t\t\tSetToPort(-1). 
// any port\n\t\t\t\tSetIpv6Ranges([]*ec2.Ipv6Range{\n\t\t\t\t\t{CidrIpv6: aws.String(\"::/0\")},\n\t\t\t\t}),\n\t\t},\n\t})\n\tif err != nil {\n\t\tif awsErr, ok := err.(awserr.Error); !ok || awsErr.Code() != \"InvalidPermission.Duplicate\" {\n\t\t\treturn \"\", fmt.Errorf(\"failed to authorize security group %s with id %s: %v\", newSecurityGroupName, securityGroupID, err)\n\t\t}\n\t}\n\n\treturn securityGroupID, nil\n}", "func NewSecurityGroup(ctx *pulumi.Context,\n\tname string, args *SecurityGroupArgs, opts ...pulumi.ResourceOption) (*SecurityGroup, error) {\n\tif args == nil || args.SecurityGroupNames == nil {\n\t\treturn nil, errors.New(\"missing required argument 'SecurityGroupNames'\")\n\t}\n\tif args == nil {\n\t\targs = &SecurityGroupArgs{}\n\t}\n\tif args.Description == nil {\n\t\targs.Description = pulumi.StringPtr(\"Managed by Pulumi\")\n\t}\n\tvar resource SecurityGroup\n\terr := ctx.RegisterResource(\"aws:elasticache/securityGroup:SecurityGroup\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func New() *AwsAsg {\n\n\tcfg, err := config.LoadDefaultConfig()\n\tif err != nil {\n\t\tpanic(\"unable to load SDK config, \" + err.Error())\n\t}\n\tsvc := autoscaling.NewFromConfig(cfg)\n\treturn &AwsAsg{\n\t\tsvc: svc,\n\t}\n}", "func (net *NetworkComponentInput) CreateSecurityGroup(con aws.EstablishConnectionInput) (NetworkComponentResponse, error) {\n\n\tec2, seserr := con.EstablishConnection()\n\tif seserr != nil {\n\t\treturn NetworkComponentResponse{}, seserr\n\t}\n\tsecurity, secerr := ec2.CreateSecurityGroup(\n\t\t&aws.CreateNetworkInput{\n\t\t\tVpcId: net.VpcIds[0],\n\t\t\tName: net.Name + \"_sec\",\n\t\t},\n\t)\n\tif secerr != nil {\n\t\treturn NetworkComponentResponse{}, secerr\n\t}\n\n\tsctags := new(Tag)\n\tsctags.Resource = *security.GroupId\n\tsctags.Name = \"Name\"\n\tsctags.Value = net.Name + \"_sec\"\n\t_, sctagerr := sctags.CreateTags(con)\n\tif sctagerr != nil {\n\t\treturn NetworkComponentResponse{}, sctagerr\n\t}\n\n\t//creating egree and ingres rules for the security group which I created just now\n\tfor _, port := range net.Ports {\n\t\tintport, _ := strconv.ParseInt(port, 10, 64)\n\t\tingreserr := ec2.CreateIngressRule(\n\t\t\t&aws.IngressEgressInput{\n\t\t\t\tPort: intport,\n\t\t\t\tSecId: *security.GroupId,\n\t\t\t},\n\t\t)\n\t\tif ingreserr != nil {\n\t\t\treturn NetworkComponentResponse{}, ingreserr\n\t\t}\n\t}\n\tegreserr := ec2.CreateEgressRule(\n\t\t&aws.IngressEgressInput{\n\t\t\tSecId: *security.GroupId,\n\t\t},\n\t)\n\tif egreserr != nil {\n\t\treturn NetworkComponentResponse{}, egreserr\n\t}\n\n\tif net.GetRaw == true {\n\t\treturn NetworkComponentResponse{CreateSecurityRaw: security}, nil\n\t}\n\treturn NetworkComponentResponse{SecGroupIds: []string{*security.GroupId}}, nil\n}", "func (c *Canary) GetEC2CanarySecurityGroup(tg *elbv2.TargetGroup, region schemas.RegionConfig, lbSg *string, completeCanary bool) error {\n\tclient, err := selectClientFromList(c.AWSClients, region.Region)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnewSGName := c.GenerateCanarySecurityGroupName(region.Region)\n\n\tif completeCanary {\n\t\tgroupID, err := client.EC2Service.GetSecurityGroup(newSGName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.Deployer.SecurityGroup[region.Region] = groupID\n\t\tc.Logger.Debugf(\"Found existing security group id: %s\", *groupID)\n\n\t\treturn nil\n\t}\n\n\tduplicated := false\n\tgroupID, err := client.EC2Service.CreateSecurityGroup(newSGName, tg.VpcId)\n\tif err != nil 
{\n\t\tif aerr, ok := err.(awserr.Error); ok && aerr.Code() == \"InvalidGroup.Duplicate\" {\n\t\t\tc.Logger.Debugf(\"Security group is already created: %s\", newSGName)\n\t\t\tduplicated = true\n\t\t}\n\n\t\tif !duplicated {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif duplicated {\n\t\tgroupID, err = client.EC2Service.GetSecurityGroup(newSGName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.Logger.Debugf(\"Found existing security group id: %s\", *groupID)\n\t} else if err := client.EC2Service.UpdateOutboundRules(*groupID, \"-1\", \"0.0.0.0/0\", \"outbound to internet\", -1, -1); err != nil {\n\t\tc.Logger.Warn(err.Error())\n\t}\n\n\t// inbound\n\tif err := client.EC2Service.UpdateInboundRulesWithGroup(*groupID, \"tcp\", \"Allow access from canary load balancer\", lbSg, *tg.Port, *tg.Port); err != nil {\n\t\tc.Logger.Warn(err.Error())\n\t}\n\n\tc.Deployer.SecurityGroup[region.Region] = groupID\n\tc.Logger.Debugf(\"Security group for this canary deployment: %s\", *groupID)\n\n\treturn nil\n}", "func NewSecurityGroup() *SecurityGroup {\n\tthis := SecurityGroup{}\n\treturn &this\n}", "func (stg *securityTestGroup) testSecurityGroupCreateDelete() {\n\t// sg params\n\tsg := security.SecurityGroup{\n\t\tTypeMeta: api.TypeMeta{Kind: \"SecurityGroup\"},\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tTenant: \"default\",\n\t\t\tNamespace: \"default\",\n\t\t\tName: \"group1\",\n\t\t},\n\t\tSpec: security.SecurityGroupSpec{\n\t\t\tWorkloadSelector: labels.SelectorFromSet(labels.Set{\"env\": \"production\", \"app\": \"procurement\"}),\n\t\t},\n\t}\n\tvar securityGroupRestIf security.SecurityV1SecurityGroupInterface\n\tapiGwAddr := ts.tu.ClusterVIP + \":\" + globals.APIGwRESTPort\n\trestSvc, err := apiclient.NewRestAPIClient(apiGwAddr)\n\tif err == nil {\n\t\tBy(\"Creating SecurityGroup Client ------\")\n\t\tsecurityGroupRestIf = restSvc.SecurityV1().SecurityGroup()\n\t}\n\tExpect(err).ShouldNot(HaveOccurred())\n\tExpect(stg.securityRestIf).ShouldNot(Equal(nil))\n\n\tctx := ts.tu.MustGetLoggedInContext(context.Background())\n\t// create sg policy\n\tresp, err := securityGroupRestIf.Create(ctx, &sg)\n\tExpect(err).ShouldNot(HaveOccurred())\n\n\t// verify we can read the policy back\n\trsg, err := securityGroupRestIf.Get(ctx, &sg.ObjectMeta)\n\tExpect(err).ShouldNot(HaveOccurred())\n\tExpect(rsg).Should(Equal(resp))\n\n\t// verify agents have the policy\n\tEventually(func() bool {\n\t\tfor _, naplesIP := range ts.tu.NaplesNodeIPs {\n\t\t\tsglist, err := stg.getSecurityGroups(naplesIP)\n\t\t\tif err != nil {\n\t\t\t\tBy(fmt.Sprintf(\"ts:%s security group list failed, err: %+v sgs: %+v\", time.Now().String(), err, sglist))\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif (len(sglist) != 1) || (sglist[0].Name != sg.Name) {\n\t\t\t\tBy(fmt.Sprintf(\"ts:%s security group list has invalid items, security groups: %+v\", time.Now().String(), sglist))\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tBy(fmt.Sprintf(\"ts:%s security group list success, err: %+v sgs: %+v\", time.Now().String(), err, sglist))\n\t\t}\n\t\treturn true\n\t}, 30, 1).Should(BeTrue(), \"Failed to get security groups on netagent\")\n\n\t// delete the sg policy\n\tEventually(func() error {\n\t\t_, err = securityGroupRestIf.Delete(ctx, &sg.ObjectMeta)\n\t\treturn err\n\t}, 30, 1).ShouldNot(HaveOccurred())\n\n\t// verify policy is gone from the agents\n\tEventually(func() bool {\n\t\tfor _, naplesIP := range ts.tu.NaplesNodeIPs {\n\t\t\tsglist, err := stg.getSecurityGroups(naplesIP)\n\t\t\tif err != nil {\n\t\t\t\tBy(fmt.Sprintf(\"ts:%s security group list failed, err: 
%+v sgs: %+v\", time.Now().String(), err, sglist))\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif len(sglist) != 0 {\n\t\t\t\tBy(fmt.Sprintf(\"ts:%s security group list has invalid items, sg groups: %+v\", time.Now().String(), sglist))\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tBy(fmt.Sprintf(\"ts:%s security group list success, err: %+v sgs: %+v\", time.Now().String(), err, sglist))\n\t\t}\n\t\treturn true\n\t}, 30, 1).Should(BeTrue(), \"Failed to get security groups on netagent\")\n}", "func NewSecurityGroup(name string, protocol string, destination string, ports *string, description *string) resources.SecurityGroup {\n\treturn resources.SecurityGroup{\n\t\tName: name,\n\t\tRules: []resources.Rule{{\n\t\t\tProtocol: protocol,\n\t\t\tDestination: destination,\n\t\t\tPorts: ports,\n\t\t\tDescription: description,\n\t\t}},\n\t}\n}", "func CreateNetworkSecurityGroup(ctx context.Context, nsgName string) (nsg network.SecurityGroup, err error) {\n\tnsgClient := getNsgClient()\n\tfuture, err := nsgClient.CreateOrUpdate(\n\t\tctx,\n\t\tconfig.GroupName(),\n\t\tnsgName,\n\t\tnetwork.SecurityGroup{\n\t\t\tLocation: to.StringPtr(config.Location()),\n\t\t\tSecurityGroupPropertiesFormat: &network.SecurityGroupPropertiesFormat{\n\t\t\t\tSecurityRules: &[]network.SecurityRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: to.StringPtr(\"allow_ssh\"),\n\t\t\t\t\t\tSecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.SecurityRuleProtocolTCP,\n\t\t\t\t\t\t\tSourceAddressPrefix: to.StringPtr(\"0.0.0.0/0\"),\n\t\t\t\t\t\t\tSourcePortRange: to.StringPtr(\"1-65535\"),\n\t\t\t\t\t\t\tDestinationAddressPrefix: to.StringPtr(\"0.0.0.0/0\"),\n\t\t\t\t\t\t\tDestinationPortRange: to.StringPtr(\"22\"),\n\t\t\t\t\t\t\tAccess: network.SecurityRuleAccessAllow,\n\t\t\t\t\t\t\tDirection: network.SecurityRuleDirectionInbound,\n\t\t\t\t\t\t\tPriority: to.Int32Ptr(100),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: to.StringPtr(\"allow_https\"),\n\t\t\t\t\t\tSecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.SecurityRuleProtocolTCP,\n\t\t\t\t\t\t\tSourceAddressPrefix: to.StringPtr(\"0.0.0.0/0\"),\n\t\t\t\t\t\t\tSourcePortRange: to.StringPtr(\"1-65535\"),\n\t\t\t\t\t\t\tDestinationAddressPrefix: to.StringPtr(\"0.0.0.0/0\"),\n\t\t\t\t\t\t\tDestinationPortRange: to.StringPtr(\"443\"),\n\t\t\t\t\t\t\tAccess: network.SecurityRuleAccessAllow,\n\t\t\t\t\t\t\tDirection: network.SecurityRuleDirectionInbound,\n\t\t\t\t\t\t\tPriority: to.Int32Ptr(200),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tif err != nil {\n\t\treturn nsg, fmt.Errorf(\"cannot create nsg: %v\", err)\n\t}\n\n\terr = future.WaitForCompletion(ctx, nsgClient.Client)\n\tif err != nil {\n\t\treturn nsg, fmt.Errorf(\"cannot get nsg create or update future response: %v\", err)\n\t}\n\n\treturn future.Result(nsgClient)\n}", "func (c *Client) EvalEC2SG(accounts []*policy.Account, p policy.Policy, regions []string, f func(policy.Violation)) error {\n\tvar errs error\n\tctx := context.Background()\n\terr := c.WalkAccountsAndRegions(accounts, regions, func(client *cziAws.Client, account *policy.Account, region string) {\n\t\tvar nextToken *string\n\t\t// Limiting to 1000 iteration guarantees that we don't get an infinite loop, even if we have\n\t\t// a mistake below. 
Small tradeoff is that if there are greater than 1000*pagesize security\n\t\t// groups we won't scan them all.\n\t\tfor i := 1; i <= 1000; i++ {\n\t\t\tlog.Debugf(\"nextToken: %#v\", nextToken)\n\t\t\tinput := &ec2.DescribeSecurityGroupsInput{NextToken: nextToken}\n\n\t\t\toutput, err := client.EC2.Svc.DescribeSecurityGroupsWithContext(ctx, input)\n\n\t\t\tif err != nil {\n\t\t\t\terrs = multierror.Append(errs, err)\n\t\t\t} else {\n\t\t\t\tfor _, sg := range output.SecurityGroups {\n\t\t\t\t\ts := NewEC2SG(sg, region)\n\t\t\t\t\tif p.Match(s) {\n\t\t\t\t\t\tviolation := policy.NewViolation(p, s, false, account)\n\t\t\t\t\t\tf(violation)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif output.NextToken == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tnextToken = output.NextToken\n\t\t}\n\t})\n\terrs = multierror.Append(errs, err)\n\n\treturn errs\n}", "func (p *BoteaterServiceClient) CreateGroupV2(ctx context.Context, seq int32, name string, contactIds []string) (r *GroupStruct, err error) {\r\n var _args59 BoteaterServiceCreateGroupV2Args\r\n _args59.Seq = seq\r\n _args59.Name = name\r\n _args59.ContactIds = contactIds\r\n var _result60 BoteaterServiceCreateGroupV2Result\r\n if err = p.Client_().Call(ctx, \"createGroupV2\", &_args59, &_result60); err != nil {\r\n return\r\n }\r\n switch {\r\n case _result60.E!= nil:\r\n return r, _result60.E\r\n }\r\n\r\n return _result60.GetSuccess(), nil\r\n}", "func Listv2SGs(se Session) (PolicyList []interface{}) {\n\t// Create an EC2 service client.\n\tsvc := ec2.New(se.Sess)\n\n\t// Retrieve the security sg descriptions\n\tresult, err := svc.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{\n\t\tDryRun: aws.Bool(false),\n\t})\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase \"InvalidGroupId.Malformed\":\n\t\t\t\tfallthrough\n\t\t\tcase \"InvalidGroup.NotFound\":\n\t\t\t\texitErrorf(\"%s.\", aerr.Message())\n\t\t\t}\n\t\t}\n\t\texitErrorf(\"Unable to get descriptions for security sgs, %v\", err)\n\t}\n\t//\n\tpolicy := new(PolicyDetail)\n\tfor _, sg := range result.SecurityGroups {\n\t\tfor _, ippermission := range sg.IpPermissions {\n\t\t\tif *ippermission.IpProtocol == \"-1\" {\n\t\t\t\tif len(ippermission.IpRanges) != 0 {\n\t\t\t\t\tfor _, permission := range ippermission.IpRanges {\n\t\t\t\t\t\t// fmt.Println(\" ALL IpRanges information:\", *sg.GroupName, *sg.VpcId, *sg.GroupId, \"ALL PROTOCOL\", \"from port ALL\", \"end port ALL\", *permission.CidrIp)\n\t\t\t\t\t\tpolicy.GroupName = *sg.GroupName\n\t\t\t\t\t\tpolicy.VpcId = *sg.VpcId\n\t\t\t\t\t\tpolicy.GroupId = *sg.GroupId\n\t\t\t\t\t\tpolicy.Source = *permission.CidrIp\n\t\t\t\t\t\tpolicy.Protocol = \"ALL\"\n\t\t\t\t\t\tpolicy.FromPort = 0\n\t\t\t\t\t\tpolicy.ToPort = 65535\n\t\t\t\t\t\tPolicyList = append(PolicyList, *policy)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif len(ippermission.UserIdGroupPairs) != 0 {\n\t\t\t\t\tfor _, permission := range ippermission.UserIdGroupPairs {\n\t\t\t\t\t\t// fmt.Println(\" ALL GroupPairs information:\", *sg.GroupName, *sg.VpcId, *sg.GroupId, \"ALLPROTOCOL\", \"fromportALL\", \"endportALL\", *permission.GroupId)\n\t\t\t\t\t\tpolicy.GroupName = *sg.GroupName\n\t\t\t\t\t\tpolicy.VpcId = *sg.VpcId\n\t\t\t\t\t\tpolicy.GroupId = *sg.GroupId\n\t\t\t\t\t\tpolicy.Source = *permission.GroupId\n\t\t\t\t\t\tpolicy.Protocol = \"ALL\"\n\t\t\t\t\t\tpolicy.FromPort = 0\n\t\t\t\t\t\tpolicy.ToPort = 65535\n\t\t\t\t\t\tPolicyList = append(PolicyList, *policy)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif len(ippermission.PrefixListIds) != 0 
{\n\t\t\t\t\tfor _, permission := range ippermission.PrefixListIds {\n\t\t\t\t\t\t//fmt.Println(\" ===all===Prefix information:\", permission)\n\t\t\t\t\t\tpolicy.GroupName = *sg.GroupName\n\t\t\t\t\t\tpolicy.VpcId = *sg.VpcId\n\t\t\t\t\t\tpolicy.GroupId = *sg.GroupId\n\t\t\t\t\t\tpolicy.Source = *permission.PrefixListId\n\t\t\t\t\t\tpolicy.Protocol = \"Unknow\"\n\t\t\t\t\t\tpolicy.FromPort = 0\n\t\t\t\t\t\tpolicy.ToPort = 65535\n\t\t\t\t\t\tPolicyList = append(PolicyList, *policy)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif len(ippermission.IpRanges) != 0 {\n\t\t\t\t\tfor _, permission := range ippermission.IpRanges {\n\t\t\t\t\t\t//fmt.Println(\" IpRanges information:\", *sg.GroupName, *sg.VpcId, *sg.GroupId, *ippermission.IpProtocol, *ippermission.FromPort, *ippermission.ToPort, *permission.CidrIp)\n\t\t\t\t\t\tpolicy.GroupName = *sg.GroupName\n\t\t\t\t\t\tpolicy.VpcId = *sg.VpcId\n\t\t\t\t\t\tpolicy.GroupId = *sg.GroupId\n\t\t\t\t\t\tpolicy.Source = *permission.CidrIp\n\t\t\t\t\t\tpolicy.Protocol = *ippermission.IpProtocol\n\t\t\t\t\t\tpolicy.FromPort = *ippermission.FromPort\n\t\t\t\t\t\tpolicy.ToPort = *ippermission.ToPort\n\t\t\t\t\t\tPolicyList = append(PolicyList, *policy)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif len(ippermission.UserIdGroupPairs) != 0 {\n\t\t\t\t\tfor _, permission := range ippermission.UserIdGroupPairs {\n\t\t\t\t\t\t//fmt.Println(\" GroupPairs information:\", *sg.GroupName, *sg.VpcId, *sg.GroupId, *ippermission.IpProtocol, *ippermission.FromPort, *ippermission.ToPort, *permission.GroupId )\n\t\t\t\t\t\tpolicy.GroupName = *sg.GroupName\n\t\t\t\t\t\tpolicy.VpcId = *sg.VpcId\n\t\t\t\t\t\tpolicy.GroupId = *sg.GroupId\n\t\t\t\t\t\tpolicy.Source = *permission.GroupId\n\t\t\t\t\t\tpolicy.Protocol = *ippermission.IpProtocol\n\t\t\t\t\t\tpolicy.FromPort = *ippermission.FromPort\n\t\t\t\t\t\tpolicy.ToPort = *ippermission.ToPort\n\t\t\t\t\t\tPolicyList = append(PolicyList, *policy)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif len(ippermission.PrefixListIds) != 0 {\n\t\t\t\t\tfor _, permission := range ippermission.PrefixListIds {\n\t\t\t\t\t\t//fmt.Println(\" ======Prefix information:\", permission)\n\t\t\t\t\t\tpolicy.GroupName = *sg.GroupName\n\t\t\t\t\t\tpolicy.VpcId = *sg.VpcId\n\t\t\t\t\t\tpolicy.GroupId = *sg.GroupId\n\t\t\t\t\t\tpolicy.Source = *permission.PrefixListId\n\t\t\t\t\t\tpolicy.Protocol = \"Unknow\"\n\t\t\t\t\t\tpolicy.FromPort = -1\n\t\t\t\t\t\tpolicy.ToPort = -1\n\t\t\t\t\t\tPolicyList = append(PolicyList, *policy)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn PolicyList\n}", "func Find(ec2Client aws.EC2API, nameTags []*string) ([]*SecurityGroup, error) {\n\tfilters := []*ec2.Filter{\n\t\t&ec2.Filter{\n\t\t\tName: to.Strp(\"tag-key\"),\n\t\t\tValues: []*string{to.Strp(\"Name\")},\n\t\t},\n\t\t&ec2.Filter{\n\t\t\tName: to.Strp(\"tag-value\"),\n\t\t\tValues: nameTags,\n\t\t},\n\t}\n\n\toutput, err := ec2Client.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{\n\t\tFilters: filters,\n\t\tMaxResults: to.Int64p(5), // Smallest allowed value returns\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsgs := newSGs(output.SecurityGroups)\n\tswitch len(sgs) {\n\tcase len(nameTags):\n\t\treturn sgs, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"Number of Security Groups %v/%v\", len(sgs), len(nameTags))\n\t}\n}", "func (c *Canary) CreateCanaryLBSecurityGroup(tg *elbv2.TargetGroup, region schemas.RegionConfig) (*string, error) {\n\tclient, err := selectClientFromList(c.AWSClients, region.Region)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\n\tlbSGName := c.GenerateCanaryLBSecurityGroupName(region.Region)\n\n\tduplicated := false\n\tgroupID, err := client.EC2Service.CreateSecurityGroup(lbSGName, tg.VpcId)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok && aerr.Code() == \"InvalidGroup.Duplicate\" {\n\t\t\tc.Logger.Debugf(\"Security group is already created: %s\", lbSGName)\n\t\t\tduplicated = true\n\t\t}\n\t\tif !duplicated {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif duplicated {\n\t\tgroupID, err = client.EC2Service.GetSecurityGroup(lbSGName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tc.Logger.Debugf(\"Found existing security group id: %s\", *groupID)\n\t}\n\n\t// inbound\n\tif err := client.EC2Service.UpdateInboundRules(*groupID, \"tcp\", \"0.0.0.0/0\", \"inbound from internet\", 80, 80); err != nil {\n\t\tc.Logger.Warn(err.Error())\n\t}\n\n\t// outbound\n\tif err := client.EC2Service.UpdateOutboundRules(*groupID, \"-1\", \"0.0.0.0/0\", \"outbound to internet\", -1, -1); err != nil {\n\t\tc.Logger.Warn(err.Error())\n\t}\n\n\treturn groupID, nil\n}", "func New(app, account, region, stack, cluster string) InstanceGroup {\n\treturn group{\n\t\tapp: app,\n\t\taccount: account,\n\t\tregion: region,\n\t\tstack: stack,\n\t\tcluster: cluster,\n\t}\n}", "func CreateDescribeScalingGroupsRequest() (request *DescribeScalingGroupsRequest) {\n\trequest = &DescribeScalingGroupsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Ess\", \"2014-08-28\", \"DescribeScalingGroups\", \"ess\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateSecurityGroup(s resources.SecurityGroup) {\n\tdir, err := ioutil.TempDir(\"\", \"simple-security-group\")\n\tExpect(err).ToNot(HaveOccurred())\n\tdefer os.RemoveAll(dir)\n\n\ttempfile := filepath.Join(dir, \"security-group.json\")\n\n\tsecurityGroup, err := json.Marshal(s.Rules)\n\tExpect(err).ToNot(HaveOccurred())\n\n\terr = ioutil.WriteFile(tempfile, securityGroup, 0666)\n\tExpect(err).ToNot(HaveOccurred())\n\tEventually(CF(\"create-security-group\", s.Name, tempfile)).Should(Exit(0))\n}", "func (provider *ResourceProvider) SecurityGroup(id string) (*reachAWS.SecurityGroup, error) {\n\tinput := &ec2.DescribeSecurityGroupsInput{\n\t\tGroupIds: []*string{\n\t\t\taws.String(id),\n\t\t},\n\t}\n\tresult, err := provider.ec2.DescribeSecurityGroups(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = ensureSingleResult(len(result.SecurityGroups), \"security group\", id); err != nil {\n\t\treturn nil, err\n\t}\n\n\tsecurityGroup := newSecurityGroupFromAPI(result.SecurityGroups[0])\n\treturn &securityGroup, nil\n}", "func NewSecurityGroupCollector(logger log.Logger, client *scw.Client, failures *prometheus.CounterVec, duration *prometheus.HistogramVec, cfg config.Target) *SecurityGroupCollector {\n\tif failures != nil {\n\t\tfailures.WithLabelValues(\"security_group\").Add(0)\n\t}\n\n\tlabels := []string{\"id\", \"name\", \"zone\", \"org\", \"project\"}\n\tcollector := &SecurityGroupCollector{\n\t\tclient: client,\n\t\tinstance: instance.NewAPI(client),\n\t\tlogger: log.With(logger, \"collector\", \"security_group\"),\n\t\tfailures: failures,\n\t\tduration: duration,\n\t\tconfig: cfg,\n\n\t\tDefined: prometheus.NewDesc(\n\t\t\t\"scw_security_group_defined\",\n\t\t\t\"Constant value of 1 that this security group is defined\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tEnableDefault: prometheus.NewDesc(\n\t\t\t\"scw_security_group_enable_default\",\n\t\t\t\"1 if the security group is enabled by default, 0 
otherwise\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tProjectDefault: prometheus.NewDesc(\n\t\t\t\"scw_security_group_project_default\",\n\t\t\t\"1 if the security group is an project default, 0 otherwise\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tStateful: prometheus.NewDesc(\n\t\t\t\"scw_security_group_stateful\",\n\t\t\t\"1 if the security group is stateful by default, 0 otherwise\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tInboundDefault: prometheus.NewDesc(\n\t\t\t\"scw_security_group_inbound_default_policy\",\n\t\t\t\"1 if the security group inbound default policy is accept, 0 otherwise\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tOutboundDefault: prometheus.NewDesc(\n\t\t\t\"scw_security_group_outbound_default_policy\",\n\t\t\t\"1 if the security group outbound default policy is accept, 0 otherwise\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tServers: prometheus.NewDesc(\n\t\t\t\"scw_security_group_servers_count\",\n\t\t\t\"Number of servers attached to the security group\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tCreated: prometheus.NewDesc(\n\t\t\t\"scw_security_group_created_timestamp\",\n\t\t\t\"Timestamp when the security group have been created\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t\tModified: prometheus.NewDesc(\n\t\t\t\"scw_security_group_modified_timestamp\",\n\t\t\t\"Timestamp when the security group have been modified\",\n\t\t\tlabels,\n\t\t\tnil,\n\t\t),\n\t}\n\n\tif cfg.Org != \"\" {\n\t\tcollector.org = scw.StringPtr(cfg.Org)\n\t}\n\n\tif cfg.Project != \"\" {\n\t\tcollector.project = scw.StringPtr(cfg.Project)\n\t}\n\n\treturn collector\n}", "func GetSecurityGroupInfo(sess *session.Session) (*ec2.DescribeSecurityGroupsOutput, error) {\n // snippet-start:[ec2.go.describe_security_groups.call]\n svc := ec2.New(sess)\n\n result, err := svc.DescribeSecurityGroups(nil)\n // snippet-end:[ec2.go.describe_security_groups.call]\n if err != nil {\n return nil, err\n }\n\n return result, nil\n}", "func (s *Service) CreateOrUpdateNetworkSecurityGroup(resourceGroupName string, networkSecurityGroupName string, location string) (*armada.SecurityGroupsCreateOrUpdateFuture, error) {\n\t//JEB if networkSecurityGroupName == \"\" {\n\t//JEB networkSecurityGroupName = SecurityGroupDefaultName\n\t//JEB }\n\t//JEB sshInbound := armada.SecurityRule{\n\t//JEB Name: to.StringPtr(\"ClusterAPISSH\"),\n\t//JEB SecurityRulePropertiesFormat: &armada.SecurityRulePropertiesFormat{\n\t//JEB Protocol: armada.SecurityRuleProtocolTCP,\n\t//JEB SourcePortRange: to.StringPtr(\"*\"),\n\t//JEB DestinationPortRange: to.StringPtr(\"22\"),\n\t//JEB SourceAddressPrefix: to.StringPtr(\"*\"),\n\t//JEB DestinationAddressPrefix: to.StringPtr(\"*\"),\n\t//JEB Priority: to.Int32Ptr(1000),\n\t//JEB Direction: armada.SecurityRuleDirectionInbound,\n\t//JEB Access: armada.SecurityRuleAccessAllow,\n\t//JEB },\n\t//JEB }\n\t//JEB\n\t//JEB kubernetesInbound := armada.SecurityRule{\n\t//JEB Name: to.StringPtr(\"KubernetesAPI\"),\n\t//JEB SecurityRulePropertiesFormat: &armada.SecurityRulePropertiesFormat{\n\t//JEB Protocol: armada.SecurityRuleProtocolTCP,\n\t//JEB SourcePortRange: to.StringPtr(\"*\"),\n\t//JEB DestinationPortRange: to.StringPtr(\"6443\"),\n\t//JEB SourceAddressPrefix: to.StringPtr(\"*\"),\n\t//JEB DestinationAddressPrefix: to.StringPtr(\"*\"),\n\t//JEB Priority: to.Int32Ptr(1001),\n\t//JEB Direction: armada.SecurityRuleDirectionInbound,\n\t//JEB Access: armada.SecurityRuleAccessAllow,\n\t//JEB },\n\t//JEB }\n\t//JEB\n\t//JEB securityGroupProperties := armada.SecurityGroupPropertiesFormat{\n\t//JEB SecurityRules: 
&[]armada.SecurityRule{sshInbound, kubernetesInbound},\n\t//JEB }\n\t//JEB securityGroup := armada.SecurityGroup{\n\t//JEB Location: to.StringPtr(location),\n\t//JEB SecurityGroupPropertiesFormat: &securityGroupProperties,\n\t//JEB }\n\t//JEB sgFuture, err := s.scope.AirshipClients.SecurityGroups.CreateOrUpdate(s.scope.Context, resourceGroupName, networkSecurityGroupName, securityGroup)\n\t//JEB if err != nil {\n\t//JEB return nil, err\n\t//JEB }\n\t//JEB return &sgFuture, nil\n\treturn &armada.SecurityGroupsCreateOrUpdateFuture{}, nil\n}", "func NewFromV2(aV2 *storagegroup.StorageGroup) *StorageGroup {\n\treturn (*StorageGroup)(aV2)\n}", "func (c *MockNetworkSecurityGroupsClient) Get(ctx context.Context, resourceGroupName string, nsgName string) (*network.SecurityGroup, error) {\n\tasg, ok := c.NSGs[nsgName]\n\tif !ok {\n\t\treturn nil, nil\n\t}\n\treturn &asg, nil\n}", "func (a *AliyunInstanceAttribute) createBastionHostSecurityGroup() {\n\tres, err := ExecCmdReturnOutput(\"bash\", \"-c\", \"aliyun ecs DescribeSecurityGroups --VpcId=\"+a.VpcID)\n\tcheckError(err)\n\tdecodedQuery := decodeAndQueryFromJSONString(res)\n\n\tsecurityGroupNames, err := decodedQuery.Array(\"SecurityGroups\", \"SecurityGroup\")\n\tcheckError(err)\n\tsecurityGroupExists := false\n\tfor _, iter := range securityGroupNames {\n\t\tsecurityGroup := jsonq.NewQuery(iter)\n\t\tname, err := securityGroup.String(\"SecurityGroupName\")\n\t\tcheckError(err)\n\t\tif name == a.BastionSecurityGroupName {\n\t\t\tsecurityGroupExists = true\n\t\t\ta.BastionSecurityGroupID, err = securityGroup.String(\"SecurityGroupId\")\n\t\t\tcheckError(err)\n\t\t\tfmt.Println(\"Configuring bastion host security group rules...\")\n\t\t\tcreateSGCmdString := \"aliyun ecs AuthorizeSecurityGroup --Policy Accept --NicType intranet --Priority 1 --SourceCidrIp \" + a.MyPublicIP + \" --PortRange 22/22 --IpProtocol tcp --SecurityGroupId=\" + a.BastionSecurityGroupID\n\t\t\t_, err = ExecCmdReturnOutput(\"bash\", \"-c\", createSGCmdString)\n\t\t\tcheckError(err)\n\t\t\ttime.Sleep(time.Second * 10)\n\t\t\tfmt.Println(\"Bastion host security group rules configured.\")\n\t\t}\n\t}\n\n\tif !securityGroupExists {\n\t\tres, err = ExecCmdReturnOutput(\"bash\", \"-c\", \"aliyun ecs CreateSecurityGroup --RegionId=\"+a.RegionID+\" --VpcId=\"+a.VpcID+\" --SecurityGroupName=\"+a.BastionSecurityGroupName)\n\t\tcheckError(err)\n\t\tdecodedQuery = decodeAndQueryFromJSONString(res)\n\t\ta.BastionSecurityGroupID, err = decodedQuery.String(\"SecurityGroupId\")\n\t\tcheckError(err)\n\t\tattemptCnt := 0\n\t\tfor attemptCnt < 60 {\n\t\t\tres, err = ExecCmdReturnOutput(\"bash\", \"-c\", \"aliyun ecs DescribeSecurityGroups --SecurityGroupIds=\\\"['\"+a.BastionSecurityGroupID+\"']\\\"\")\n\t\t\tcheckError(err)\n\t\t\tdecodedQuery = decodeAndQueryFromJSONString(res)\n\t\t\ttotalCount, err := decodedQuery.Int(\"TotalCount\")\n\t\t\tcheckError(err)\n\t\t\tif totalCount == 1 {\n\t\t\t\ttime.Sleep(time.Second * 30)\n\t\t\t\tfmt.Println(\"Bastion host security group created.\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfmt.Println(\"Creating bastion host security group...\")\n\t\t\ttime.Sleep(time.Second * 2)\n\t\t\tattemptCnt++\n\t\t}\n\t\tif attemptCnt == 60 {\n\t\t\tfmt.Println(\"Bastion host security group creation time out. 
Please try again.\")\n\t\t\tos.Exit(2)\n\t\t}\n\t\tfmt.Println(\"Configuring bastion host security group rules...\")\n\t\tcreateSGCmdString := \"aliyun ecs AuthorizeSecurityGroup --Policy Accept --NicType intranet --Priority 1 --SourceCidrIp \" + a.MyPublicIP + \" --PortRange 22/22 --IpProtocol tcp --SecurityGroupId=\" + a.BastionSecurityGroupID\n\t\t_, err = ExecCmdReturnOutput(\"bash\", \"-c\", createSGCmdString)\n\t\tcheckError(err)\n\t\ttime.Sleep(time.Second * 10)\n\t\tfmt.Println(\"Bastion host security group rules configured.\")\n\t}\n}", "func (c *Client) newServicegroup(servicegroup *Servicegroup) ([]byte, error) {\n\tnagiosURL, err := c.buildURL(\"servicegroup\", \"POST\", \"\", \"\", \"\", \"\")\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := setURLParams(servicegroup)\n\n\tbody, err := c.post(data, nagiosURL)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn body, nil\n}", "func newRsaKeyPair(config CreateKeyPairConfig) (KeyPair, error) {\n\tif config.Bits == 0 {\n\t\tconfig.Bits = defaultRsaBits\n\t}\n\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, config.Bits)\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\tsshPublicKey, err := gossh.NewPublicKey(&privateKey.PublicKey)\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\tprivatePemBlock, err := rawPemBlock(&pem.Block{\n\t\tType: \"RSA PRIVATE KEY\",\n\t\tHeaders: nil,\n\t\tBytes: x509.MarshalPKCS1PrivateKey(privateKey),\n\t})\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\treturn KeyPair{\n\t\tPrivateKeyPemBlock: privatePemBlock,\n\t\tPublicKeyAuthorizedKeysLine: authorizedKeysLine(sshPublicKey, config.Comment),\n\t\tComment: config.Comment,\n\t}, nil\n}", "func CreateSimpleNetworkSecurityGroup(ctx context.Context, nsgName string) (nsg network.SecurityGroup, err error) {\n\tnsgClient := getNsgClient()\n\tfuture, err := nsgClient.CreateOrUpdate(\n\t\tctx,\n\t\tconfig.GroupName(),\n\t\tnsgName,\n\t\tnetwork.SecurityGroup{\n\t\t\tLocation: to.StringPtr(config.Location()),\n\t\t},\n\t)\n\n\tif err != nil {\n\t\treturn nsg, fmt.Errorf(\"cannot create nsg: %v\", err)\n\t}\n\n\terr = future.WaitForCompletion(ctx, nsgClient.Client)\n\tif err != nil {\n\t\treturn nsg, fmt.Errorf(\"cannot get nsg create or update future response: %v\", err)\n\t}\n\n\treturn future.Result(nsgClient)\n}", "func ExampleRDS_CreateDBSecurityGroup_shared00() {\n\tsvc := rds.New(session.New())\n\tinput := &rds.CreateDBSecurityGroupInput{\n\t\tDBSecurityGroupDescription: aws.String(\"My DB security group\"),\n\t\tDBSecurityGroupName: aws.String(\"mydbsecuritygroup\"),\n\t}\n\n\tresult, err := svc.CreateDBSecurityGroup(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase rds.ErrCodeDBSecurityGroupAlreadyExistsFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBSecurityGroupAlreadyExistsFault, aerr.Error())\n\t\t\tcase rds.ErrCodeDBSecurityGroupQuotaExceededFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBSecurityGroupQuotaExceededFault, aerr.Error())\n\t\t\tcase rds.ErrCodeDBSecurityGroupNotSupportedFault:\n\t\t\t\tfmt.Println(rds.ErrCodeDBSecurityGroupNotSupportedFault, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (c *MockNetworkSecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName, nsgName string, parameters 
network.SecurityGroup) (*network.SecurityGroup, error) {\n\t// Ignore resourceGroupName for simplicity.\n\tif _, ok := c.NSGs[nsgName]; ok {\n\t\treturn nil, fmt.Errorf(\"update not supported\")\n\t}\n\tparameters.Name = &nsgName\n\tc.NSGs[nsgName] = parameters\n\treturn &parameters, nil\n}", "func NewAsgSess(profile, region string) *AutoScaling {\n\treturn &AutoScaling{\n\t\tClient: autoscaling.New(GetSession(profile, region)),\n\t}\n}", "func (a *Azure) CreateSimpleNetworkSecurityGroup(ctx context.Context, location string, nsgName string) (nsg network.SecurityGroup, err error) {\n\tnsgClient, err := a.getNsgClient()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfuture, err := nsgClient.CreateOrUpdate(\n\t\tctx,\n\t\ta.groupName,\n\t\tnsgName,\n\t\tnetwork.SecurityGroup{\n\t\t\tLocation: to.StringPtr(location),\n\t\t},\n\t)\n\n\tif err != nil {\n\t\treturn nsg, fmt.Errorf(\"cannot create nsg: %v\", err)\n\t}\n\n\terr = future.WaitForCompletionRef(ctx, nsgClient.Client)\n\tif err != nil {\n\t\treturn nsg, fmt.Errorf(\"cannot get nsg create or update future response: %v\", err)\n\t}\n\n\tnsg, err = future.Result(*nsgClient)\n\treturn\n}", "func (*CreateSecurityGroupRequest) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_vpc_v1_security_group_service_proto_rawDescGZIP(), []int{3}\n}", "func CreateNetworkSecurityGroupRule() {}", "func (*SDKGetter) EC2(session *session.Session) EC2Interface {\n\treturn ec2svc.NewService(ec2.New(session))\n}", "func newEcdsaKeyPair(config CreateKeyPairConfig) (KeyPair, error) {\n\tvar curve elliptic.Curve\n\n\tswitch config.Bits {\n\tcase 0:\n\t\tconfig.Bits = 521\n\t\tfallthrough\n\tcase 521:\n\t\tcurve = elliptic.P521()\n\tcase 384:\n\t\tcurve = elliptic.P384()\n\tcase 256:\n\t\tcurve = elliptic.P256()\n\tcase 224:\n\t\t// Not supported by \"golang.org/x/crypto/ssh\".\n\t\treturn KeyPair{}, fmt.Errorf(\"golang.org/x/crypto/ssh does not support %d bits\", config.Bits)\n\tdefault:\n\t\treturn KeyPair{}, fmt.Errorf(\"crypto/elliptic does not support %d bits\", config.Bits)\n\t}\n\n\tprivateKey, err := ecdsa.GenerateKey(curve, rand.Reader)\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\tsshPublicKey, err := gossh.NewPublicKey(&privateKey.PublicKey)\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\tprivateRaw, err := x509.MarshalECPrivateKey(privateKey)\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\tprivatePem, err := rawPemBlock(&pem.Block{\n\t\tType: \"EC PRIVATE KEY\",\n\t\tHeaders: nil,\n\t\tBytes: privateRaw,\n\t})\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\treturn KeyPair{\n\t\tPrivateKeyPemBlock: privatePem,\n\t\tPublicKeyAuthorizedKeysLine: authorizedKeysLine(sshPublicKey, config.Comment),\n\t\tComment: config.Comment,\n\t}, nil\n}", "func createENI(ec2Client *ec2.EC2, cfg *config) (*ec2.NetworkInterface, error) {\n\tvar filterValuesGroupName []*string\n\tfor _, sg := range cfg.securityGroups {\n\t\tfilterValuesGroupName = append(filterValuesGroupName, aws.String(sg))\n\t}\n\t// Get security group id for the security group that the instance was\n\t// started with\n\tsecurityGroups, err := ec2Client.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{\n\t\tFilters: []*ec2.Filter{\n\t\t\t{\n\t\t\t\tName: aws.String(\"group-name\"),\n\t\t\t\tValues: filterValuesGroupName,\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: aws.String(\"vpc-id\"),\n\t\t\t\tValues: []*string{aws.String(cfg.vpc)},\n\t\t\t},\n\t\t}})\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get security group ids\")\n\t}\n\tvar securityGroupIDs 
[]*string\n\tfor _, sg := range securityGroups.SecurityGroups {\n\t\tsecurityGroupIDs = append(securityGroupIDs, sg.GroupId)\n\t}\n\n\t// Create the ENI\n\toutput, err := ec2Client.CreateNetworkInterface(&ec2.CreateNetworkInterfaceInput{\n\t\tDescription: aws.String(\"for running end-to-end test for ECS ENI Plugin\"),\n\t\tGroups: securityGroupIDs,\n\t\tSubnetId: aws.String(cfg.subnet),\n\t})\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to create network interface\")\n\t}\n\treturn output.NetworkInterface, nil\n}", "func newGroup(groupId string, broadcastChannelCap int64) *group {\n\n\tg := &group{\n\t\tId: groupId,\n\t\tclients: make(map[string]*socketClient),\n\t\tbroadcastChannel: make(chan interface{}, broadcastChannelCap),\n\t\tshutdownChannel: make(chan interface{}),\n\t\tdownChannel: make(chan interface{}, broadcastChannelCap),\n\t}\n\n\tAppLogger.Infof(\"[newGroup] group: %s created\", groupId)\n\treturn g\n}", "func (c *Client) CreateSecurityGroup(ctx context.Context, zone string,\n\tsecurityGroup *SecurityGroup) (*SecurityGroup, error) {\n\tresp, err := c.CreateSecurityGroupWithResponse(ctx, papi.CreateSecurityGroupJSONRequestBody{\n\t\tDescription: &securityGroup.Description,\n\t\tName: securityGroup.Name,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres, err := papi.NewPoller().\n\t\tWithTimeout(c.timeout).\n\t\tPoll(ctx, c.OperationPoller(zone, *resp.JSON200.Id))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c.GetSecurityGroup(ctx, zone, *res.(*papi.Reference).Id)\n}", "func New() *BBSG2Pub {\n\treturn &BBSG2Pub{}\n}", "func New(gid string) *Group {\n return &Group{\n Client: client.New().Init(),\n GroupID: gid,\n }\n}", "func TestExistingSG(t *testing.T) {\n\tnewIntegrationTest(\"existingsg.example.com\", \"existing_sg\").withZones(3).\n\t\twithAddons(\n\t\t\tawsEBSCSIAddon,\n\t\t\tdnsControllerAddon,\n\t\t\tawsCCMAddon,\n\t\t).\n\t\trunTestTerraformAWS(t)\n}", "func (sg *StorageGroup) ToV2() *storagegroup.StorageGroup {\n\treturn (*storagegroup.StorageGroup)(sg)\n}", "func NewEC2Client(config EC2ClientConfig) (*AwsEC2, error) {\n\tif config.ControllerID == \"\" {\n\t\treturn nil, fmt.Errorf(\"ControllerID is a required parameter\")\n\t}\n\tif config.Nametag == \"\" {\n\t\treturn nil, fmt.Errorf(\"Nametag is a required parameter\")\n\t}\n\tec2Client, err := getEC2Client(config.EndpointURL, config.InsecureTLSSkipVerify)\n\tif err != nil {\n\t\treturn nil, util.WrapError(err, \"Error creating EC2 client\")\n\t}\n\tvar ecsClient *ecs.ECS\n\tif config.ECSClusterName != \"\" {\n\t\tecsClient, err = getECSClient(config.EndpointURL, config.InsecureTLSSkipVerify)\n\t\tif err != nil {\n\t\t\treturn nil, util.WrapError(err, \"Error creating ECS client\")\n\t\t}\n\t}\n\tssmClient, err := getSSMClient(config.EndpointURL, config.InsecureTLSSkipVerify)\n\tif err != nil {\n\t\treturn nil, util.WrapError(err, \"creating SSM client\")\n\t}\n\tiamClient, err := getIAMClient(config.EndpointURL, config.InsecureTLSSkipVerify)\n\tif err != nil {\n\t\treturn nil, util.WrapError(err, \"creating IAM client\")\n\t}\n\tclient := &AwsEC2{\n\t\tclient: ec2Client,\n\t\tecs: ecsClient,\n\t\tssm: ssmClient,\n\t\tiam: iamClient,\n\t\tecsClusterName: config.ECSClusterName,\n\t\tcontrollerID: config.ControllerID,\n\t\tnametag: config.Nametag,\n\t}\n\tclient.vpcID, client.vpcCIDR, err = client.assertVPCExists(config.VPCID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient.subnetID = config.SubnetID\n\tif client.subnetID == \"\" {\n\t\tclient.subnetID, err = 
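Several snippets in this set (createENI above, createVPC further down) build EC2 Name/Values filter literals by hand. A small helper keeps those call sites short; this is a sketch assuming aws-sdk-go v1, and the VPC ID shown is a hypothetical placeholder.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// ec2Filter wraps the Name/Values pointer boilerplate of *ec2.Filter.
func ec2Filter(name string, values ...string) *ec2.Filter {
	return &ec2.Filter{Name: aws.String(name), Values: aws.StringSlice(values)}
}

func main() {
	// Assumed setup: credentials and region come from the default chain.
	svc := ec2.New(session.Must(session.NewSession()))
	out, err := svc.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{
		Filters: []*ec2.Filter{
			ec2Filter("vpc-id", "vpc-0123456789abcdef0"), // hypothetical VPC ID
			ec2Filter("group-name", "default"),
		},
	})
	if err != nil {
		fmt.Println("describe failed:", err)
		return
	}
	for _, sg := range out.SecurityGroups {
		fmt.Println(aws.StringValue(sg.GroupId), aws.StringValue(sg.GroupName))
	}
}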
detectCurrentSubnet()\n\t\tif err != nil {\n\t\t\treturn nil, util.WrapError(err, \"Could not detect current subnet from metadata service. Please supply an AWS subnet id in provider.yaml\")\n\t\t}\n\t}\n\tclient.region = os.Getenv(\"AWS_REGION\")\n\n\tsubnetAttrs, err := client.getSubnetAttributes(client.subnetID)\n\tif err != nil {\n\t\treturn nil, util.WrapError(err, \"Error getting subnet attributes\")\n\t}\n\tclient.availabilityZone = subnetAttrs.AZ\n\tclient.usePublicIPs = !config.PrivateIPOnly\n\tif subnetAttrs.AddressAffinity == cloud.PrivateAddress {\n\t\tklog.V(2).Infoln(\"cells will run in a private subnet (no route to internet gateway)\")\n\t\tclient.usePublicIPs = false\n\t}\n\treturn client, nil\n}", "func (aaa *GroupService) CreateNewGroupPublicV2(input *group.CreateNewGroupPublicV2Params) (*groupclientmodels.ModelsGroupResponseV1, error) {\n\ttoken, err := aaa.TokenRepository.GetToken()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcreated, badRequest, unauthorized, forbidden, conflict, internalServerError, err := aaa.Client.Group.CreateNewGroupPublicV2(input, client.BearerToken(*token.AccessToken))\n\tif badRequest != nil {\n\t\treturn nil, badRequest\n\t}\n\tif unauthorized != nil {\n\t\treturn nil, unauthorized\n\t}\n\tif forbidden != nil {\n\t\treturn nil, forbidden\n\t}\n\tif conflict != nil {\n\t\treturn nil, conflict\n\t}\n\tif internalServerError != nil {\n\t\treturn nil, internalServerError\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn created.GetPayload(), nil\n}", "func New(merge bool) *schg {\n\treturn &schg{services: make(map[string]string), merge: merge}\n}", "func newNetwork(cfg *config.Network, c *ec2.EC2) (*network, error) {\n\tlog.Debug(\"Initializing AWS Network\")\n\tn := &network{\n\t\tResources: resource.NewResources(),\n\t\tNetwork: cfg,\n\t\tec2: c,\n\t}\n\n\tvpc, err := newVpc(c, n)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tn.vpc = vpc\n\tn.Append(vpc)\n\n\trouteTables, err := newRouteTables(c, n)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tn.routeTables = routeTables\n\tn.Append(routeTables)\n\n\tinternetGateway, err := newInternetGateway(c, n, \"public\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tn.internetGateway = internetGateway\n\tn.Append(internetGateway)\n\n\t// Load the vpc since it is needed for the caches.\n\terr = n.vpc.Load()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tn.subnetCache, err = newSubnetCache(n)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tn.secgroupCache, err = newSecurityGroupCache(n)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn n, nil\n}", "func NewServerGroup(ctx *pulumi.Context,\n\tname string, args *ServerGroupArgs, opts ...pulumi.ResourceOption) (*ServerGroup, error) {\n\tif args == nil {\n\t\targs = &ServerGroupArgs{}\n\t}\n\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource ServerGroup\n\terr := ctx.RegisterResource(\"openstack:compute/serverGroup:ServerGroup\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewGetSecurityGroupRequest(server string, id string) (*http.Request, error) {\n\tvar err error\n\n\tvar pathParam0 string\n\n\tpathParam0, err = runtime.StyleParam(\"simple\", false, \"id\", id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/security-group/%s\", pathParam0)\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = 
queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"GET\", queryUrl.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn req, nil\n}", "func (p providerServices) EC2() ec2.EC2er {\n\treturn p.ec2\n}", "func New(auth stacks.AuthenticationOptions, localCfg stacks.AWSConfiguration, cfg stacks.ConfigurationOptions) (*stack, error) { // nolint\n\tif localCfg.Ec2Endpoint == \"\" {\n\t\tlocalCfg.Ec2Endpoint = fmt.Sprintf(\"https://ec2.%s.amazonaws.com\", localCfg.Region)\n\t}\n\tif localCfg.SsmEndpoint == \"\" {\n\t\tlocalCfg.SsmEndpoint = fmt.Sprintf(\"https://ssm.%s.amazonaws.com\", localCfg.Region)\n\t}\n\n\tstack := &stack{\n\t\tConfig: &cfg,\n\t\tAuthOptions: &auth,\n\t\tAwsConfig: &localCfg,\n\t}\n\n\taccessKeyID := auth.AccessKeyID\n\tsecretAccessKey := auth.SecretAccessKey\n\n\tss3 := session.Must(session.NewSession(&aws.Config{\n\t\tCredentials: credentials.NewStaticCredentials(accessKeyID, secretAccessKey, \"\"),\n\t\tS3ForcePathStyle: aws.Bool(true),\n\t\tRegion: aws.String(localCfg.Region),\n\t\tEndpoint: aws.String(localCfg.S3Endpoint),\n\t}))\n\n\tsec2 := session.Must(session.NewSession(&aws.Config{\n\t\tCredentials: credentials.NewStaticCredentials(accessKeyID, secretAccessKey, \"\"),\n\t\tS3ForcePathStyle: aws.Bool(true),\n\t\tRegion: aws.String(localCfg.Region),\n\t\tEndpoint: aws.String(localCfg.Ec2Endpoint),\n\t}))\n\n\tsssm := session.Must(session.NewSession(&aws.Config{\n\t\tCredentials: credentials.NewStaticCredentials(accessKeyID, secretAccessKey, \"\"),\n\t\tS3ForcePathStyle: aws.Bool(true),\n\t\tRegion: aws.String(localCfg.Region),\n\t\tEndpoint: aws.String(localCfg.SsmEndpoint),\n\t}))\n\n\tspricing := session.Must(session.NewSession(&aws.Config{\n\t\tCredentials: credentials.NewStaticCredentials(accessKeyID, secretAccessKey, \"\"),\n\t\tS3ForcePathStyle: aws.Bool(true),\n\t\tRegion: aws.String(endpoints.UsEast1RegionID),\n\t}))\n\n\tstack.S3Service = s3.New(ss3, &aws.Config{})\n\tstack.EC2Service = ec2.New(sec2, &aws.Config{})\n\tstack.SSMService = ssm.New(sssm, &aws.Config{})\n\tstack.PricingService = pricing.New(spricing, &aws.Config{})\n\n\tif cfg.Timings != nil {\n\t\tstack.MutableTimings = cfg.Timings\n\t} else {\n\t\tstack.MutableTimings = temporal.NewTimings()\n\t}\n\n\treturn stack, nil\n}", "func NewSystemGroup(name string, members ...string) *Group {\n\treturn &Group{\n\t\tName: name,\n\t\tpassword: \"\",\n\t\tGID: -1,\n\t\tUserList: members,\n\n\t\taddSystemGroup: true,\n\t}\n}", "func NewEc2Instance(ctx sdutils.AppContext, dd *awsDeploymentDescription) (*Ec2Instance, error) {\n\tvar err error\n\tcustomData := \"\"\n\tif dd.customPropFile != \"\" {\n\t\tdata, err := ioutil.ReadFile(dd.customPropFile)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Invalid custom properties file: %s\", err)\n\t\t}\n\t\tcustomData = string(data)\n\t}\n\n\tcustomLog4J := \"\"\n\tif dd.customLog4J != \"\" {\n\t\tlog4JData, err := ioutil.ReadFile(dd.customLog4J)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Invalid custom log4j file: %s\", err)\n\t\t}\n\t\tcustomLog4J = base64.StdEncoding.EncodeToString(log4JData)\n\t}\n\n\tvar envBuffer bytes.Buffer\n\tfor _, env := range dd.environment {\n\t\tenvBuffer.WriteString(fmt.Sprintf(\"export %s\\n\", env))\n\t}\n\t// The custom script cannot be null in terraform so make a temp one\n\tscriptData := []byte(\"#!/bin/bash\\nexit 0\\n\")\n\tif dd.CustomScript != \"\" {\n\t\tscriptData, err = ioutil.ReadFile(dd.CustomScript)\n\t\tif err != nil 
{\n\t\t\treturn nil, fmt.Errorf(\"Failed to read the script %s: %s\", dd.CustomScript, err.Error())\n\t\t}\n\t}\n\tbase64CustomScriptData := base64.StdEncoding.EncodeToString(scriptData)\n\tbase64CustomScriptPath := path.Join(dd.deployDir, \"custom-stardogscript.base64\")\n\terr = ioutil.WriteFile(base64CustomScriptPath, []byte(base64CustomScriptData), 0644)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to create the base 64 encoded custom script\")\n\t}\n\n\tscriptZkData := []byte(\"#!/bin/bash\\nexit 0\\n\")\n\tif dd.CustomZkScript != \"\" {\n\t\tscriptZkData, err = ioutil.ReadFile(dd.CustomZkScript)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Failed to read the script %s: %s\", dd.CustomZkScript, err.Error())\n\t\t}\n\t}\n\tbase64CustomZkScriptData := base64.StdEncoding.EncodeToString(scriptZkData)\n\tbase64CustomZkScriptPath := path.Join(dd.deployDir, \"custom-zk-stardogscript.base64\")\n\terr = ioutil.WriteFile(base64CustomZkScriptPath, []byte(base64CustomZkScriptData), 0644)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to create the base 64 encoded custom zk script\")\n\t}\n\n\tinstance := Ec2Instance{\n\t\tDeploymentName: dd.Name,\n\t\tRegion: dd.Region,\n\t\tKeyName: dd.AwsKeyName,\n\t\tVersion: dd.Version,\n\t\tZkInstanceType: dd.ZkInstanceType,\n\t\tSdInstanceType: dd.SdInstanceType,\n\t\tAmiID: dd.AmiID,\n\t\tPrivateKey: dd.PrivateKeyPath,\n\t\tDeployDir: dd.deployDir,\n\t\tCustomScript: base64CustomScriptPath,\n\t\tCustomZkScript: base64CustomZkScriptPath,\n\t\tCtx: ctx,\n\t\tCustomPropsData: customData,\n\t\tCustomLog4JData: customLog4J,\n\t\tEnvironment: envBuffer.String(),\n\t}\n\tif dd.disableSecurity {\n\t\tinstance.StartOpts = \"--disable-security\"\n\t}\n\treturn &instance, nil\n}", "func (*CreateSecurityGroupMetadata) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_vpc_v1_security_group_service_proto_rawDescGZIP(), []int{5}\n}", "func CreateCsr(commonName string, country string, state string, city string,\n organization string, organizationalUnit string,\n emailAddress string) ([]byte, []byte, error) {\n\n priv, err := rsa.GenerateKey(rand.Reader, 2048)\n if err != nil {\n return nil, nil, err\n }\n\n template := x509.CertificateRequest{\n Subject: pkix.Name{\n CommonName: commonName,\n Country: []string{country},\n Province: []string{state},\n Locality: []string{city},\n Organization: []string{organization},\n OrganizationalUnit: []string{organizationalUnit},\n },\n SignatureAlgorithm: x509.SHA256WithRSA,\n EmailAddresses: []string{emailAddress},\n }\n\n random := rand.Reader\n csrBytes, err := x509.CreateCertificateRequest(random, &template, priv)\n if err != nil {\n return nil, nil, err\n }\n\n block := pem.Block{\n Type: \"CERTIFICATE REQUEST\",\n Bytes: csrBytes,\n }\n certPem := pem.EncodeToMemory(&block)\n\n block = pem.Block{\n Type: \"RSA PRIVATE KEY\",\n Bytes: x509.MarshalPKCS1PrivateKey(priv),\n }\n privPem := pem.EncodeToMemory(&block)\n\n return privPem, certPem, nil\n}", "func (stg *securityTestGroup) getSecurityGroups(agent string) ([]security.NetworkSecurityPolicy, error) {\n\tvar sgplist []security.NetworkSecurityPolicy\n\tstatus, err := stg.authAgentClient.Req(\"GET\", \"https://\"+agent+\":\"+globals.AgentProxyPort+\"/api/sgs/\", nil, &sgplist)\n\tif err != nil || status != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"Error getting SG Policies list: %v\", err)\n\t}\n\treturn sgplist, nil\n}", "func newAwsAutoscalingPolicies(c *TrussleV1Client, namespace string) *awsAutoscalingPolicies {\n\treturn 
&awsAutoscalingPolicies{\n\t\tclient: c.RESTClient(),\n\t\tns: namespace,\n\t}\n}", "func newPrivateKey() (crypto.Signer, error) {\n\treturn ecdsa.GenerateKey(ellipticCurve, crand.Reader)\n}", "func New() *StorageGroup {\n\treturn NewFromV2(new(storagegroup.StorageGroup))\n}", "func newOrgConfigGroup(org Organization) (*cb.ConfigGroup, error) {\n\torgGroup := newConfigGroup()\n\torgGroup.ModPolicy = AdminsPolicyKey\n\n\tif org.ModPolicy != \"\" {\n\t\torgGroup.ModPolicy = org.ModPolicy\n\t}\n\n\tif err := setPolicies(orgGroup, org.Policies); err != nil {\n\t\treturn nil, err\n\t}\n\n\tfabricMSPConfig, err := org.MSP.toProto()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"converting fabric msp config to proto: %v\", err)\n\t}\n\n\tconf, err := proto.Marshal(fabricMSPConfig)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"marshaling msp config: %v\", err)\n\t}\n\n\t// mspConfig defaults type to FABRIC which implements an X.509 based provider\n\tmspConfig := &mb.MSPConfig{\n\t\tConfig: conf,\n\t}\n\n\terr = setValue(orgGroup, mspValue(mspConfig), AdminsPolicyKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn orgGroup, nil\n}", "func (instance *Host) DisableSecurityGroup(ctx context.Context, sgInstance resources.SecurityGroup) (ferr fail.Error) {\n\tdefer fail.OnPanic(&ferr)\n\n\tif valid.IsNil(instance) {\n\t\treturn fail.InvalidInstanceError()\n\t}\n\tif ctx == nil {\n\t\treturn fail.InvalidParameterError(\"ctx\", \"cannot be nil\")\n\t}\n\tif sgInstance == nil {\n\t\treturn fail.InvalidParameterError(\"sgInstance\", \"cannot be nil\")\n\t}\n\n\tsgName := sgInstance.GetName()\n\tsgID, err := sgInstance.GetID()\n\tif err != nil {\n\t\treturn fail.ConvertError(err)\n\t}\n\n\thid, err := instance.GetID()\n\tif err != nil {\n\t\treturn fail.ConvertError(err)\n\t}\n\n\tsvc := instance.Service()\n\txerr := instance.Alter(ctx, func(_ data.Clonable, props *serialize.JSONProperties) fail.Error {\n\t\treturn props.Alter(hostproperty.SecurityGroupsV1, func(clonable data.Clonable) fail.Error {\n\t\t\thsgV1, ok := clonable.(*propertiesv1.HostSecurityGroups)\n\t\t\tif !ok {\n\t\t\t\treturn fail.InconsistentError(\"'*propertiesv1.HostSecurityGroups' expected, '%s' provided\", reflect.TypeOf(clonable).String())\n\t\t\t}\n\n\t\t\tvar asg *abstract.SecurityGroup\n\t\t\txerr := sgInstance.Inspect(ctx, func(clonable data.Clonable, _ *serialize.JSONProperties) fail.Error {\n\t\t\t\tvar ok bool\n\t\t\t\tif asg, ok = clonable.(*abstract.SecurityGroup); !ok {\n\t\t\t\t\treturn fail.InconsistentError(\"'*abstract.SecurityGroup' expected, '%s' provided\", reflect.TypeOf(clonable).String())\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\txerr = debug.InjectPlannedFail(xerr)\n\t\t\tif xerr != nil {\n\t\t\t\treturn xerr\n\t\t\t}\n\n\t\t\t// First check if the security group is not already registered for the Host with the exact same state\n\t\t\tvar found bool\n\t\t\tfor k := range hsgV1.ByID {\n\t\t\t\tif k == asg.ID {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\treturn fail.NotFoundError(\"security group '%s' is not bound to Host '%s'\", sgName, sgID)\n\t\t\t}\n\n\t\t\t{\n\t\t\t\t// Bind the security group on provider side; if security group not binded, considered as a success\n\t\t\t\txerr = svc.UnbindSecurityGroupFromHost(ctx, asg, hid)\n\t\t\t\txerr = debug.InjectPlannedFail(xerr)\n\t\t\t\tif xerr != nil {\n\t\t\t\t\tswitch xerr.(type) {\n\t\t\t\t\tcase *fail.ErrNotFound:\n\t\t\t\t\t\tdebug.IgnoreError2(ctx, xerr)\n\t\t\t\t\t\t// 
continue\n\t\t\t\t\tdefault:\n\t\t\t\t\t\treturn xerr\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// found, update properties\n\t\t\thsgV1.ByID[asg.ID].Disabled = true\n\t\t\treturn nil\n\t\t})\n\t})\n\tif xerr != nil {\n\t\treturn xerr\n\t}\n\n\treturn nil\n}", "func ensureSecurityGroupDeleted(e *AwsEC2) error {\n\tapiGroupName := util.CreateSecurityGroupName(e.controllerID, cloud.MilpaAPISGName)\n\tsg, err := e.FindSecurityGroup(apiGroupName)\n\tif err != nil {\n\t\t// Todo, return if security group not found\n\t\treturn util.WrapError(err, \"Error finding security group\")\n\t}\n\tif sg == nil {\n\t\treturn nil\n\t}\n\n\t// Try to delete groups for up to 3 minutes\n\t// this has flaked a few times and we've had dependency violations for\n\t// up to 2 mintues... It's getting worse for AWS.\n\t// Azure takes for freakin' ever\n\tfor i := 1; i < 60; i++ {\n\t\tfmt.Printf(\"Deleting security group: %s - %s\\n\", sg.Name, sg.ID)\n\t\terr = e.DeleteSecurityGroup(sg.ID)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(5 * time.Second)\n\t}\n\tif err != nil {\n\t\treturn util.WrapError(err, \"Could not delete security group\")\n\t}\n\tklog.Infof(\"Deleted security group %s\", sg.ID)\n\treturn nil\n}", "func (c *Canary) DeleteEC2SecurityGroup(region schemas.RegionConfig) error {\n\tif c.Deployer.SecurityGroup[region.Region] == nil {\n\t\tc.Logger.Debugf(\"No EC2 security group to delete\")\n\t\treturn nil\n\t}\n\n\tclient, err := selectClientFromList(c.AWSClients, region.Region)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = client.EC2Service.DeleteSecurityGroup(*c.Deployer.SecurityGroup[region.Region])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.Logger.Debugf(\"Delete canary EC2 security group: %s\", *c.Deployer.SecurityGroup[region.Region])\n\n\treturn nil\n}", "func NewCreateSecurityGroupRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) {\n\tvar err error\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/security-group\")\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"POST\", queryUrl.String(), body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(\"Content-Type\", contentType)\n\treturn req, nil\n}", "func (s *Service) NetworkSGIfExists(resourceGroupName string, networkSecurityGroupName string) (*armada.SecurityGroup, error) {\n\t//JEB networkSG, err := s.scope.AirshipClients.SecurityGroups.Get(s.scope.Context, resourceGroupName, networkSecurityGroupName, \"\")\n\t//JEB if err != nil {\n\t//JEB \t\tif aerr, ok := err.(autorest.DetailedError); ok {\n\t//JEB \t\t\tif aerr.StatusCode.(int) == 404 {\n\t//JEB \t\t\t\treturn nil, nil\n\t//JEB \t\t\t}\n\t//JEB \t\t}\n\t//JEB \t\treturn nil, err\n\t//JEB \t}\n\t//JEB \treturn &networkSG, nil\n\treturn &armada.SecurityGroup{}, nil\n}", "func (c *MockApplicationSecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName, asgName string, parameters network.ApplicationSecurityGroup) (*network.ApplicationSecurityGroup, error) {\n\t// Ignore resourceGroupName for simplicity.\n\tif _, ok := c.ASGs[asgName]; ok {\n\t\treturn nil, fmt.Errorf(\"update not supported\")\n\t}\n\tparameters.Name = &asgName\n\tc.ASGs[asgName] = parameters\n\treturn &parameters, nil\n}", "func (r CreateDBSecurityGroupRequest) Send(ctx context.Context) (*CreateDBSecurityGroupResponse, error) 
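The fixed-interval delete loop in ensureSecurityGroupDeleted above is a pattern worth factoring out when a cloud API keeps returning transient errors such as dependency violations. Here is a generic, stdlib-only sketch; the attempt count and interval are illustrative, not values any SDK mandates.

package main

import (
	"errors"
	"fmt"
	"time"
)

// retry runs op until it succeeds or attempts are exhausted, sleeping
// a fixed interval between tries, mirroring the delete loop above.
func retry(attempts int, interval time.Duration, op func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = op(); err == nil {
			return nil
		}
		time.Sleep(interval)
	}
	return fmt.Errorf("gave up after %d attempts: %w", attempts, err)
}

func main() {
	// Dummy operation that always fails, just to exercise the helper.
	err := retry(3, 10*time.Millisecond, func() error {
		return errors.New("DependencyViolation")
	})
	fmt.Println(err)
}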
{\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &CreateDBSecurityGroupResponse{\n\t\tCreateDBSecurityGroupOutput: r.Request.Data.(*CreateDBSecurityGroupOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func NewBackendSGProvider(clusterName string, backendSG string, vpcID string,\n\tec2Client services.EC2, k8sClient client.Client, defaultTags map[string]string, logger logr.Logger) *defaultBackendSGProvider {\n\treturn &defaultBackendSGProvider{\n\t\tvpcID: vpcID,\n\t\tclusterName: clusterName,\n\t\tbackendSG: backendSG,\n\t\tdefaultTags: defaultTags,\n\t\tec2Client: ec2Client,\n\t\tk8sClient: k8sClient,\n\t\tlogger: logger,\n\t\tmutex: sync.Mutex{},\n\n\t\tcheckIngressFinalizersFunc: func(finalizers []string) bool {\n\t\t\tfor _, fin := range finalizers {\n\t\t\t\tif fin == implicitGroupFinalizer || strings.HasPrefix(fin, explicitGroupFinalizerPrefix) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\n\t\tcheckServiceFinalizersFunc: func(finalizers []string) bool {\n\t\t\tfor _, fin := range finalizers {\n\t\t\t\tif fin == serviceFinalizer {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\n\t\tdefaultDeletionPollInterval: defaultSGDeletionPollInterval,\n\t\tdefaultDeletionTimeout: defaultSGDeletionTimeout,\n\t}\n}", "func CreateDescribeScalingGroupsResponse() (response *DescribeScalingGroupsResponse) {\n\tresponse = &DescribeScalingGroupsResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (ng *NodeGroup) Create() (cloudprovider.NodeGroup, error) {\n\tklog.V(4).Info(\"Creating a new NodeGroup\")\n\n\t// Forge create node pool parameters (defaulting b2-7 for now)\n\tname := ng.Id()\n\tsize := uint32(ng.CurrentSize)\n\tmin := uint32(ng.MinSize())\n\tmax := uint32(ng.MaxSize())\n\n\topts := sdk.CreateNodePoolOpts{\n\t\tFlavorName: \"b2-7\",\n\t\tName: &name,\n\t\tDesiredNodes: &size,\n\t\tMinNodes: &min,\n\t\tMaxNodes: &max,\n\t\tAutoscale: true,\n\t}\n\n\t// Call API to add a node pool in the project/cluster\n\tnp, err := ng.Manager.Client.CreateNodePool(context.Background(), ng.Manager.ProjectID, ng.Manager.ClusterID, &opts)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create node pool: %w\", err)\n\t}\n\n\t// Forge a node group interface given the API response\n\treturn &NodeGroup{\n\t\tNodePool: *np,\n\t\tManager: ng.Manager,\n\t\tCurrentSize: int(ng.DesiredNodes),\n\t}, nil\n}", "func (p *EC2Provisioner) CreateInstance(opts EC2CreateInstanceOptions) (*cfg.Remote, error) {\n\t// Set requested region\n\tp.WithRegion(opts.Region)\n\n\t// set highlighter\n\tvar highlight = out.NewColorer(out.CY)\n\n\t// Generate authentication\n\tvar keyName = fmt.Sprintf(\"%s_%s_inertia_key_%d\", opts.Name, p.user, time.Now().UnixNano())\n\tout.Fprintf(p.out, highlight.Sf(\":key: Generating key pair '%s'...\\n\", keyName))\n\tkeyResp, err := p.client.CreateKeyPair(&ec2.CreateKeyPairInput{\n\t\tKeyName: aws.String(keyName),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thomeDir, err := os.UserHomeDir()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Save key\n\tvar keyPath = filepath.Join(homeDir, \".ssh\", *keyResp.KeyName)\n\tout.Fprintf(p.out, highlight.Sf(\":inbox_tray: Saving key to '%s'...\\n\", keyPath))\n\tif err = local.SaveKey(*keyResp.KeyMaterial, keyPath); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create security group for network configuration\n\tvar secGroup = 
fmt.Sprintf(\"%s-%d\", opts.Name, time.Now().UnixNano())\n\tout.Fprintf(p.out, highlight.Sf(\":circus_tent: Creating security group '%s'...\\n\", secGroup))\n\tgroup, err := p.client.CreateSecurityGroup(&ec2.CreateSecurityGroupInput{\n\t\tGroupName: aws.String(secGroup),\n\t\tDescription: aws.String(\n\t\t\tfmt.Sprintf(\"Rules for project %s on %s\", opts.ProjectName, opts.Name),\n\t\t),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Set rules for ports\n\tout.Fprintf(p.out, highlight.Sf(\":electric_plug: Exposing ports '%s'...\\n\", secGroup))\n\tif err = p.exposePorts(*group.GroupId, opts.DaemonPort, opts.Ports); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Start up instance\n\tout.Fprintf(p.out, highlight.Sf(\":boat: Requesting instance '%s'...\\n\", secGroup))\n\trunResp, err := p.client.RunInstances(&ec2.RunInstancesInput{\n\t\tImageId: aws.String(opts.ImageID),\n\t\tInstanceType: aws.String(opts.InstanceType),\n\t\tMinCount: aws.Int64(1),\n\t\tMaxCount: aws.Int64(1),\n\n\t\t// Security options\n\t\tKeyName: keyResp.KeyName,\n\t\tSecurityGroupIds: []*string{group.GroupId},\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Check response validity\n\tif runResp.Instances == nil || len(runResp.Instances) == 0 {\n\t\treturn nil, errors.New(\"Unable to start instances: \" + runResp.String())\n\t}\n\tout.Fprintf(p.out, highlight.Sf(\"A %s instance has been provisioned\", opts.InstanceType))\n\n\t// Loop until intance is running\n\tvar instance ec2.Instance\n\tfor {\n\t\t// Wait briefly between checks\n\t\ttime.Sleep(3 * time.Second)\n\n\t\t// Request instance status\n\t\tout.Fprintf(p.out, \"Checking status of the requested instance...\\n\")\n\t\tresult, err := p.client.DescribeInstances(&ec2.DescribeInstancesInput{\n\t\t\tInstanceIds: []*string{runResp.Instances[0].InstanceId},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Check if reservations are present\n\t\tif result.Reservations == nil || len(result.Reservations) == 0 ||\n\t\t\tlen(result.Reservations[0].Instances) == 0 {\n\t\t\t// A reservation corresponds to a command to start instances\n\t\t\t// If nothing is here... 
we gotta keep waiting\n\t\t\tfmt.Fprintln(p.out, \"No reservations found yet.\")\n\t\t\tcontinue\n\t\t}\n\n\t\t// Get status\n\t\ts := result.Reservations[0].Instances[0].State\n\t\tif s == nil {\n\t\t\tfmt.Println(p.out, \"Status unknown.\")\n\t\t\tcontinue\n\t\t}\n\n\t\t// Code 16 means instance has started, and we can continue!\n\t\tif s.Code != nil && *s.Code == codeEC2InstanceStarted {\n\t\t\tfmt.Fprintln(p.out, \"Instance is running!\")\n\t\t\tinstance = *result.Reservations[0].Instances[0]\n\t\t\tbreak\n\t\t}\n\n\t\t// Otherwise, keep polling\n\t\tif s.Name != nil {\n\t\t\tfmt.Fprintln(p.out, \"Instance status: \"+*s.Name)\n\t\t} else {\n\t\t\tfmt.Fprintln(p.out, \"Instance status: \"+s.String())\n\t\t}\n\t\tcontinue\n\t}\n\n\t// Check instance validity\n\tif instance.PublicDnsName == nil {\n\t\treturn nil, errors.New(\"Unable to find public IP address for instance: \" + instance.String())\n\t}\n\n\t// Set tags\n\tout.Fprintf(p.out, \"Setting tags on instance...\\n\")\n\tif _, err = p.client.CreateTags(&ec2.CreateTagsInput{\n\t\tResources: []*string{instance.InstanceId},\n\t\tTags: []*ec2.Tag{\n\t\t\t{\n\t\t\t\tKey: aws.String(\"Name\"),\n\t\t\t\tValue: aws.String(opts.Name),\n\t\t\t},\n\t\t\t{\n\t\t\t\tKey: aws.String(\"Purpose\"),\n\t\t\t\tValue: aws.String(\"Inertia Continuous Deployment\"),\n\t\t\t},\n\t\t},\n\t}); err != nil {\n\t\tfmt.Fprintln(p.out, \"Failed to set tags: \"+err.Error())\n\t}\n\n\t// Poll for SSH port to open\n\tfmt.Fprintln(p.out, \"Waiting for ports to open...\")\n\tfor {\n\t\ttime.Sleep(3 * time.Second)\n\t\tfmt.Fprintln(p.out, \"Checking ports...\")\n\t\tif conn, err := net.Dial(\"tcp\", *instance.PublicDnsName+\":22\"); err == nil {\n\t\t\tfmt.Fprintln(p.out, \"Connection established!\")\n\t\t\tconn.Close()\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Generate webhook secret\n\tout.Fprintf(p.out, \"Generating a webhook secret...\\n\")\n\twebhookSecret, err := common.GenerateRandomString()\n\tif err != nil {\n\t\tfmt.Fprintln(p.out, err.Error())\n\t\tfmt.Fprintln(p.out, \"Using default secret 'inertia'\")\n\t\twebhookSecret = \"interia\"\n\t} else {\n\t\tfmt.Fprintf(p.out, \"Generated webhook secret: '%s'\\n\", webhookSecret)\n\t}\n\n\t// Return remote configuration\n\treturn &cfg.Remote{\n\t\tName: opts.Name,\n\t\tIP: *instance.PublicDnsName,\n\t\tSSH: &cfg.SSH{\n\t\t\tUser: p.user,\n\t\t\tIdentityFile: keyPath,\n\t\t\tSSHPort: \"22\",\n\t\t},\n\t\tDaemon: &cfg.Daemon{\n\t\t\tPort: strconv.FormatInt(opts.DaemonPort, 10),\n\t\t\tWebHookSecret: webhookSecret,\n\t\t},\n\t\tProfiles: make(map[string]string),\n\t}, nil\n}", "func (a *Azure) CreateNetworkSecurityGroup(ctx context.Context, location string, nsgName string, c *types.Config) (nsg *network.SecurityGroup, err error) {\n\tnsgClient, err := a.getNsgClient()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar securityRules []network.SecurityRule\n\n\tfor _, ports := range c.RunConfig.Ports {\n\t\tvar rule = a.buildFirewallRule(network.SecurityRuleProtocolTCP, ports)\n\t\tsecurityRules = append(securityRules, rule)\n\t}\n\n\tfor _, ports := range c.RunConfig.UDPPorts {\n\t\tvar rule = a.buildFirewallRule(network.SecurityRuleProtocolUDP, ports)\n\t\tsecurityRules = append(securityRules, rule)\n\t}\n\n\tfuture, err := nsgClient.CreateOrUpdate(\n\t\tctx,\n\t\ta.groupName,\n\t\tnsgName,\n\t\tnetwork.SecurityGroup{\n\t\t\tLocation: to.StringPtr(location),\n\t\t\tSecurityGroupPropertiesFormat: &network.SecurityGroupPropertiesFormat{\n\t\t\t\tSecurityRules: &securityRules,\n\t\t\t},\n\t\t\tTags: 
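The hand-rolled polling loop in CreateInstance (sleep, DescribeInstances, check for state code 16) can usually be replaced by the SDK waiter that another snippet in this set already calls, WaitUntilInstanceRunning. A sketch assuming aws-sdk-go v1 and a placeholder instance ID:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))
	// instanceID is a hypothetical placeholder.
	instanceID := "i-0123456789abcdef0"
	// The waiter polls DescribeInstances internally with backoff and a
	// bounded number of attempts, so no manual sleep loop is needed.
	err := svc.WaitUntilInstanceRunning(&ec2.DescribeInstancesInput{
		InstanceIds: []*string{aws.String(instanceID)},
	})
	if err != nil {
		fmt.Println("instance never reached the running state:", err)
		return
	}
	fmt.Println("instance is running")
}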
getAzureDefaultTags(),\n\t\t},\n\t)\n\n\tif err != nil {\n\t\treturn nsg, fmt.Errorf(\"cannot create nsg: %v\", err)\n\t}\n\n\terr = future.WaitForCompletionRef(ctx, nsgClient.Client)\n\tif err != nil {\n\t\treturn nsg, fmt.Errorf(\"cannot get nsg create or update future response: %v\", err)\n\t}\n\n\tnsgValue, err := future.Result(*nsgClient)\n\tnsg = &nsgValue\n\treturn\n}", "func NewCreateSecurityGroupRequest(server string, body CreateSecurityGroupJSONRequestBody) (*http.Request, error) {\n\tvar bodyReader io.Reader\n\tbuf, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbodyReader = bytes.NewReader(buf)\n\treturn NewCreateSecurityGroupRequestWithBody(server, \"application/json\", bodyReader)\n}", "func newConfig() (*config, error) {\n\tec2Metadata := ec2metadata.New(session.Must(session.NewSession()))\n\tregion, err := ec2Metadata.Region()\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get region from ec2 metadata\")\n\t}\n\n\tinstanceID, err := ec2Metadata.GetMetadata(\"instance-id\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get instance id from ec2 metadata\")\n\t}\n\n\tmac, err := ec2Metadata.GetMetadata(\"mac\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get mac from ec2 metadata\")\n\t}\n\n\tsecurityGroups, err := ec2Metadata.GetMetadata(\"security-groups\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get security groups from ec2 metadata\")\n\t}\n\n\tinterfaces, err := ec2Metadata.GetMetadata(\"network/interfaces/macs\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get interfaces from ec2 metadata\")\n\t}\n\n\tsubnet, err := ec2Metadata.GetMetadata(\"network/interfaces/macs/\" + mac + \"/subnet-id\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get subnet from ec2 metadata\")\n\t}\n\n\tvpc, err := ec2Metadata.GetMetadata(\"network/interfaces/macs/\" + mac + \"/vpc-id\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get vpc from ec2 metadata\")\n\t}\n\n\treturn &config{region: region,\n\t\tsubnet: subnet,\n\t\tindex: int64(len(strings.Split(interfaces, \"\\n\"))),\n\t\tinstanceID: instanceID,\n\t\tsecurityGroups: strings.Split(securityGroups, \"\\n\"),\n\t\tvpc: vpc,\n\t}, nil\n}", "func NewBackendSGProvider(clusterName string, backendSG string, vpcID string,\n\tec2Client services.EC2, k8sClient client.Client, logger logr.Logger) *defaultBackendSGProvider {\n\treturn &defaultBackendSGProvider{\n\t\tvpcID: vpcID,\n\t\tclusterName: clusterName,\n\t\tbackendSG: backendSG,\n\t\tec2Client: ec2Client,\n\t\tk8sClient: k8sClient,\n\t\tlogger: logger,\n\t\tmutex: sync.Mutex{},\n\n\t\tdefaultDeletionPollInterval: defaultSGDeletionPollInterval,\n\t\tdefaultDeletionTimeout: defaultSGDeletionTimeout,\n\t}\n}", "func (r *ReconcileSecurityGroup) Reconcile(request reconcile.Request) (reconcile.Result, error) {\n\t// Fetch the SecurityGroup instance\n\tinstance := &openstackv1beta1.SecurityGroup{}\n\terr := r.Get(context.TODO(), request.NamespacedName, instance)\n\tif err != nil {\n\t\tif errors_.IsNotFound(err) {\n\t\t\tlog.Info(\"Debug: instance not found\", \"SecurityGroup\", instance.Name)\n\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tlog.Info(\"Info: Start reconcile\", \"sg\", instance.Name)\n\tif instance.ObjectMeta.DeletionTimestamp.IsZero() {\n\t\tlog.Info(\"Debug: deletion timestamp is zero\")\n\t\tif err := 
r.setFinalizer(instance); err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t} else {\n\t\treturn r.runFinalizer(instance)\n\t}\n\n\ttenant, err := r.osClient.GetTenantByName(os.Getenv(\"OS_TENANT_NAME\"))\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tvar sg *groups.SecGroup\n\n\t// Check if the SecurityGroup already exists\n\tsg, err = r.osClient.GetSecurityGroup(instance.Status.ID)\n\tif err != nil {\n\t\tswitch err.(type) {\n\t\tcase gophercloud.ErrDefault404:\n\t\t\trand.Seed(time.Now().Unix())\n\n\t\t\tlog.Info(\"Creating SG\", \"name\", instance.Spec.Name)\n\t\t\tname := fmt.Sprintf(\"%s-%s\", instance.Spec.Name, r.osClient.RandomString())\n\n\t\t\tsg, err = r.osClient.CreateSecurityGroup(name, \"\", tenant.ID)\n\t\t\tif err != nil {\n\t\t\t\tlog.Info(\"Error\", \"msg\", err.Error())\n\t\t\t}\n\t\t\tinstance.Status.ID = sg.ID\n\t\t\tinstance.Status.Name = sg.Name\n\t\t\tlog.Info(\"Success creating SG\", \"name\", instance.Spec.Name, \"id\", sg.ID)\n\t\tdefault:\n\t\t\tlog.Info(\"Debug: failed to get SecurityGroup\")\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t}\n\n\t// If a rule exists on the resource but not on the SG, add it to the SG\n\tfor _, rule := range instance.Spec.Rules {\n\t\texists := false\n\t\tfor _, existsRule := range sg.Rules {\n\t\t\tif rule.RemoteIpPrefix == existsRule.RemoteIPPrefix && rule.PortRangeMax == existsRule.PortRangeMax && rule.PortRangeMin == existsRule.PortRangeMin {\n\t\t\t\texists = true\n\t\t\t}\n\t\t}\n\n\t\tif !exists {\n\t\t\terr = r.addRule(sg.ID, rule)\n\t\t\tif err != nil {\n\t\t\t\tlog.Info(\"Error\", \"addRule\", err.Error())\n\t\t\t\treturn reconcile.Result{}, err\n\t\t\t}\n\t\t}\n\t}\n\n\t// If an SG rule no longer exists on the resource side, delete it\n\tfor _, existRule := range sg.Rules {\n\t\tdelete := true\n\t\tfor _, rule := range instance.Spec.Rules {\n\t\t\tif existRule.RemoteIPPrefix == rule.RemoteIpPrefix && existRule.PortRangeMax == rule.PortRangeMax && existRule.PortRangeMin == rule.PortRangeMin {\n\t\t\t\tdelete = false\n\t\t\t}\n\t\t}\n\t\tif delete {\n\t\t\tlog.Info(\"Deleting SG Rule\", \"cidr\", existRule.RemoteIPPrefix, \"port\", fmt.Sprintf(\"%d-%d\", existRule.PortRangeMin, existRule.PortRangeMax))\n\t\t\terr = r.osClient.DeleteSecurityGroupRule(existRule.ID)\n\t\t\tif err != nil {\n\t\t\t\treturn reconcile.Result{}, err\n\t\t\t}\n\t\t\tlog.Info(\"Successfully deleted SG Rule\", \"cidr\", existRule.RemoteIPPrefix, \"port\", fmt.Sprintf(\"%d-%d\", existRule.PortRangeMin, existRule.PortRangeMax))\n\t\t}\n\t}\n\n\tvar nodes v1.NodeList\n\tlistOpts := client.ListOptions{\n\t\tRaw: &metav1.ListOptions{LabelSelector: labelSelector(instance)},\n\t}\n\terr = r.List(context.Background(), &listOpts, &nodes)\n\tif err != nil {\n\t\tlog.Info(\"Error\", \"Failed to list Nodes\", err.Error())\n\t\treturn reconcile.Result{}, err\n\t}\n\n\texistsNodeIDs := []string{}\n\tfor _, node := range nodes.Items {\n\t\texistsNodeIDs = append(existsNodeIDs, strings.ToLower(node.Status.NodeInfo.SystemUUID))\n\t}\n\n\tfor _, id := range instance.Status.Nodes {\n\t\tif !containsString(existsNodeIDs, id) {\n\t\t\tlog.Info(\"Info\", \"Detach SG from Server\", strings.ToLower(id))\n\t\t\tr.osClient.DettachSG(strings.ToLower(id), sg.Name)\n\t\t\tinstance.Status.Nodes = removeString(instance.Status.Nodes, id)\n\t\t}\n\t}\n\n\tfor _, node := range nodes.Items {\n\t\tid := node.Status.NodeInfo.SystemUUID\n\t\thasSg, err := r.osClient.ServerHasSG(strings.ToLower(id), sg.Name)\n\t\tif err != nil {\n\t\t\tlog.Info(\"Error\", \"Failed to ServerHasSG\", err.Error())\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\n\t\tif !hasSg {\n\t\t\tlog.Info(\"Info\", \"Attach SG to Server\", strings.ToLower(id))\n\t\t\tif err = r.osClient.AttachSG(strings.ToLower(id), sg.Name); err != nil {\n\t\t\t\tlog.Info(\"Debug\", \"failed to attach sg\", err.Error())\n\t\t\t\treturn reconcile.Result{}, err\n\t\t\t}\n\t\t\tinstance.Status.Nodes = append(instance.Status.Nodes, strings.ToLower(id))\n\t\t}\n\t}\n\n\tif err := r.Status().Update(context.Background(), instance); err != nil {\n\t\tlog.Info(\"Debug\", \"failed to update sg\", err.Error())\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tlog.Info(\"Info: Success reconcile\", \"sg\", instance.Name)\n\treturn reconcile.Result{RequeueAfter: 60 * time.Second}, nil\n}", "func createHyperShiftVPC() (*HyperShiftVPC, error) {\n\tctx := context.Background()\n\n\tvar vpc HyperShiftVPC\n\tworkingDir := viper.GetString(config.ReportDir)\n\n\ttf, err := terraform.New(workingDir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer func() {\n\t\t_ = tf.Uninstall(ctx)\n\t}()\n\n\tlog.Println(\"Creating ROSA HyperShift aws vpc\")\n\n\terr = copyFile(\"terraform/setup-vpc.tf\", fmt.Sprintf(\"%s/setup-vpc.tf\", workingDir))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = tf.Init(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = callAndSetAWSSession(func() error {\n\t\terr := tf.Plan(\n\t\t\tctx,\n\t\t\ttfexec.Var(fmt.Sprintf(\"aws_region=%s\", viper.GetString(config.AWSRegion))),\n\t\t\ttfexec.Var(fmt.Sprintf(\"cluster_name=%s\", viper.GetString(config.Cluster.Name))),\n\t\t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = tf.Apply(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\toutput, err := tf.Output(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvpc.PrivateSubnet = strings.ReplaceAll(string(output[\"cluster-private-subnet\"].Value), \"\\\"\", \"\")\n\tvpc.PublicSubnet = strings.ReplaceAll(string(output[\"cluster-public-subnet\"].Value), \"\\\"\", \"\")\n\tvpc.NodePrivateSubnet = strings.ReplaceAll(string(output[\"node-private-subnet\"].Value), \"\\\"\", \"\")\n\n\tlog.Println(\"ROSA HyperShift aws vpc created!\")\n\n\treturn &vpc, nil\n}", "func (c EasyCert) newPrivateKey() (crypto.PrivateKey, error) {\n\tif c.ec != \"\" {\n\t\tvar curve elliptic.Curve\n\t\tswitch c.ec {\n\t\tcase \"224\":\n\t\t\tcurve = elliptic.P224()\n\t\tcase \"384\":\n\t\t\tcurve = elliptic.P384()\n\t\tcase \"521\":\n\t\t\tcurve = elliptic.P521()\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"Unknown elliptic curve: %q\", c.ec)\n\t\t}\n\t\treturn ecdsa.GenerateKey(curve, rand.Reader)\n\t}\n\treturn rsa.GenerateKey(rand.Reader, c.rsaBits)\n}", "func (s *GroupsService) Create(\n\tctx context.Context,\n\tgroupName string,\n) error {\n\traw, err := json.Marshal(struct {\n\t\tGroupName string `json:\"group_name\"`\n\t}{\n\t\tgroupName,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq, err := http.NewRequest(\n\t\thttp.MethodPost,\n\t\ts.client.url+\"2.0/groups/create\",\n\t\tbytes.NewBuffer(raw),\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq = req.WithContext(ctx)\n\tres, err := s.client.client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif res.StatusCode >= 300 || res.StatusCode <= 199 {\n\t\treturn fmt.Errorf(\n\t\t\t\"Failed to return 2XX response: %d\", res.StatusCode)\n\t}\n\n\treturn nil\n}", "func newKeyPair() (ecdsa.PrivateKey, []byte) {\n\t// generate a private key using ECC (the P-256 curve)\n\tcurve := elliptic.P256()\n\tprivate, err := ecdsa.GenerateKey(curve, rand.Reader)\n\tlog.Println(\"--------\", private)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\t// derive the public key from the private key\n\tpubKey := append(private.PublicKey.X.Bytes(), private.PublicKey.Y.Bytes()...)\n\treturn *private, pubKey\n}", "func (ggSession *GreengrassSession) CreateGroup(name string) error {\n\t// fmt.Printf(\"creategroup: %v\\n\", groupOutput)\n\tthingOutput, err := ggSession.CreateCore(name)\n\tif err != nil {\n\t\tfmt.Printf(\"CreateCore error: %v\\n\", err)\n\t}\n\tfmt.Printf(\"Created core '%s'\\n\", name)\n\n\tcoreDefinition, err := ggSession.greengrass.CreateCoreDefinition(&greengrass.CreateCoreDefinitionInput{\n\t\tName: &name,\n\t})\n\tif err != nil {\n\t\tfmt.Printf(\"CreateCoreDefinition error: %v\\n\", err)\n\t\treturn err\n\t}\n\tfmt.Printf(\"Created core definition\\n\")\n\n\tdefinitionInput := greengrass.CreateCoreDefinitionVersionInput{\n\t\tCoreDefinitionId: coreDefinition.Id,\n\t\tCores: []*greengrass.Core{\n\t\t\t&greengrass.Core{\n\t\t\t\tCertificateArn: ggSession.keyCertOutput.CertificateArn,\n\t\t\t\tId: coreDefinition.Id,\n\t\t\t\tThingArn: thingOutput.ThingArn,\n\t\t\t},\n\t\t},\n\t}\n\n\tdefinitionVersion, err := ggSession.greengrass.CreateCoreDefinitionVersion(&definitionInput)\n\tif err != nil {\n\t\tfmt.Printf(\"CreateCoreDefinitionVersion error: %v\\n\", err)\n\t\treturn err\n\t}\n\tfmt.Printf(\"Created core definition version\\n\")\n\n\tgroupOutput, err := ggSession.greengrass.CreateGroup(&greengrass.CreateGroupInput{Name: &name})\n\tif err != nil {\n\t\tfmt.Printf(\"creategroup error: %v\\n\", err)\n\t\treturn err\n\t}\n\tfmt.Printf(\"Created group\\n\")\n\n\t// Update group configuration\n\tggSession.config.CoreDefinition.ID = *definitionVersion.Id\n\tggSession.config.CoreDefinition.VersionArn = *definitionVersion.Arn\n\tggSession.config.Group.ID = *groupOutput.Id\n\n\tggSession.updateGroup()\n\n\treturn nil\n}", "func (g *Google) createInstanceGroup() (string, error) {\n\tif group, err := g.getInstanceGroup(); err == nil {\n\t\tlog.Infof(\"found InstanceGroup %s: %s\", instanceGroupName, group.SelfLink)\n\t\treturn group.SelfLink, nil\n\t}\n\n\top, err := g.instanceGroupsService.ZoneViews.Insert(g.project, g.zone,\n\t\t&resourceviews.ResourceView{\n\t\t\tName: instanceGroupName,\n\t\t\tEndpoints: []*resourceviews.ServiceEndpoint{\n\t\t\t\t{\n\t\t\t\t\tName: \"http\",\n\t\t\t\t\tPort: g.context.Port,\n\t\t\t\t},\n\t\t\t},\n\t\t}).Do()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif err := g.waitForInstanceGroupOperation(op); err != nil {\n\t\treturn \"\", err\n\t}\n\tlog.Infof(\"created InstanceGroup %s: %s\", instanceGroupName, op.TargetLink)\n\treturn op.TargetLink, nil\n}", "func New() *BBSG2Pub {\n\treturn bbs.New()\n}", "func (asg *Asg) Create() error {\n\treturn cloudprovider.ErrAlreadyExist\n}", "func (jbobject *ServicesEc2ModelCreateReservedInstancesListingRequest) Clone2() (*JavaLangObject, error) {\n\tjret, err := jbobject.CallMethod(javabind.GetEnv(), \"clone\", \"java/lang/Object\")\n\tif err != nil {\n\t\tvar zero *JavaLangObject\n\t\treturn zero, err\n\t}\n\tretconv := javabind.NewJavaToGoCallable()\n\tdst := &javabind.Callable{}\n\tretconv.Dest(dst)\n\tif err := retconv.Convert(javabind.ObjectRef(jret)); err != nil {\n\t\tpanic(err)\n\t}\n\tretconv.CleanUp()\n\tunique_x := &JavaLangObject{}\n\tunique_x.Callable = dst\n\treturn unique_x, nil\n}", "func NewServerGroup() (*ServerGroup, error) {\n\tctx, ctxCancel := context.WithCancel(context.Background())\n\t// Create the targetSet (which will maintain all of the updating etc. 
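newKeyPair above returns the public key as a bare X||Y byte concatenation, so a consumer has to rebuild an ecdsa.PublicKey from those bytes. A sketch follows; note the caveat that it assumes both coordinates were encoded at equal length, which big.Int.Bytes does not strictly guarantee (it strips leading zero bytes), so production code should use a fixed-width encoding instead.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"math/big"
)

// pubKeyFromBytes rebuilds an ecdsa.PublicKey from an X||Y byte
// concatenation like the one produced by newKeyPair, assuming both
// halves have the same length.
func pubKeyFromBytes(curve elliptic.Curve, pub []byte) ecdsa.PublicKey {
	half := len(pub) / 2
	return ecdsa.PublicKey{
		Curve: curve,
		X:     new(big.Int).SetBytes(pub[:half]),
		Y:     new(big.Int).SetBytes(pub[half:]),
	}
}

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	// Round-trip: encode as X||Y, then reconstruct and compare.
	raw := append(priv.PublicKey.X.Bytes(), priv.PublicKey.Y.Bytes()...)
	pub := pubKeyFromBytes(elliptic.P256(), raw)
	fmt.Println(pub.X.Cmp(priv.PublicKey.X) == 0, pub.Y.Cmp(priv.PublicKey.Y) == 0)
}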
in the background)\n\tsg := &ServerGroup{\n\t\tctx: ctx,\n\t\tctxCancel: ctxCancel,\n\t\tReady: make(chan struct{}),\n\t}\n\n\tlogCfg := &promlog.Config{\n\t\tLevel: &promlog.AllowedLevel{},\n\t\tFormat: &promlog.AllowedFormat{},\n\t}\n\tif err := logCfg.Level.Set(\"info\"); err != nil {\n\t\treturn nil, err\n\t}\n\tsg.targetManager = discovery.NewManager(ctx, promlog.New(logCfg))\n\t// Background the updating\n\tgo sg.targetManager.Run()\n\tgo sg.Sync()\n\n\treturn sg, nil\n\n}", "func NewGetSecurityGroupBadRequest() *GetSecurityGroupBadRequest {\n\treturn &GetSecurityGroupBadRequest{}\n}", "func New2(open OpenFunc, usable UsableFunc, values url.Values) (*Pool, error) {\n\tvar maxOpen, maxIdle int = 10, 1\n\n\tif s := values.Get(`maxOpen`); s != `` {\n\t\tif i, err := strconv.Atoi(s); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tmaxOpen = i\n\t\t}\n\t}\n\n\tif s := values.Get(`maxIdle`); s != `` {\n\t\tif i, err := strconv.Atoi(s); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tmaxIdle = i\n\t\t}\n\t}\n\n\treturn New(open, usable, maxOpen, maxIdle)\n}", "func (a *Client) ListOpenstackSecurityGroupsNoCredentialsV2(params *ListOpenstackSecurityGroupsNoCredentialsV2Params, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListOpenstackSecurityGroupsNoCredentialsV2OK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewListOpenstackSecurityGroupsNoCredentialsV2Params()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"listOpenstackSecurityGroupsNoCredentialsV2\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/openstack/securitygroups\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &ListOpenstackSecurityGroupsNoCredentialsV2Reader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*ListOpenstackSecurityGroupsNoCredentialsV2OK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\tunexpectedSuccess := result.(*ListOpenstackSecurityGroupsNoCredentialsV2Default)\n\treturn nil, runtime.NewAPIError(\"unexpected success response: content available as default response in error\", unexpectedSuccess, unexpectedSuccess.Code())\n}", "func (c *EC2) createAWSEC2Instance(input *RunInstancesInput) (r aws.Referencer, attr aws.Attrabuter, err error) {\n\n\truninstancesrequest := input\n\treservation, err := RunInstances(runinstancesrequest)\n\tif err == nil {\n\t\tdescribeinstancesrequest := &DescribeInstancesInput{}\n\t\tif err := awsutil.CopyValue(describeinstancesrequest, \"InstanceIds\", reservation, \"Instances[].InstanceId\"); err != nil {\n\t\t\treturn reservation, reservation, err\n\t\t}\n\t\tif err := WaitUntilInstanceRunning(describeinstancesrequest); err != nil {\n\t\t\treturn reservation, reservation, err\n\t\t}\n\n\t} else {\n\t\treturn nil, nil, err\n\t}\n\tstartinstancesrequest := &StartInstancesInput{}\n\tif err := awsutil.CopyValue(startinstancesrequest, \"InstanceIds\", reservation, \"Instances[].InstanceId\"); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tstartinstancesresult, err := StartInstances(startinstancesrequest)\n\tif err == nil {\n\t\tdescribeinstancesrequest := 
&DescribeInstancesInput{}\n\t\tif err := awsutil.CopyValue(describeinstancesrequest, \"InstanceIds\", reservation, \"Instances[].InstanceId\"); err != nil {\n\t\t\treturn reservation, reservation, err\n\t\t}\n\t\tif err := WaitUntilInstanceRunning(describeinstancesrequest); err != nil {\n\t\t\treturn reservation, reservation, err\n\t\t}\n\n\t} else {\n\t\treturn nil, nil, err\n\t}\n\treturn reservation, reservation, nil\n}", "func (service *Service) GetAll() (*[]CloudSecurityGroupResponse, *http.Response, error) {\n\tv := new([]CloudSecurityGroupResponse)\n\tresp, err := service.Client.NewRequestDo(\"GET\", awsSgResourcePath, nil, nil, v)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn v, resp, nil\n}", "func (ts *Tester) createVPC() error {\n\tif ts.cfg.VPC.ID != \"\" {\n\t\tts.lg.Info(\"querying ELBv2\", zap.String(\"vpc-id\", ts.cfg.VPC.ID))\n\t\tctx, cancel := context.WithTimeout(context.Background(), time.Minute)\n\t\toutput, err := ts.elbv2APIV2.DescribeLoadBalancers(\n\t\t\tctx,\n\t\t\t&aws_elbv2_v2.DescribeLoadBalancersInput{},\n\t\t)\n\t\tcancel()\n\t\tif err != nil {\n\t\t\tts.lg.Warn(\"failed to describe ELBv2\", zap.Error(err))\n\t\t} else {\n\t\t\tfor _, ev := range output.LoadBalancers {\n\t\t\t\tarn := aws_v2.ToString(ev.LoadBalancerArn)\n\t\t\t\tvpcID := aws_v2.ToString(ev.VpcId)\n\t\t\t\tif vpcID == ts.cfg.VPC.ID {\n\t\t\t\t\tts.lg.Warn(\"found ELBv2 for this VPC; may overlap with the other cluster\",\n\t\t\t\t\t\tzap.String(\"vpc-id\", ts.cfg.VPC.ID),\n\t\t\t\t\t\tzap.String(\"elb-arn\", arn),\n\t\t\t\t\t)\n\t\t\t\t} else {\n\t\t\t\t\tts.lg.Info(\"found ELBv2 for other VPCs\", zap.String(\"vpc-id\", vpcID), zap.String(\"elb-arn\", arn))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tts.lg.Info(\"querying subnet IDs for given VPC\",\n\t\t\tzap.String(\"vpc-id\", ts.cfg.VPC.ID),\n\t\t)\n\t\tctx, cancel = context.WithTimeout(context.Background(), time.Minute)\n\t\tsresp, err := ts.ec2APIV2.DescribeSubnets(\n\t\t\tctx,\n\t\t\t&aws_ec2_v2.DescribeSubnetsInput{\n\t\t\t\tFilters: []aws_ec2_v2_types.Filter{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: aws_v2.String(\"vpc-id\"),\n\t\t\t\t\t\tValues: []string{ts.cfg.VPC.ID},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t\tcancel()\n\t\tif err != nil {\n\t\t\tts.lg.Warn(\"failed to subnets\", zap.Error(err))\n\t\t\treturn err\n\t\t}\n\n\t\tts.cfg.VPC.PublicSubnetIDs = make([]string, 0, len(sresp.Subnets))\n\t\tts.cfg.VPC.PrivateSubnetIDs = make([]string, 0, len(sresp.Subnets))\n\t\tfor _, sv := range sresp.Subnets {\n\t\t\tid := aws_v2.ToString(sv.SubnetId)\n\t\t\tnetworkTagValue := \"\"\n\t\t\tfor _, tg := range sv.Tags {\n\t\t\t\tswitch aws_v2.ToString(tg.Key) {\n\t\t\t\tcase \"Network\":\n\t\t\t\t\tnetworkTagValue = aws_v2.ToString(tg.Value)\n\t\t\t\t}\n\t\t\t\tif networkTagValue != \"\" {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tts.lg.Info(\"found subnet\",\n\t\t\t\tzap.String(\"id\", id),\n\t\t\t\tzap.String(\"availability-zone\", aws_v2.ToString(sv.AvailabilityZone)),\n\t\t\t\tzap.String(\"network-tag\", networkTagValue),\n\t\t\t)\n\t\t\tswitch networkTagValue {\n\t\t\tcase \"Public\":\n\t\t\t\tts.cfg.VPC.PublicSubnetIDs = append(ts.cfg.VPC.PublicSubnetIDs, id)\n\t\t\tcase \"Private\":\n\t\t\t\tts.cfg.VPC.PrivateSubnetIDs = append(ts.cfg.VPC.PrivateSubnetIDs, id)\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"'Network' tag not found in subnet %q\", id)\n\t\t\t}\n\t\t}\n\t\tif len(ts.cfg.VPC.PublicSubnetIDs) == 0 {\n\t\t\treturn fmt.Errorf(\"no subnet found for VPC ID %q\", ts.cfg.VPC.ID)\n\t\t}\n\n\t\tts.lg.Info(\"querying security IDs\", 
zap.String(\"vpc-id\", ts.cfg.VPC.ID))\n\t\tctx, cancel = context.WithTimeout(context.Background(), time.Minute)\n\t\tgresp, err := ts.ec2APIV2.DescribeSecurityGroups(\n\t\t\tctx,\n\t\t\t&aws_ec2_v2.DescribeSecurityGroupsInput{\n\t\t\t\tFilters: []aws_ec2_v2_types.Filter{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: aws_v2.String(\"vpc-id\"),\n\t\t\t\t\t\tValues: []string{ts.cfg.VPC.ID},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t\tcancel()\n\t\tif err != nil {\n\t\t\tts.lg.Warn(\"failed to security groups\", zap.Error(err))\n\t\t\treturn err\n\t\t}\n\t\tfor _, sg := range gresp.SecurityGroups {\n\t\t\tid, name := aws_v2.ToString(sg.GroupId), aws_v2.ToString(sg.GroupName)\n\t\t\tts.lg.Info(\"found security group\", zap.String(\"id\", id), zap.String(\"name\", name))\n\t\t\tif name != \"default\" {\n\t\t\t\tts.cfg.VPC.SecurityGroupID = id\n\t\t\t}\n\t\t}\n\t\tif ts.cfg.VPC.SecurityGroupID == \"\" {\n\t\t\treturn fmt.Errorf(\"no security group found for VPC ID %q\", ts.cfg.VPC.ID)\n\t\t}\n\n\t\tts.cfg.Sync()\n\t\treturn nil\n\t}\n\tif !ts.cfg.VPC.Create {\n\t\tts.lg.Info(\"VPC.Create false; skipping creation\")\n\t\treturn nil\n\t}\n\tif ts.cfg.VPC.ID != \"\" &&\n\t\tlen(ts.cfg.VPC.PublicSubnetIDs) > 0 &&\n\t\tts.cfg.VPC.SecurityGroupID != \"\" {\n\t\tts.lg.Info(\"VPC already created; no need to create a new one\")\n\t\treturn nil\n\t}\n\n\tif err := ts._createVPC(); err != nil { // AWS::EC2::VPC\n\t\treturn err\n\t}\n\tif err := ts.modifyVPC(); err != nil {\n\t\treturn err\n\t}\n\tif err := ts.createSecurityGroups(); err != nil { // AWS::EC2::SecurityGroup\n\t\treturn err\n\t}\n\tif err := ts.associateVPCCIDRBlocks(); err != nil { // AWS::EC2::VPCCidrBlock\n\t\treturn err\n\t}\n\n\tif err := ts.createInternetGateway(); err != nil { // AWS::EC2::InternetGateway\n\t\treturn err\n\t}\n\tif err := ts.createVPCGatewayAttachment(); err != nil { // AWS::EC2::VPCGatewayAttachment\n\t\treturn err\n\t}\n\n\tif err := ts.createPublicSubnets(); err != nil { // AWS::EC2::Subnet\n\t\treturn err\n\t}\n\tif err := ts.createPublicRouteTable(); err != nil { // AWS::EC2::RouteTable\n\t\treturn err\n\t}\n\tif err := ts.createPublicRoute(); err != nil { // AWS::EC2::Route\n\t\treturn err\n\t}\n\tif err := ts.createPublicSubnetRouteTableAssociation(); err != nil { // AWS::EC2::SubnetRouteTableAssociation\n\t\treturn err\n\t}\n\n\tif err := ts.createPublicEIPs(); err != nil { // AWS::EC2::EIP\n\t\treturn err\n\t}\n\tif err := ts.createPublicNATGateways(); err != nil { // AWS::EC2::NatGateway\n\t\treturn err\n\t}\n\n\tif err := ts.createPrivateSubnets(); err != nil { // AWS::EC2::Subnet\n\t\treturn err\n\t}\n\tif err := ts.createPrivateRouteTables(); err != nil { // AWS::EC2::RouteTable\n\t\treturn err\n\t}\n\tif err := ts.createPrivateRoutes(); err != nil { // AWS::EC2::Route\n\t\treturn err\n\t}\n\tif err := ts.createPrivateSubnetRouteTableAssociation(); err != nil { // AWS::EC2::SubnetRouteTableAssociation\n\t\treturn err\n\t}\n\n\tif err := ts.createDHCPOptions(); err != nil { // AWS::EC2::DHCPOptions, AWS::EC2::VPCDHCPOptionsAssociation\n\t\treturn err\n\t}\n\n\tif err := ts.authorizeSecurityGroup(); err != nil {\n\t\treturn err\n\t}\n\n\tts.lg.Info(\"created a VPC\",\n\t\tzap.String(\"vpc-id\", ts.cfg.VPC.ID),\n\t\tzap.Strings(\"vpc-cidr-blocks\", ts.cfg.VPC.CIDRs),\n\t\tzap.Strings(\"public-subnet-ids\", ts.cfg.VPC.PublicSubnetIDs),\n\t\tzap.Strings(\"private-subnet-ids\", ts.cfg.VPC.PrivateSubnetIDs),\n\t\tzap.String(\"control-plane-security-group-id\", ts.cfg.VPC.SecurityGroupID),\n\t)\n\n\tts.cfg.Sync()\n\treturn 
nil\n}", "func NewGroup(client *gosip.SPClient, endpoint string, config *RequestConfig) *Group {\n\treturn &Group{\n\t\tclient: client,\n\t\tendpoint: endpoint,\n\t\tconfig: config,\n\t\tmodifiers: NewODataMods(),\n\t}\n}", "func NewConsortiumsGroup(conf map[string]*genesisconfig.Consortium) (*cb.ConfigGroup, error) {\n\tconsortiumsGroup := protoutil.NewConfigGroup()\n\t// This policy is not referenced anywhere, it is only used as part of the implicit meta policy rule at the channel level, so this setting\n\t// effectively degrades control of the ordering system channel to the ordering admins\n\taddPolicy(consortiumsGroup, policies.SignaturePolicy(channelconfig.AdminsPolicyKey, policydsl.AcceptAllPolicy), ordererAdminsPolicyName)\n\n\tfor consortiumName, consortium := range conf {\n\t\tvar err error\n\t\tconsortiumsGroup.Groups[consortiumName], err = NewConsortiumGroup(consortium)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to create consortium %s\", consortiumName)\n\t\t}\n\t}\n\n\tconsortiumsGroup.ModPolicy = ordererAdminsPolicyName\n\treturn consortiumsGroup, nil\n}", "func NewCalendarGroup()(*CalendarGroup) {\n m := &CalendarGroup{\n Entity: *NewEntity(),\n }\n return m\n}", "func (s *Server) newNSEC(qname string) *dns.NSEC {\n\tqlabels := dns.SplitDomainName(qname)\n\tif len(qlabels) < s.domainLabels {\n\t\t// TODO(miek): can not happen...?\n\t}\n\t// Strip the last s.domainLabels, return up to 4 before\n\t// that. Four labels is the maximum qname we can handle.\n\tls := len(qlabels) - s.domainLabels\n\tls4 := ls - 4\n\tif ls4 < 0 {\n\t\tls4 = 0\n\t}\n\tkey := qlabels[ls4:ls]\n\tprev, next := s.registry.GetNSEC(strings.Join(key, \".\"))\n\tnsec := &dns.NSEC{Hdr: dns.RR_Header{Name: prev + s.domain + \".\", Rrtype: dns.TypeNSEC, Class: dns.ClassINET, Ttl: 60},\n\t\tNextDomain: next + s.domain + \".\"}\n\tif prev == \"\" {\n\t\tnsec.TypeBitMap = []uint16{dns.TypeA, dns.TypeSOA, dns.TypeNS, dns.TypeAAAA, dns.TypeRRSIG, dns.TypeNSEC, dns.TypeDNSKEY}\n\t} else {\n\t\tnsec.TypeBitMap = []uint16{dns.TypeA, dns.TypeAAAA, dns.TypeSRV, dns.TypeRRSIG, dns.TypeNSEC}\n\t}\n\treturn nsec\n}", "func NewSecurityGroupRule(ctx *pulumi.Context,\n\tname string, args *SecurityGroupRuleArgs, opts ...pulumi.ResourceOption) (*SecurityGroupRule, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.FromPort == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'FromPort'\")\n\t}\n\tif args.Protocol == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Protocol'\")\n\t}\n\tif args.SecurityGroupId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'SecurityGroupId'\")\n\t}\n\tif args.ToPort == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ToPort'\")\n\t}\n\tif args.Type == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Type'\")\n\t}\n\tvar resource SecurityGroupRule\n\terr := ctx.RegisterResource(\"aws:ec2/securityGroupRule:SecurityGroupRule\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func newEksProvisioner(stackConfig interfaces.IStack, clusterSot interfaces.IClusterSot) (*EksProvisioner, error) {\n\teksConfig, err := parseEksConfig(stackConfig)\n\tif err != nil {\n\t\treturn nil, errors.WithStack(err)\n\t}\n\n\treturn &EksProvisioner{\n\t\tstack: stackConfig,\n\t\teksConfig: *eksConfig,\n\t\tclusterSot: clusterSot,\n\t}, nil\n}", "func 
NewGroupAddr2(a, b uint8) GroupAddr {\n\treturn GroupAddr(a)<<8 | GroupAddr(b)\n}", "func (o *Operator) NewLicense(ctx context.Context, req ops.NewLicenseRequest) (string, error) {\n\tif !o.isOpsCenter() {\n\t\treturn \"\", trace.AccessDenied(\"cannot generate licenses\")\n\t}\n\n\terr := req.Validate()\n\tif err != nil {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\n\to.Infof(\"Generating new license: %v\", req)\n\n\tca, err := pack.ReadCertificateAuthority(o.packages())\n\tif err != nil {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\n\tlicense, err := licenseapi.NewLicense(licenseapi.NewLicenseInfo{\n\t\tMaxNodes: req.MaxNodes,\n\t\tValidFor: req.ValidFor,\n\t\tStopApp: req.StopApp,\n\t\tTLSKeyPair: *ca,\n\t})\n\tif err != nil {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\n\tparsed, err := licenseapi.ParseLicense(license)\n\tif err != nil {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\n\tlibevents.Emit(ctx, o, events.LicenseGenerated, libevents.Fields{\n\t\tevents.FieldExpires: parsed.GetPayload().Expiration,\n\t\tevents.FieldMaxNodes: parsed.GetPayload().MaxNodes,\n\t})\n\n\treturn license, nil\n}", "func (client IdentityClient) createGroup(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/groups\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateGroupResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}" ]
[ "0.6844878", "0.66064495", "0.6216187", "0.6186211", "0.6084027", "0.60137", "0.58653337", "0.5861363", "0.585019", "0.58274394", "0.57939017", "0.5772428", "0.57188517", "0.56309795", "0.55850065", "0.5574755", "0.5543562", "0.5481946", "0.54668605", "0.54562044", "0.5438065", "0.5427066", "0.53744036", "0.5362565", "0.5351783", "0.5348959", "0.53459024", "0.5338469", "0.53374", "0.53060824", "0.5278722", "0.5248681", "0.5220454", "0.52123725", "0.51932585", "0.51904595", "0.5189232", "0.5164036", "0.51637167", "0.5160126", "0.51488763", "0.5123804", "0.5118642", "0.5110015", "0.51018566", "0.50942415", "0.50831705", "0.50774133", "0.5074758", "0.50606626", "0.5052034", "0.50490296", "0.5043407", "0.5042812", "0.50402313", "0.50114346", "0.49955127", "0.49753392", "0.4972153", "0.4957553", "0.4955947", "0.4949484", "0.4939584", "0.49328712", "0.49321526", "0.4927026", "0.49260646", "0.49221417", "0.4916763", "0.49099353", "0.49067855", "0.49047884", "0.48936802", "0.48882332", "0.48757055", "0.4873308", "0.48721886", "0.48646668", "0.4863891", "0.48631465", "0.4856064", "0.485095", "0.48487782", "0.48460686", "0.4845025", "0.4843473", "0.4842115", "0.48398393", "0.4831808", "0.48288956", "0.48186794", "0.48149487", "0.48144513", "0.4814076", "0.48137626", "0.4809243", "0.48050407", "0.48026013", "0.48011383", "0.47934607" ]
0.7339454
0
GetConsoleURL will return a url to the AWS console for this security group
func (e *EC2SG) GetConsoleURL() string {
	t := "https://%s.console.aws.amazon.com/ec2/v2/home?region=%s#SecurityGroups:groupId=%s"
	return fmt.Sprintf(t, e.Region, e.Region, e.ID)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (a *MemberAwaitility) GetConsoleURL() string {\n\troute := &routev1.Route{}\n\tnamespacedName := types.NamespacedName{Namespace: \"openshift-console\", Name: \"console\"}\n\terr := a.Client.Get(context.TODO(), namespacedName, route)\n\trequire.NoError(a.T, err)\n\treturn fmt.Sprintf(\"https://%s/%s\", route.Spec.Host, route.Spec.Path)\n}", "func (o *SubscriptionRegistration) GetConsoleURL() (value string, ok bool) {\n\tok = o != nil && o.bitmap_&2 != 0\n\tif ok {\n\t\tvalue = o.consoleURL\n\t}\n\treturn\n}", "func (o *SubscriptionRegistration) ConsoleURL() string {\n\tif o != nil && o.bitmap_&2 != 0 {\n\t\treturn o.consoleURL\n\t}\n\treturn \"\"\n}", "func (c Creds) ToConsoleURL() (string, error) {\n\treturn c.ToCustomConsoleURL(\"\")\n}", "func (u UserRequested) ConsoleURL() string {\n\treturn fmt.Sprintf(\"%s/admin/user-management/%s\", u.Network.ConsoleURL, u.Entity.ID)\n}", "func BuildConsoleURLForService(cluster, service string) string {\n\tregion := os.Getenv(\"AWS_REGION\")\n\treturn fmt.Sprintf(\"https://%s.console.aws.amazon.com/ecs/home?region=%s#/clusters/%s/services/%s\", region, region, cluster, service)\n}", "func (b *SubscriptionMetricsBuilder) ConsoleUrl(value string) *SubscriptionMetricsBuilder {\n\tb.consoleUrl = value\n\tb.bitmap_ |= 16\n\treturn b\n}", "func (client *Client) GetArmsConsoleUrl(request *GetArmsConsoleUrlRequest) (response *GetArmsConsoleUrlResponse, err error) {\n\tresponse = CreateGetArmsConsoleUrlResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func (b *cloudBackend) CloudConsoleURL(paths ...string) string {\n\treturn cloudConsoleURL(b.CloudURL(), paths...)\n}", "func (c Creds) ToCustomConsoleURL(dest string) (string, error) {\n\tlogger.InfoMsg(\"generating console url\")\n\tconsoleToken, err := c.toConsoleToken()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tnamespace, err := c.namespace()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tbaseURL := fmt.Sprintf(consoleTokenURL, namespace)\n\tvar targetURL string\n\tif c.Region != \"\" {\n\t\ttargetURL = fmt.Sprintf(\"https://%s.console.%s.com/%s\", c.Region, namespace, dest)\n\t} else {\n\t\ttargetURL = fmt.Sprintf(\"https://console.%s.com/%s\", namespace, dest)\n\t}\n\tlogger.InfoMsgf(\"using destination url %s\", targetURL)\n\turlParts := []string{\n\t\tbaseURL,\n\t\t\"/federation\",\n\t\t\"?Action=login\",\n\t\t\"&Issuer=\",\n\t\t\"&Destination=\",\n\t\turl.QueryEscape(targetURL),\n\t\t\"&SigninToken=\",\n\t\tconsoleToken,\n\t}\n\turlString := strings.Join(urlParts, \"\")\n\treturn urlString, nil\n}", "func (c *ConfigurationData) GetAdminConsoleServiceURL() string {\n\treturn c.v.GetString(varAdminConsoleServiceURL)\n}", "func (f *IBMPIInstanceClient) PostConsoleURL(id, powerinstanceid string, timeout time.Duration) (models.Object, error) {\n\n\tparams := p_cloud_p_vm_instances.NewPcloudPvminstancesConsolePostParamsWithTimeout(helpers.PICreateTimeOut).WithCloudInstanceID(powerinstanceid).WithPvmInstanceID(id)\n\tpostok, err := f.session.Power.PCloudPVMInstances.PcloudPvminstancesConsolePost(params, ibmpisession.NewAuth(f.session, powerinstanceid))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to Generate the Console URL PVM Instance:%s\", err)\n\t}\n\treturn postok.Payload, nil\n}", "func (o LookupProvisioningConfigResultOutput) CloudConsoleUri() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupProvisioningConfigResult) string { return v.CloudConsoleUri }).(pulumi.StringOutput)\n}", "func (o GroupInitContainerVolumeGitRepoOutput) Url() 
pulumi.StringOutput {\n\treturn o.ApplyT(func(v GroupInitContainerVolumeGitRepo) string { return v.Url }).(pulumi.StringOutput)\n}", "func (c *Config) GetConsoleRouteName() string {\n\treturn c.member.GetString(varConsoleRouteName)\n}", "func (o GroupContainerVolumeGitRepoOutput) Url() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GroupContainerVolumeGitRepo) string { return v.Url }).(pulumi.StringOutput)\n}", "func (o BucketOutput) Url() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Bucket) pulumi.StringOutput { return v.Url }).(pulumi.StringOutput)\n}", "func (c *Client) SQRLCliURL(nr *NutResponse, includeCan bool) *url.URL {\n\tu := c.baseURL()\n\tu.Scheme = ssp.SqrlScheme\n\tu.Path += \"/cli.sqrl\"\n\tparams := make(url.Values)\n\tparams.Add(\"nut\", string(nr.Nut))\n\tif includeCan {\n\t\tparams.Add(\"can\", ssp.Sqrl64.EncodeToString([]byte(nr.Can)))\n\t}\n\tu.RawQuery = params.Encode()\n\treturn u\n}", "func (m *SectionGroup) GetSectionGroupsUrl()(*string) {\n return m.sectionGroupsUrl\n}", "func Console(consoleFlags *flags.ConsoleFlags) error {\n\n\taccount, err := buildIdpAccount(consoleFlags.LoginExecFlags)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error building login details\")\n\t}\n\n\tsharedCreds := awsconfig.NewSharedCredentials(account.Profile, account.CredentialsFile)\n\n\t// this checks if the credentials file has been created yet\n\t// can only really be triggered if saml2aws exec is run on a new\n\t// system prior to creating $HOME/.aws\n\texist, err := sharedCreds.CredsExists()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error loading credentials\")\n\t}\n\tif !exist {\n\t\tlog.Println(\"unable to load credentials, login required to create them\")\n\t\treturn nil\n\t}\n\n\tawsCreds, err := loadOrLogin(account, sharedCreds, consoleFlags)\n\tif err != nil {\n\t\treturn errors.Wrap(err,\n\t\t\tfmt.Sprintf(\"error loading credentials for profile: %s\", consoleFlags.LoginExecFlags.ExecProfile))\n\t}\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error logging in\")\n\t}\n\n\tif consoleFlags.LoginExecFlags.ExecProfile != \"\" {\n\t\t// Assume the desired role before generating env vars\n\t\tawsCreds, err = assumeRoleWithProfile(consoleFlags.LoginExecFlags.ExecProfile, consoleFlags.LoginExecFlags.CommonFlags.SessionDuration)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err,\n\t\t\t\tfmt.Sprintf(\"error acquiring credentials for profile: %s\", consoleFlags.LoginExecFlags.ExecProfile))\n\t\t}\n\t}\n\n\tlog.Printf(\"Presenting credentials for %s to %s\", account.Profile, federationURL)\n\treturn federatedLogin(awsCreds, consoleFlags)\n}", "func (c *Config) GetConsoleNamespace() string {\n\treturn c.member.GetString(varConsoleNamespace)\n}", "func ServiceInstanceURL(cliConnection plugin.CliConnection, serviceInstanceName string, accessToken string, authClient httpclient.AuthenticatedClient) (string, error) {\n\tserviceModel, err := cliConnection.GetService(serviceInstanceName)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Service instance not found: %s\", err)\n\t}\n\n\tparsedUrl, err := url.Parse(serviceModel.DashboardUrl)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tpath := parsedUrl.Path\n\n\tsegments := strings.Split(path, \"/\")\n\tif len(segments) == 0 || (len(segments) == 1 && segments[0] == \"\") {\n\t\treturn \"\", fmt.Errorf(\"path of %s has no segments\", serviceModel.DashboardUrl)\n\t}\n\tguid := segments[len(segments)-1]\n\n\tparsedUrl.Path = \"/cli/instance/\" + guid\n\n\tbodyReader, statusCode, err := 
authClient.DoAuthenticatedGet(parsedUrl.String(), accessToken)\n\n\t//In the case of a 404, the most likely cause is that the CLI version is greater than the broker version.\n\tif statusCode == http.StatusNotFound {\n\t\treturn \"\", errors.New(\"The /cli/instance endpoint could not be found.\\n\" +\n\t\t\t\"This could be because the Spring Cloud Services broker version is too old.\\n\" +\n\t\t\t\"Please ensure SCS is at least version 1.3.3.\\n\")\n\t}\n\tvar serviceDefinitionResp serviceDefinitionResp\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Invalid service definition response: %s\", err)\n\t}\n\n\tbody, err := ioutil.ReadAll(bodyReader)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Cannot read service definition response body: %s\", err)\n\t}\n\n\terr = json.Unmarshal(body, &serviceDefinitionResp)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"JSON response failed to unmarshal: %s\", string(body))\n\t}\n\tif serviceDefinitionResp.Credentials.URI == \"\" {\n\t\treturn \"\", fmt.Errorf(\"JSON response contained empty property 'credentials.url', response body: '%s'\", string(body))\n\n\t}\n\treturn serviceDefinitionResp.Credentials.URI + \"/\", nil\n}", "func (o SnapshotImportDiskContainerOutput) Url() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v SnapshotImportDiskContainer) *string { return v.Url }).(pulumi.StringPtrOutput)\n}", "func (o AppSharedCredentialsOutput) Url() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *AppSharedCredentials) pulumi.StringPtrOutput { return v.Url }).(pulumi.StringPtrOutput)\n}", "func (o StudioOutput) Url() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Studio) pulumi.StringOutput { return v.Url }).(pulumi.StringOutput)\n}", "func GetPrivateRegistryURL(cluster *v3.Cluster) string {\n\tregistry := GetPrivateRegistry(cluster)\n\tif registry == nil {\n\t\treturn \"\"\n\t}\n\treturn registry.URL\n}", "func (p providerServices) AWSCli() awscli.AWS {\n\treturn p.cli\n}", "func GetUrl(ctx *pulumi.Context) string {\n\treturn config.Get(ctx, \"grafana:url\")\n}", "func (o *Platform) GetConsole() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Console\n}", "func GetSecurityGroupInfo(sess *session.Session) (*ec2.DescribeSecurityGroupsOutput, error) {\n // snippet-start:[ec2.go.describe_security_groups.call]\n svc := ec2.New(sess)\n\n result, err := svc.DescribeSecurityGroups(nil)\n // snippet-end:[ec2.go.describe_security_groups.call]\n if err != nil {\n return nil, err\n }\n\n return result, nil\n}", "func CreateGetArmsConsoleUrlRequest() (request *GetArmsConsoleUrlRequest) {\n\trequest = &GetArmsConsoleUrlRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"ARMS\", \"2019-08-08\", \"GetArmsConsoleUrl\", \"arms\", \"openAPI\")\n\trequest.Method = requests.GET\n\treturn\n}", "func (c *Client) CliURL(nut ssp.Nut, can string) string {\n\tparams := make(url.Values)\n\tif nut != \"\" {\n\t\tparams.Add(\"nut\", string(nut))\n\t}\n\tif can != \"\" {\n\t\tparams.Add(\"can\", can)\n\t}\n\tu := c.baseURL()\n\tu.Path = c.Qry\n\tif len(params) > 0 {\n\t\tu.RawQuery = params.Encode()\n\t}\n\treturn u.String()\n}", "func (o ContainerServiceOutput) Url() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ContainerService) pulumi.StringOutput { return v.Url }).(pulumi.StringOutput)\n}", "func getUrl(config map[string]string, path string) (string) {\n url := fmt.Sprint(\"https://\", config[\"CONTROL_SERVICE\"], \":\", config[\"CONTROL_PORT\"], path)\n return url\n}", "func (l 
*jsiiProxy_LambdaDeploymentGroup) ToString() *string {\n\tvar returns *string\n\n\t_jsii_.Invoke(\n\t\tl,\n\t\t\"toString\",\n\t\tnil, // no parameters\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func (s AwsEc2NetworkInterfaceSecurityGroup) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (group *Group) ToURL() string {\n\treturn toURL(group.endpoint, group.modifiers)\n}", "func RequestConsole(command string, serviceID string, settings *models.Settings) *models.Task {\n\tconsole := map[string]string{}\n\tif command != \"\" {\n\t\tconsole[\"command\"] = command\n\t}\n\tb, err := json.Marshal(console)\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\tos.Exit(1)\n\t}\n\tresp := httpclient.Post(b, fmt.Sprintf(\"%s/v1/environments/%s/services/%s/console\", settings.PaasHost, settings.EnvironmentID, serviceID), true, settings)\n\tvar m map[string]string\n\tjson.Unmarshal(resp, &m)\n\treturn &models.Task{\n\t\tID: m[\"taskId\"],\n\t}\n}", "func (a *API) GetEndpointURL() string {\n\tif a.Sandbox {\n\t\treturn \"https://apitest.authorize.net/xml/v1/request.api\"\n\t}\n\treturn \"https://api.authorize.net/xml/v1/request.api\"\n}", "func (dash *Dashboard) GetUrl() string {\n\treturn GetDashboardFolderUrl(dash.IsFolder, dash.Uid, dash.Slug)\n}", "func GetURL(name string) func(*types.Cmd) {\n\treturn func(g *types.Cmd) {\n\t\tg.AddOptions(\"get-url\")\n\t\tg.AddOptions(name)\n\t}\n}", "func (role *Role) URL() string {\n\treturn \"https://localhost/tips/api/role\"\n}", "func getGroupURL(count int) (groupURL string, err error) {\n\tif len(os.Args) == 1 {\n\t\tlog.Fatal(\"Error: No group adress!\")\n\t}\n\n\tif len(os.Args) > 2 {\n\t\tlog.Fatal(\"Error: Too many arguments!\")\n\t}\n\n\tr, err := http.Get(os.Args[1])\n\tif err != nil {\n\t\treturn\n\t}\n\n\t_, err = ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tu, err := url.Parse(os.Args[1])\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdomain := strings.Replace(u.Path, \"/\", \"\", 1)\n\tgroupURL = fmt.Sprintf(gURL+\"&domain=%s&count=%d\", domain, count)\n\treturn\n}", "func (o ApplicationSpecRolloutplanRolloutbatchesBatchrolloutwebhooksOutput) Url() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApplicationSpecRolloutplanRolloutbatchesBatchrolloutwebhooks) string { return v.Url }).(pulumi.StringOutput)\n}", "func GetURL(kind string, token string) string {\n\treturn fmt.Sprintf(\"%s%s?token=%s\", \"https://api.clubhouse.io/api/v2/\", kind, token)\n}", "func (c Creds) ToSignoutURL() (string, error) {\n\tlogger.InfoMsg(\"generating signout url\")\n\tnamespace, err := c.namespace()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tbaseURL := fmt.Sprintf(consoleTokenURL, namespace)\n\turl := strings.Join([]string{baseURL, \"/oauth?Action=logout\"}, \"\")\n\treturn url, nil\n}", "func (m *MockIamServer) GetURL() string {\n\treturn m.s.URL\n}", "func main() {\n opts := gophercloud.AuthOptions{\n\t\tIdentityEndpoint: \"http://10.169.41.188/identity\",\n\t\tUsername: \"admin\",\n\t\tPassword: \"password\",\n\t\tTenantID: \"01821bd38f2f474489491adb0da7efaf\",\n\t\tDomainID: \"default\",\n\t}\n\tprovider, err := openstack.AuthenticatedClient(opts)\n\tfmt.Println(err)\n\tif err != nil {\n\t\tfmt.Errorf(\"Sending get container group request failed: %v\", err)\n\t\treturn\n\t}\n client, err := openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{\n\t\tRegion: \"RegionOne\",\n\t})\n\tif err != nil {\n\t\tfmt.Errorf(\"Unable to create a network client: %v\", err)\n\t}\n\tallPages, err := subnets.List(client, nil).AllPages()\n\tif err 
!= nil {\n\t\tfmt.Errorf(\"Unable to list subnets: %v\", err)\n\t}\n\n\tallSubnets, err := subnets.ExtractSubnets(allPages)\n\tif err != nil {\n\t\tfmt.Errorf(\"Unable to extract subnets: %v\", err)\n\t}\n\n\tfor _, subnet := range allSubnets {\n\t b, _ := json.MarshalIndent(subnet, \"\", \" \")\n fmt.Printf(\"%s\", string(b))\n }\n\n}", "func (g *GitLab) URL() string {\n\treturn g.url\n}", "func (d *Driver) GetURL() (string, error) {\n\tif err := drivers.MustBeRunning(d); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tip, err := d.GetIP()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn fmt.Sprintf(\"tcp://%s\", net.JoinHostPort(ip, \"2376\")), nil\n}", "func (e Environment) URL() string {\r\n\treturn e.apiURL\r\n}", "func (o CatalogOutput) Url() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Catalog) pulumi.StringOutput { return v.Url }).(pulumi.StringOutput)\n}", "func (o ContainerOutput) Url() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Container) pulumi.StringOutput { return v.Url }).(pulumi.StringOutput)\n}", "func (o GroupInitContainerVolumeGitRepoPtrOutput) Url() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *GroupInitContainerVolumeGitRepo) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Url\n\t}).(pulumi.StringPtrOutput)\n}", "func (o TimelineOutput) Url() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Timeline) pulumi.StringOutput { return v.Url }).(pulumi.StringOutput)\n}", "func (o ApplicationSpecRolloutplanRolloutwebhooksOutput) Url() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApplicationSpecRolloutplanRolloutwebhooks) string { return v.Url }).(pulumi.StringOutput)\n}", "func GetDCOSURL() string {\n\t// get data from CLI or from envar, and trim e.g. \"/\" or \"/#/\" from copy-pasted Dashboard URLs:\n\treturn strings.TrimRight(RequiredCLIConfigValue(\n\t\t\"core.dcos_url\",\n\t\t\"DC/OS Cluster URL\",\n\t\t\"Run 'dcos config set core.dcos_url http://your-cluster.com' to configure\"),\n\t\t\"#/\")\n}", "func (o BuildSpecSourceOutput) Url() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BuildSpecSource) string { return v.Url }).(pulumi.StringOutput)\n}", "func (client *Client) GetArmsConsoleUrlWithChan(request *GetArmsConsoleUrlRequest) (<-chan *GetArmsConsoleUrlResponse, <-chan error) {\n\tresponseChan := make(chan *GetArmsConsoleUrlResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetArmsConsoleUrl(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (r *Repository) GetStargazersURL() string {\n\tif r == nil || r.StargazersURL == nil {\n\t\treturn \"\"\n\t}\n\treturn *r.StargazersURL\n}", "func (s CreateDBSecurityGroupOutput) String() string {\n\treturn nifcloudutil.Prettify(s)\n}", "func (s AwsEc2SecurityGroupDetails) String() string {\n\treturn awsutil.Prettify(s)\n}", "func GetApiUrl(ctx *pulumi.Context) string {\n\treturn config.Get(ctx, \"newrelic:apiUrl\")\n}", "func (o BuildRunStatusBuildSpecSourceOutput) Url() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BuildRunStatusBuildSpecSource) string { return v.Url }).(pulumi.StringOutput)\n}", "func (b *BaseCmd) GitScmURL() (string, error) {\n\tgitScmURLCommand := util.Command{\n\t\tName: \"git\",\n\t\tArgs: []string{\"config\", \"--get\", 
\"remote.origin.url\"},\n\t}\n\n\tout, err := b.CommandRunner.RunWithoutRetry(&gitScmURLCommand)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tout = strings.ReplaceAll(out, \"[email protected]:\", \"https://github.com/\")\n\treturn out, nil\n}", "func (c *ContainerClient) URL() string {\n\treturn c.client.endpoint\n}", "func (ic *iamClient) GetApplyURL(request ApplicationRequest, relatedResources []ApplicationAction, user BkUser) (string,\n\terror) {\n\tif ic == nil {\n\t\treturn \"\", ErrServerNotInit\n\t}\n\n\tapplication := request.BuildApplication(relatedResources)\n\n\turl, err := ic.cli.GetApplyURL(application, user.BkToken, user.BkUserName)\n\tif err != nil {\n\t\tklog.Errorf(\"iam generate apply url failed: %s\", err)\n\t\treturn IamAppURL, nil\n\t}\n\n\treturn url, nil\n}", "func (s AuthorizeNASSecurityGroupIngressOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (km *KataMonitor) GetAgentURL(w http.ResponseWriter, r *http.Request) {\n\tsandboxID, err := getSandboxIDFromReq(r)\n\tif err != nil {\n\t\tcommonServeError(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\tdata, err := shimclient.DoGet(sandboxID, defaultTimeout, \"agent-url\")\n\tif err != nil {\n\t\tcommonServeError(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\tfmt.Fprintln(w, string(data))\n}", "func (cli *Client) URL() string {\n\treturn cli.ref.Get(\"url\").String()\n}", "func (client *Client) GetArmsConsoleUrlWithCallback(request *GetArmsConsoleUrlRequest, callback func(response *GetArmsConsoleUrlResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetArmsConsoleUrlResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetArmsConsoleUrl(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (e *GSEvent) URL() string {\n\treturn fmt.Sprintf(\"gs://%v/%v\", e.Bucket, e.Name)\n}", "func (c *Client) GetSecurityGroup(ctx context.Context, zone, id string) (*SecurityGroup, error) {\n\tresp, err := c.GetSecurityGroupWithResponse(ctx, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsecurityGroup := securityGroupFromAPI(resp.JSON200)\n\tsecurityGroup.c = c\n\tsecurityGroup.zone = zone\n\n\treturn securityGroup, nil\n}", "func (c *jsiiProxy_CfnDataflowEndpointGroup) ToString() *string {\n\tvar returns *string\n\n\t_jsii_.Invoke(\n\t\tc,\n\t\t\"toString\",\n\t\tnil, // no parameters\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func DestroyConsole(jobID string, serviceID string, settings *models.Settings) {\n\thttpclient.Delete(fmt.Sprintf(\"%s/v1/environments/%s/services/%s/console/%s\", settings.PaasHost, settings.EnvironmentID, serviceID, jobID), true, settings)\n}", "func (c *ChromeTarget) Console() *ChromeConsole {\n\tif c.console == nil {\n\t\tc.console = newChromeConsole(c)\n\t}\n\treturn c.console\n}", "func BuildURL(route string) string {\n\tprefix := os.Getenv(\"AWS_LAMBDA_RUNTIME_API\")\n\tif len(prefix) == 0 {\n\t\treturn fmt.Sprintf(\"http://localhost:9001%s\", route)\n\t}\n\treturn fmt.Sprintf(\"http://%s%s\", prefix, route)\n}", "func getWebConsoleConfigMap(config *restclient.Config) (*corev1.ConfigMap, error) {\n\tmyScheme := runtime.NewScheme()\n\tcl, _ := client.New(config, client.Options{Scheme: myScheme})\n\tcorev1.AddToScheme(myScheme)\n\tconfigmap := &corev1.ConfigMap{}\n\terr := cl.Get(context.TODO(), types.NamespacedName{\n\t\tNamespace: 
\"openshift-web-console\", Name: \"webconsole-config\"}, configmap)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcmCopy := configmap.DeepCopy()\n\tif cmCopy == nil {\n\t\terr = errors.New(\"getWebConsoleConfigMap: Failed to copy web-console configuration data\")\n\t}\n\n\treturn cmCopy, err\n}", "func (o GroupContainerVolumeGitRepoPtrOutput) Url() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *GroupContainerVolumeGitRepo) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Url\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ApiLicenseOutput) Url() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApiLicense) *string { return v.Url }).(pulumi.StringPtrOutput)\n}", "func GetSecurityGroup(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *SecurityGroupState, opts ...pulumi.ResourceOption) (*SecurityGroup, error) {\n\tvar resource SecurityGroup\n\terr := ctx.ReadResource(\"aws:elasticache/securityGroup:SecurityGroup\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (s ApplySecurityGroupsToClientVpnTargetNetworkOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (fi *FilterInvoker) GetURL() *common.URL {\n\treturn fi.invoker.GetURL()\n}", "func CreateGetArmsConsoleUrlResponse() (response *GetArmsConsoleUrlResponse) {\n\tresponse = &GetArmsConsoleUrlResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (m *Command) URL() string { return m.API.Node().URI.String() }", "func (s AwsRdsDbInstanceVpcSecurityGroup) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (schematics *SchematicsV1) GetServiceURL() string {\n\treturn schematics.Service.GetServiceURL()\n}", "func (b *cloudBackend) cloudConsoleStackPath(stackID client.StackIdentifier) string {\n\treturn path.Join(stackID.Owner, stackID.Project, stackID.Stack)\n}", "func (o *StorageNetAppCloudTargetAllOf) GetCapUrl() string {\n\tif o == nil || o.CapUrl == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.CapUrl\n}", "func (cc *CloneCommand) URL() string {\n\treturn fmt.Sprintf(\"%s%s.git\", cc.BaseURL(), cc.GistID)\n}", "func GetAPIUrl() string {\n\tz := strings.TrimSpace(os.Getenv(\"CYPRESS_PARALLEL_API_URL\"))\n\tif z == \"\" {\n\t\treturn \"http://127.0.0.1:8080\"\n\t} else {\n\t\treturn z\n\t}\n}", "func (s GetRunGroupOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (d *ConnectionDetails) PrivateRegistryURL() (string, error) {\n\tr, err := regexp.Compile(`127\\.0\\.0\\.1`)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor _, credentials := range d.RegistryCredentials {\n\t\tif r.MatchString(credentials.URL) {\n\t\t\treturn credentials.URL, nil\n\t\t}\n\t}\n\n\treturn \"\", nil\n}", "func (s SecurityGroup) String() string {\n\treturn awsutil.Prettify(s)\n}", "func GetURLDB() string {\n\tenv := os.Getenv(\"ENVIRONMENT\")\n\tvar url string\n\n\tif env == \"PRODUCTION\" {\n\t\turl = \"mongodb://api:[email protected]:47003/heroku_rvdsxf5j\"\n\t} else {\n\t\turl = \"localhost\"\n\t}\n\treturn url\n}", "func (o PacketMirroringMirroredResourceInfoSubnetInfoResponseOutput) Url() pulumi.StringOutput {\n\treturn o.ApplyT(func(v PacketMirroringMirroredResourceInfoSubnetInfoResponse) string { return v.Url }).(pulumi.StringOutput)\n}", "func (o *CreateEventPayloadActions) GetUrl() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Url\n}", "func (dash *Dashboard) GenerateUrl() string {\n\treturn GetDashboardUrl(dash.Uid, dash.Slug)\n}", 
"func GetRegistryCtlURL() string {\n\turl := os.Getenv(\"REGISTRY_CONTROLLER_URL\")\n\tif len(url) == 0 {\n\t\treturn common.DefaultRegistryCtlURL\n\t}\n\treturn url\n}", "func GetNetworkSecurityGroup(ctx context.Context, nsgName string) (network.SecurityGroup, error) {\n\tnsgClient := getNsgClient()\n\treturn nsgClient.Get(ctx, config.GroupName(), nsgName, \"\")\n}" ]
[ "0.76067555", "0.66860074", "0.6570878", "0.6325054", "0.6235434", "0.60000056", "0.59207374", "0.5898576", "0.5892092", "0.5515598", "0.5500602", "0.5383667", "0.53329617", "0.4953479", "0.49308753", "0.4871541", "0.48535362", "0.48398408", "0.48320523", "0.48291644", "0.48264244", "0.48083648", "0.47026318", "0.46955937", "0.46558505", "0.4641289", "0.46375713", "0.4612368", "0.46001503", "0.45973253", "0.45905614", "0.4569795", "0.45579433", "0.45500273", "0.44889006", "0.44779876", "0.44706786", "0.44665074", "0.44616035", "0.44351512", "0.4422125", "0.44172454", "0.44092986", "0.44085255", "0.43947312", "0.43913838", "0.43902957", "0.4380421", "0.4367457", "0.4366733", "0.43664065", "0.43607587", "0.43551663", "0.43499824", "0.4339987", "0.43370962", "0.43353748", "0.43282363", "0.43198276", "0.43163362", "0.43160963", "0.4301782", "0.42831406", "0.42818215", "0.42797258", "0.42784163", "0.4276868", "0.42768627", "0.42732543", "0.42708296", "0.42707106", "0.42538932", "0.42534176", "0.42482337", "0.42453477", "0.4244472", "0.4240915", "0.4239503", "0.42392448", "0.42372036", "0.42367154", "0.4236046", "0.42268616", "0.42164835", "0.4211938", "0.42112458", "0.4204021", "0.42037022", "0.4200742", "0.41989392", "0.4197323", "0.419205", "0.4190419", "0.41887334", "0.41841406", "0.4178498", "0.41737133", "0.41690493", "0.41636848", "0.41534525" ]
0.8550274
0
EvalEC2SG walks through all ec2 instances
func (c *Client) EvalEC2SG(accounts []*policy.Account, p policy.Policy, regions []string, f func(policy.Violation)) error {
	var errs error
	ctx := context.Background()
	err := c.WalkAccountsAndRegions(accounts, regions, func(client *cziAws.Client, account *policy.Account, region string) {
		var nextToken *string
		// Limiting to 1000 iterations guarantees that we don't get an infinite loop, even if we have
		// a mistake below. Small tradeoff is that if there are greater than 1000*pagesize security
		// groups we won't scan them all.
		for i := 1; i <= 1000; i++ {
			log.Debugf("nextToken: %#v", nextToken)
			input := &ec2.DescribeSecurityGroupsInput{NextToken: nextToken}
			output, err := client.EC2.Svc.DescribeSecurityGroupsWithContext(ctx, input)
			if err != nil {
				errs = multierror.Append(errs, err)
			} else {
				for _, sg := range output.SecurityGroups {
					s := NewEC2SG(sg, region)
					if p.Match(s) {
						violation := policy.NewViolation(p, s, false, account)
						f(violation)
					}
				}
			}
			if output.NextToken == nil {
				break
			}
			nextToken = output.NextToken
		}
	})
	errs = multierror.Append(errs, err)
	return errs
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func ListEC2(environment, profile string, cfg *config.Config) ([]EC2Result, error) {\n\tif r, ok := resultCache[environment]; ok {\n\t\treturn r, nil\n\t}\n\tresultCache[environment] = make([]EC2Result, 0)\n\n\tec2Svc := getEC2Service(environment, profile)\n\n\tvar result *ec2.DescribeInstancesOutput\n\tvar err error\n\texpectEnvTag := environment\n\tif cfg.IsCI(environment) {\n\t\texpectEnvTag = \"ci\"\n\t}\n\trequest := &ec2.DescribeInstancesInput{\n\t\tFilters: []*ec2.Filter{\n\t\t\t{\n\t\t\t\tName: aws.String(\"tag:Environment\"),\n\t\t\t\tValues: []*string{aws.String(expectEnvTag)},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: aws.String(\"instance-state-name\"),\n\t\t\t\tValues: []*string{aws.String(ec2.InstanceStateNameRunning)},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor {\n\t\tif result != nil {\n\t\t\tif result.NextToken == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\trequest.SetNextToken(*result.NextToken)\n\t\t}\n\n\t\tif result, err = ec2Svc.DescribeInstances(request); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, r := range result.Reservations {\n\t\t\tfor _, i := range r.Instances {\n\t\t\t\tvar name, ansibleGroup string\n\t\t\t\tfor _, tag := range i.Tags {\n\t\t\t\t\tif tag.Key != nil && *tag.Key == \"Name\" && tag.Value != nil {\n\t\t\t\t\t\tname = *tag.Value\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t} else if tag.Key != nil && *tag.Key == \"AnsibleGroup\" && tag.Value != nil {\n\t\t\t\t\t\tansibleGroup = *tag.Value\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tvar ipAddr string\n\t\t\t\tif cfg.IsCI(environment) && cfg.IsAWSA(environment) {\n\t\t\t\t\tif len(i.NetworkInterfaces) > 0 &&\n\t\t\t\t\t\ti.NetworkInterfaces[0].Association != nil &&\n\t\t\t\t\t\tlen(*i.NetworkInterfaces[0].Association.PublicIp) > 0 &&\n\t\t\t\t\t\ti.NetworkInterfaces[0].Association.PublicIp != nil {\n\t\t\t\t\t\tipAddr = *i.NetworkInterfaces[0].Association.PublicIp\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif len(i.NetworkInterfaces) > 0 &&\n\t\t\t\t\t\tlen(i.NetworkInterfaces[0].PrivateIpAddresses) > 0 &&\n\t\t\t\t\t\ti.NetworkInterfaces[0].PrivateIpAddresses[0].PrivateIpAddress != nil {\n\t\t\t\t\t\tipAddr = *i.NetworkInterfaces[0].PrivateIpAddresses[0].PrivateIpAddress\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tresultCache[environment] = append(resultCache[environment], EC2Result{\n\t\t\t\t\tName: name,\n\t\t\t\t\tIPAddress: ipAddr,\n\t\t\t\t\tEnvironment: environment,\n\t\t\t\t\tAnsibleGroups: strings.Split(ansibleGroup, \",\"),\n\t\t\t\t\tGroupAKA: []string{},\n\t\t\t\t\tInstanceId: *i.InstanceId,\n\t\t\t\t\tLaunchTime: i.LaunchTime,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\tsort.Slice(resultCache[environment], func(i, j int) bool {\n\t\tif resultCache[environment][i].Name == resultCache[environment][j].Name {\n\t\t\treturn resultCache[environment][i].LaunchTime.Before(*resultCache[environment][j].LaunchTime)\n\t\t}\n\t\treturn resultCache[environment][i].Name < resultCache[environment][j].Name\n\t})\n\n\t// add (e.g.) 
\"publishing 2\" to GroupAKA field, now that the list is sorted\n\tcountGroup := make(map[string]int)\n\tfor i := range resultCache[environment] {\n\t\tfor _, grp := range resultCache[environment][i].AnsibleGroups {\n\t\t\tcountGroup[grp]++\n\t\t\tresultCache[environment][i].GroupAKA = append(resultCache[environment][i].GroupAKA, fmt.Sprintf(\"%s %d\", grp, countGroup[grp]))\n\t\t}\n\t}\n\n\treturn resultCache[environment], nil\n}", "func getInstances(ec2Service *ec2.EC2) ([]ec2.Instance, error) {\n\tresult, err := ec2Service.DescribeInstances(nil)\n\toutput := make([]ec2.Instance, 0)\n\n\tif err == nil {\n\t\tfor _, v := range result.Reservations {\n\t\t\tfor _, instance := range v.Instances {\n\t\t\t\toutput = append(output, *instance)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn output, err\n}", "func ListEC2(environment, profile string) ([]EC2Result, error) {\n\tif r, ok := resultCache[environment]; ok {\n\t\treturn r, nil\n\t}\n\tresultCache[environment] = make([]EC2Result, 0)\n\n\tec2Svc := getEC2Service(environment, profile)\n\n\tvar result *ec2.DescribeInstancesOutput\n\tvar err error\n\trequest := &ec2.DescribeInstancesInput{\n\t\tFilters: []*ec2.Filter{\n\t\t\t{\n\t\t\t\tName: aws.String(\"tag:Environment\"),\n\t\t\t\tValues: []*string{aws.String(environment)},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: aws.String(\"instance-state-name\"),\n\t\t\t\tValues: []*string{aws.String(ec2.InstanceStateNameRunning)},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor {\n\t\tif result != nil {\n\t\t\tif result.NextToken == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\trequest.SetNextToken(*result.NextToken)\n\t\t}\n\n\t\tif result, err = ec2Svc.DescribeInstances(request); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, r := range result.Reservations {\n\t\t\tfor _, i := range r.Instances {\n\t\t\t\tvar name, ansibleGroup string\n\t\t\t\tfor _, tag := range i.Tags {\n\t\t\t\t\tif tag.Key != nil && *tag.Key == \"Name\" && tag.Value != nil {\n\t\t\t\t\t\tname = *tag.Value\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t} else if tag.Key != nil && *tag.Key == \"AnsibleGroup\" && tag.Value != nil {\n\t\t\t\t\t\tansibleGroup = *tag.Value\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tvar ipAddr string\n\t\t\t\tif len(i.NetworkInterfaces) > 0 && len(i.NetworkInterfaces[0].PrivateIpAddresses) > 0 {\n\t\t\t\t\tif i.NetworkInterfaces[0].PrivateIpAddresses[0].PrivateIpAddress != nil {\n\t\t\t\t\t\tipAddr = *i.NetworkInterfaces[0].PrivateIpAddresses[0].PrivateIpAddress\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tresultCache[environment] = append(resultCache[environment], EC2Result{\n\t\t\t\t\tName: name,\n\t\t\t\t\tIPAddress: ipAddr,\n\t\t\t\t\tEnvironment: environment,\n\t\t\t\t\tAnsibleGroups: strings.Split(ansibleGroup, \",\"),\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\tsort.Slice(resultCache[environment], func(i, j int) bool { return resultCache[environment][i].Name < resultCache[environment][j].Name })\n\treturn resultCache[environment], nil\n}", "func (provider *ResourceProvider) AllEC2Instances() ([]reachAWS.EC2Instance, error) {\n\tconst errFormat = \"unable to get all EC2 instances: %v\"\n\n\tdescribeInstancesOutput, err := provider.ec2.DescribeInstances(nil)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(errFormat, err)\n\t}\n\n\treservations := describeInstancesOutput.Reservations\n\tinstances, err := extractEC2Instances(reservations)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(errFormat, err)\n\t}\n\n\treturn instances, nil\n}", "func ReplaceNodeASGInstances(ctx context.Context, f *framework.Framework, nodes []corev1.Node) error {\n\tvar asgs 
[]*string\n\tvar nodeNames []*string\n\tvar instanceIDsTerminate []*string\n\n\tfor _, node := range nodes {\n\t\tnodeName := node.Name\n\t\tnodeNames = append(nodeNames, &nodeName)\n\t}\n\n\t// Get instance IDs\n\tfilterName := \"private-dns-name\"\n\tdescribeInstancesInput := &ec2.DescribeInstancesInput{\n\t\tFilters: []*ec2.Filter{\n\t\t\t{\n\t\t\t\tName: &filterName,\n\t\t\t\tValues: nodeNames,\n\t\t\t},\n\t\t},\n\t}\n\tinstancesToTerminate, err := f.Cloud.EC2().DescribeInstancesAsList(aws.BackgroundContext(), describeInstancesInput)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tdefault:\n\t\t\t\tlog.Debug(aerr)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Debug(err)\n\t\t}\n\t\treturn err\n\t}\n\tif len(instancesToTerminate) == 0 {\n\t\treturn errors.New(\"No instances found\")\n\t}\n\tfor i, instance := range instancesToTerminate {\n\t\tlog.Debugf(\"Terminating instance %d/%d (name: %v, id: %v)\", i+1, len(instancesToTerminate), *(instance.PrivateDnsName), *(instance.InstanceId))\n\t\tinstanceIDsTerminate = append(instanceIDsTerminate, instance.InstanceId)\n\t}\n\t// Terminate instances\n\tfor _, instanceID := range instanceIDsTerminate {\n\t\tterminateInstanceInASGInput := &autoscaling.TerminateInstanceInAutoScalingGroupInput{\n\t\t\tInstanceId: aws.String(*instanceID),\n\t\t\tShouldDecrementDesiredCapacity: aws.Bool(false),\n\t\t}\n\t\tresult, err := f.Cloud.AutoScaling().TerminateInstanceInAutoScalingGroup(terminateInstanceInASGInput)\n\t\tif err != nil {\n\t\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\t\tswitch aerr.Code() {\n\t\t\t\tcase autoscaling.ErrCodeScalingActivityInProgressFault:\n\t\t\t\t\tlog.Debug(autoscaling.ErrCodeScalingActivityInProgressFault, aerr.Error())\n\t\t\t\tcase autoscaling.ErrCodeResourceContentionFault:\n\t\t\t\t\tlog.Debug(autoscaling.ErrCodeResourceContentionFault, aerr.Error())\n\t\t\t\tdefault:\n\t\t\t\t\tlog.Debug(aerr.Error())\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t\t// Message from an error.\n\t\t\t\tlog.Debug(err.Error())\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tasgs = append(asgs, result.Activity.AutoScalingGroupName)\n\t}\n\t// Ensure node is in terminating state\n\ttime.Sleep(time.Second * 2)\n\n\t// Wait for ASGs to be in service\n\tdescribeASGsInput := &autoscaling.DescribeAutoScalingGroupsInput{\n\t\tAutoScalingGroupNames: asgs,\n\t}\n\n\t// Wait for instances and nodes to be ready\n\treturn WaitForASGInstancesAndNodesReady(ctx, f, describeASGsInput)\n}", "func DescribeInstances(session *session.Session, ssmFilters string) ([]Instance, error) {\n\tvar err error\n\tresults := make([]Instance, 0)\n\tinstanceMap := make(map[string]Instance)\n\tssmSvc := ssm.New(session)\n\n\tssmInput := ssm.DescribeInstanceInformationInput{}\n\tssmInput.Filters, err = genSSMFilters(ssmFilters)\n\tif err != nil {\n\t\treturn results, err\n\t}\n\n\tinstanceIDs := make([]*string, 0)\n\terr = ssmSvc.DescribeInstanceInformationPages(&ssmInput,\n\t\tfunc(page *ssm.DescribeInstanceInformationOutput, lastPage bool) bool {\n\t\t\tfor _, inst := range page.InstanceInformationList {\n\t\t\t\tinstanceIDs = append(instanceIDs, inst.InstanceId)\n\t\t\t\tinstanceMap[aws.StringValue(inst.InstanceId)] = Instance{SSMInformation: *inst}\n\t\t\t}\n\n\t\t\treturn true\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn results, err\n\t}\n\n\t// If no SSM instances are available return an empty slice\n\tif len(instanceIDs) == 0 {\n\t\treturn []Instance{}, nil\n\t}\n\n\tec2Svc 
:= ec2.New(session)\n\tdescribeInstInput := &ec2.DescribeInstancesInput{\n\t\tInstanceIds: instanceIDs,\n\t}\n\n\terr = ec2Svc.DescribeInstancesPages(describeInstInput,\n\t\tfunc(page *ec2.DescribeInstancesOutput, latPage bool) bool {\n\t\t\tfor _, reservation := range page.Reservations {\n\t\t\t\tfor _, inst := range reservation.Instances {\n\t\t\t\t\ttempInst := instanceMap[aws.StringValue(inst.InstanceId)]\n\t\t\t\t\ttempInst.EC2Information = *inst\n\t\t\t\t\tinstanceMap[aws.StringValue(inst.InstanceId)] = tempInst\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn true\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn results, err\n\t}\n\n\tfor _, inst := range instanceMap {\n\t\tresults = append(results, inst)\n\t}\n\n\treturn results, nil\n}", "func (i *instance) Go(ctx context.Context) {\n\tconst maxTries = 5\n\ttype stateT int\n\tconst (\n\t\t// Perform capacity check for EC2 spot.\n\t\tstateCapacity stateT = iota\n\t\t// Launch the instance via EC2.\n\t\tstateLaunch\n\t\t// Tag the instance\n\t\tstateTag\n\t\t// Wait for the instance to enter running state.\n\t\tstateWait\n\t\t// Describe the instance via EC2.\n\t\tstateDescribe\n\t\t// Wait for offers to appear--i.e., the Reflowlet is live.\n\t\tstateOffers\n\t\tstateDone\n\t)\n\tvar (\n\t\tstate stateT\n\t\tid string\n\t\tdns string\n\t\tn int\n\t\td = 5 * time.Second\n\t)\n\t// TODO(marius): propagate context to the underlying AWS calls\n\tfor state < stateDone && ctx.Err() == nil {\n\t\tswitch state {\n\t\tcase stateCapacity:\n\t\t\tif !i.Spot {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// 20 instances should be a good margin for spot.\n\t\t\tvar ok bool\n\t\t\tok, i.err = i.ec2HasCapacity(ctx, 20)\n\t\t\tif i.err == nil && !ok {\n\t\t\t\ti.err = errors.E(errors.Unavailable, errors.New(\"ec2 capacity is likely exhausted\"))\n\t\t\t}\n\t\tcase stateLaunch:\n\t\t\tid, i.err = i.launch(ctx)\n\t\t\tif i.err != nil {\n\t\t\t\ti.Log.Errorf(\"instance launch error: %v\", i.err)\n\t\t\t} else {\n\t\t\t\tspot := \"\"\n\t\t\t\tif i.Spot {\n\t\t\t\t\tspot = \"spot \"\n\t\t\t\t}\n\t\t\t\ti.Log.Printf(\"launched %sinstance %v: %s: %s %d %s\",\n\t\t\t\t\tspot,\n\t\t\t\t\tid, i.Config.Type,\n\t\t\t\t\tdata.Size(i.Config.Resources.Memory),\n\t\t\t\t\ti.Config.Resources.CPU,\n\t\t\t\t\tdata.Size(i.Config.Resources.Disk))\n\t\t\t}\n\n\t\tcase stateTag:\n\t\t\tinput := &ec2.CreateTagsInput{\n\t\t\t\tResources: []*string{aws.String(id)},\n\t\t\t\tTags: []*ec2.Tag{{Key: aws.String(\"Name\"), Value: aws.String(i.Tag)}},\n\t\t\t}\n\t\t\tfor k, v := range i.Labels {\n\t\t\t\tinput.Tags = append(input.Tags, &ec2.Tag{Key: aws.String(k), Value: aws.String(v)})\n\t\t\t}\n\t\t\t_, i.err = i.EC2.CreateTags(input)\n\t\tcase stateWait:\n\t\t\ti.err = i.EC2.WaitUntilInstanceRunning(&ec2.DescribeInstancesInput{\n\t\t\t\tInstanceIds: []*string{aws.String(id)},\n\t\t\t})\n\t\tcase stateDescribe:\n\t\t\tvar resp *ec2.DescribeInstancesOutput\n\t\t\tresp, i.err = i.EC2.DescribeInstances(&ec2.DescribeInstancesInput{\n\t\t\t\tInstanceIds: []*string{aws.String(id)},\n\t\t\t})\n\t\t\tif len(resp.Reservations) != 1 || len(resp.Reservations[0].Instances) != 1 {\n\t\t\t\ti.err = errors.Errorf(\"ec2.describeinstances %v: invalid output\", id)\n\t\t\t}\n\t\t\tif i.err == nil {\n\t\t\t\ti.ec2inst = resp.Reservations[0].Instances[0]\n\t\t\t\tif i.ec2inst.PublicDnsName == nil || *i.ec2inst.PublicDnsName == \"\" {\n\t\t\t\t\ti.err = errors.Errorf(\"ec2.describeinstances %v: no public DNS name\", id)\n\t\t\t\t} else {\n\t\t\t\t\tdns = *i.ec2inst.PublicDnsName\n\t\t\t\t}\n\t\t\t}\n\t\tcase stateOffers:\n\t\t\tvar 
pool pool.Pool\n\t\t\tpool, i.err = client.New(fmt.Sprintf(\"https://%s:9000/v1/\", dns), i.HTTPClient, nil /*log.New(os.Stderr, \"client: \", 0)*/)\n\t\t\tif i.err != nil {\n\t\t\t\ti.err = errors.E(errors.Fatal, i.err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tctx, cancel := context.WithTimeout(ctx, 10*time.Second)\n\t\t\t_, i.err = pool.Offers(ctx)\n\t\t\tif i.err != nil && strings.HasSuffix(i.err.Error(), \"connection refused\") {\n\t\t\t\ti.err = errors.E(errors.Temporary, i.err)\n\t\t\t}\n\t\t\tcancel()\n\t\tdefault:\n\t\t\tpanic(\"unknown state\")\n\t\t}\n\t\tif i.err == nil {\n\t\t\tn = 0\n\t\t\td = 5 * time.Second\n\t\t\tstate++\n\t\t\tcontinue\n\t\t}\n\t\tif n == maxTries {\n\t\t\tbreak\n\t\t}\n\t\tif awserr, ok := i.err.(awserr.Error); ok {\n\t\t\tswitch awserr.Code() {\n\t\t\t// According to EC2 API docs, these codes indicate\n\t\t\t// capacity issues.\n\t\t\t//\n\t\t\t// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html\n\t\t\t//\n\t\t\t// TODO(marius): add a separate package for interpreting AWS errors.\n\t\t\tcase \"InsufficientCapacity\", \"InsufficientInstanceCapacity\", \"InsufficientHostCapacity\", \"InsufficientReservedInstanceCapacity\", \"InstanceLimitExceeded\":\n\t\t\t\ti.err = errors.E(errors.Unavailable, awserr)\n\t\t\t}\n\t\t}\n\t\tswitch {\n\t\tcase i.err == nil:\n\t\tcase errors.Match(errors.Fatal, i.err):\n\t\t\treturn\n\t\tcase errors.Match(errors.Unavailable, i.err):\n\t\t\t// Return these immediately because our caller may be able to handle\n\t\t\t// them by selecting a different instance type.\n\t\t\treturn\n\t\tcase !errors.Recover(i.err).Timeout() && !errors.Recover(i.err).Temporary():\n\t\t\ti.Log.Errorf(\"instance error: %v\", i.err)\n\t\t}\n\t\ttime.Sleep(d)\n\t\tn++\n\t\td *= time.Duration(2)\n\t}\n\tif i.err != nil {\n\t\treturn\n\t}\n\ti.err = ctx.Err()\n}", "func getEC2Hosts(aws awsECSClient, state *ecsState) error {\n\t// Now extract all ec2 instance IDs\n\tec2IDs := []string{}\n\tfor cid, ci := range state.live.containerInstances {\n\t\tid := ptr.StringValue(ci.Ec2InstanceId)\n\t\tif id == \"\" {\n\t\t\tconsole.Error().Printf(\"Container %s did not contain a EC2 Instance ID\", cid)\n\t\t\tcontinue\n\t\t}\n\t\tec2IDs = append(ec2IDs, id)\n\t}\n\n\tsort.Strings(ec2IDs)\n\tec2IDs = dedupe.Strings(ec2IDs)\n\thosts, err := aws.GetEC2Instances(ec2IDs...)\n\tif err != nil {\n\t\treturn err\n\t}\n\tstate.live.ec2Hosts = hosts\n\n\treturn nil\n}", "func (ms *MachinePlugin) getInstancesFromMachineName(machineName string, providerSpec *api.AWSProviderSpec, secrets *api.Secrets) ([]*ec2.Instance, error) {\n\tvar (\n\t\tclusterName string\n\t\tnodeRole string\n\t\tinstances []*ec2.Instance\n\t)\n\n\tsvc, err := ms.createSVC(secrets, providerSpec.Region)\n\tif err != nil {\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\n\tfor key := range providerSpec.Tags {\n\t\tif strings.Contains(key, \"kubernetes.io/cluster/\") {\n\t\t\tclusterName = key\n\t\t} else if strings.Contains(key, \"kubernetes.io/role/\") {\n\t\t\tnodeRole = key\n\t\t}\n\t}\n\n\tinput := ec2.DescribeInstancesInput{\n\t\tFilters: []*ec2.Filter{\n\t\t\t&ec2.Filter{\n\t\t\t\tName: aws.String(\"tag:Name\"),\n\t\t\t\tValues: []*string{\n\t\t\t\t\taws.String(machineName),\n\t\t\t\t},\n\t\t\t},\n\t\t\t&ec2.Filter{\n\t\t\t\tName: aws.String(\"tag-key\"),\n\t\t\t\tValues: []*string{\n\t\t\t\t\t&clusterName,\n\t\t\t\t},\n\t\t\t},\n\t\t\t&ec2.Filter{\n\t\t\t\tName: aws.String(\"tag-key\"),\n\t\t\t\tValues: 
[]*string{\n\t\t\t\t\t&nodeRole,\n\t\t\t\t},\n\t\t\t},\n\t\t\t&ec2.Filter{\n\t\t\t\tName: aws.String(\"instance-state-name\"),\n\t\t\t\tValues: []*string{\n\t\t\t\t\taws.String(\"pending\"),\n\t\t\t\t\taws.String(\"running\"),\n\t\t\t\t\taws.String(\"stopping\"),\n\t\t\t\t\taws.String(\"stopped\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\trunResult, err := svc.DescribeInstances(&input)\n\tif err != nil {\n\t\tglog.Errorf(\"AWS plugin is returning error while describe instances request is sent: %s\", err)\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\n\tfor _, reservation := range runResult.Reservations {\n\t\tfor _, instance := range reservation.Instances {\n\t\t\tinstances = append(instances, instance)\n\t\t}\n\t}\n\tif len(instances) == 0 {\n\t\terrMessage := fmt.Sprintf(\"AWS plugin is returning no VM instances backing this machine object\")\n\t\treturn nil, status.Error(codes.NotFound, errMessage)\n\t}\n\n\treturn instances, nil\n}", "func (lb *Elb) getInstancesInAwsElb() (instances []string, err error) {\n\tinstances = []string{}\n\toptions := elb.DescribeLoadBalancer{\n\t\tNames: []string{lb.name},\n\t}\n\tresp, err := lb.awsClient.DescribeLoadBalancers(&options)\n\tif err == nil {\n\t\tfor _, instance := range resp.LoadBalancers[0].Instances {\n\t\t\tinstances = append(instances, instance.InstanceId)\n\t\t}\n\t\tlog.Printf(\"-- ELB:%s:instancesInAws:%s\\n\", lb.name, instances)\n\t}\n\treturn\n}", "func waitForAWSInstances(api *ec2.EC2, ids []*string, d time.Duration) error {\n\tafter := time.After(d)\n\n\tonline := make(map[string]bool)\n\n\tfor len(ids) != len(online) {\n\t\tselect {\n\t\tcase <-after:\n\t\t\treturn fmt.Errorf(\"timed out waiting for instances to run\")\n\t\tdefault:\n\t\t}\n\n\t\t// don't make api calls too quickly, or we will hit the rate limit\n\n\t\ttime.Sleep(10 * time.Second)\n\n\t\tgetinst := &ec2.DescribeInstancesInput{\n\t\t\tInstanceIds: ids,\n\t\t}\n\n\t\tinsts, err := api.DescribeInstances(getinst)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, r := range insts.Reservations {\n\t\t\tfor _, i := range r.Instances {\n\t\t\t\t// skip instances known to be up\n\t\t\t\tif online[*i.InstanceId] {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// \"running\"\n\t\t\t\tif *i.State.Code == int64(16) {\n\t\t\t\t\t// XXX: ssh is a terrible way to check this, but it is all we have.\n\t\t\t\t\tc, err := net.DialTimeout(\"tcp\", *i.PublicIpAddress+\":22\", 10*time.Second)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tc.Close()\n\n\t\t\t\t\tonline[*i.InstanceId] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (p *plugin) DescribeInstances(tags map[string]string, properties bool) ([]instance.Description, error) {\n\tlog.Debug(fmt.Sprintf(\"describe-instances: %v\", tags))\n\tresults := []instance.Description{}\n\n\tgroupName := tags[group.GroupTag]\n\n\tinstances, err := findGroupInstances(p, groupName)\n\tif err != nil {\n\t\tlog.Error(\"Problems finding group instances\", \"err\", err)\n\t}\n\n\t// Iterate through group instances and find the sha from their annotation field\n\tfor _, vmInstance := range instances {\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\tdefer cancel()\n\n\t\tconfigSHA := returnDataFromVM(ctx, vmInstance, \"sha\")\n\t\tguestIP := returnDataFromVM(ctx, vmInstance, \"guestIP\")\n\n\t\t// Duplicate original tags\n\t\tvmTags := make(map[string]string)\n\t\tfor k, v := range tags {\n\t\t\tvmTags[k] = v\n\t\t}\n\n\t\tvmTags[group.ConfigSHATag] = 
configSHA\n\t\tvmTags[\"guestIP\"] = guestIP\n\t\tresults = append(results, instance.Description{\n\t\t\tID: instance.ID(vmInstance.Name()),\n\t\t\tLogicalID: nil,\n\t\t\tTags: vmTags,\n\t\t})\n\t}\n\tlog.Debug(\"Updating FSM\", \"Count\", len(p.fsm))\n\n\t// DIFF what the endpoint is saying as reported versus what we have in the FSM\n\tvar updatedFSM []provisioningFSM\n\tfor _, unprovisionedInstance := range p.fsm {\n\t\tvar provisioned bool\n\n\t\tfor _, provisionedInstance := range results {\n\n\t\t\tif string(provisionedInstance.ID) == unprovisionedInstance.instanceName {\n\t\t\t\tprovisioned = true\n\t\t\t\t// instance has been provisioned so break from loop\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tprovisioned = false\n\t\t\t}\n\t\t}\n\t\tif provisioned == false && unprovisionedInstance.timer.After(time.Now()) && unprovisionedInstance.tags[group.GroupTag] == tags[group.GroupTag] {\n\t\t\tupdatedFSM = append(updatedFSM, unprovisionedInstance)\n\t\t}\n\t}\n\n\tp.fsm = make([]provisioningFSM, len(updatedFSM))\n\tcopy(p.fsm, updatedFSM)\n\n\tlog.Debug(\"FSM Updated\", \"Count\", len(p.fsm))\n\tfor _, unprovisionedInstances := range p.fsm {\n\t\tresults = append(results, instance.Description{\n\t\t\tID: instance.ID(unprovisionedInstances.instanceName),\n\t\t\tLogicalID: nil,\n\t\t\tTags: unprovisionedInstances.tags,\n\t\t})\n\t}\n\tif len(results) == 0 {\n\t\tlog.Info(\"No Instances found\")\n\t}\n\treturn results, nil\n}", "func (p providerServices) EC2() ec2.EC2er {\n\treturn p.ec2\n}", "func ConnectEC2(sshEntries lib.SSHEntries, sshConfigPath string, args []string) {\n\t// get the pub key from the ssh agent first\n\tsshAgent, err := net.Dial(\"unix\", os.Getenv(\"SSH_AUTH_SOCK\"))\n\tif err != nil {\n\t\tlog.WithError(err).Fatal(\"can't connect to ssh agent, maybe SSH_AUTH_SOCK is unset?\")\n\t}\n\n\tkeys, err := agent.NewClient(sshAgent).List()\n\tif err != nil || len(keys) < 1 {\n\t\tlog.Fatal(\"Can't get public keys from ssh agent. 
Please ensure you have the ssh-agent running and have at least one identity added (with ssh-add)\")\n\t}\n\tpubkey := keys[0].String()\n\n\t// push the pub key to those instances one after each other\n\t// TODO: maybe make it parallel\n\tfor _, sshEntry := range sshEntries {\n\t\tvar instanceName = sshEntry.InstanceID\n\t\tif len(sshEntry.Names) > 0 {\n\t\t\tinstanceName = sshEntry.Names[0]\n\t\t}\n\t\tlog.WithField(\"instance\", instanceName).Info(\"trying to do ec2 connect...\")\n\t\tinstanceIPAddress, instanceUser, err := pushEC2Connect(sshEntry.ProfileConfig.Name, sshEntry.InstanceID, sshEntry.User, pubkey)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Fatal(\"can't push ssh key to the instance\")\n\t\t}\n\t\t// if the address is empty we set to the value we got from ec2 connect push\n\t\tif sshEntry.Address == \"\" {\n\t\t\tsshEntry.Address = instanceIPAddress\n\t\t}\n\t\tif sshEntry.User == \"\" {\n\t\t\tsshEntry.User = instanceUser\n\t\t}\n\t}\n\n\t// then generate ssh config for all instances in sshEntries\n\t// save the dynamic ssh config first\n\tif err := sshEntries.SaveConfig(sshConfigPath); err != nil {\n\t\tlog.WithError(err).Fatal(\"can't save ssh config for ec2 connect\")\n\t}\n\n\tvar instanceName = sshEntries[0].InstanceID\n\tif len(sshEntries[0].Names) > 0 {\n\t\tinstanceName = sshEntries[0].Names[0]\n\t}\n\t// connect to the first instance in sshEntry, as the other will be bastion(s)\n\tif len(args) == 0 {\n\t\t// construct default args\n\t\targs = []string{\n\t\t\t\"ssh\",\n\t\t\t\"-tt\",\n\t\t\tinstanceName,\n\t\t}\n\t}\n\n\tcommand, err := exec.LookPath(args[0])\n\tif err != nil {\n\t\tlog.WithError(err).Fatal(\"Can't find the binary in the PATH\")\n\t}\n\n\tvar replacer = strings.NewReplacer(\n\t\t\"{host}\", instanceName,\n\t\t\"{user}\", sshEntries[0].User,\n\t)\n\tvar newArgs []string\n\tfor _, arg := range args {\n\t\tnewArgs = append(newArgs, replacer.Replace(arg))\n\t}\n\tlog.WithField(\"instance_id\", sshEntries[0].InstanceID).Infof(\"Connecting to the instance using '%s'\", strings.Join(newArgs, \" \"))\n\n\tif err := syscall.Exec(command, newArgs, os.Environ()); err != nil {\n\t\tlog.WithFields(log.Fields{\"command\": command}).WithError(err).Fatal(\"can't run the command\")\n\t}\n}", "func (n *EC2NodePoolBackend) getInstances(filters []*ec2.Filter) ([]*ec2.Instance, error) {\n\tparams := &ec2.DescribeInstancesInput{\n\t\tFilters: filters,\n\t}\n\n\tinstances := make([]*ec2.Instance, 0)\n\terr := n.ec2Client.DescribeInstancesPagesWithContext(context.TODO(), params, func(output *ec2.DescribeInstancesOutput, lastPage bool) bool {\n\t\tfor _, reservation := range output.Reservations {\n\t\t\tfor _, instance := range reservation.Instances {\n\t\t\t\tswitch aws.StringValue(instance.State.Name) {\n\t\t\t\tcase ec2.InstanceStateNameRunning, ec2.InstanceStateNamePending, ec2.InstanceStateNameStopped:\n\t\t\t\t\tinstances = append(instances, instance)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn instances, nil\n}", "func (a *Client) GetInstancesEvents2(params *GetInstancesEvents2Params) (*GetInstancesEvents2OK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetInstancesEvents2Params()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getInstancesEvents2\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/instances/{id}/events\",\n\t\tProducesMediaTypes: []string{\"\"},\n\t\tConsumesMediaTypes: []string{\"\"},\n\t\tSchemes: 
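// this operation is only exposed over HTTPS\n\t\t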
[]string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetInstancesEvents2Reader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetInstancesEvents2OK), nil\n\n}", "func (client *Client) EC2SLs(options EC2SLsOptions) error {\n\tsnapshots, err := client.FindEC2Snapshots(options.FilterTag, options.All)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, snapshot := range snapshots {\n\t\tfmt.Fprintln(client.stdout, formatEC2Snapshot(client, options, snapshot))\n\t}\n\treturn nil\n}", "func (m *EC2Client) DescribeEC2Instances(filter *ec2.DescribeInstancesInput) ([]*ec2.Instance, error) {\n\tinstances := []*ec2.Instance{}\n\tresult, err := m.EC2API.DescribeInstances(filter)\n\tif err != nil {\n\t\tif awsErr, ok := err.(awserr.Error); ok {\n\t\t\treturn nil, awsErr\n\t\t}\n\t}\n\tfor _, rsvp := range result.Reservations {\n\t\tfor _, instance := range rsvp.Instances {\n\t\t\tinstances = append(instances, instance)\n\t\t}\n\t}\n\treturn instances, nil\n}", "func (o FleetOutput) Ec2InstanceType() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Fleet) pulumi.StringOutput { return v.Ec2InstanceType }).(pulumi.StringOutput)\n}", "func (provider *ResourceProvider) EC2Instance(id string) (*reachAWS.EC2Instance, error) {\n\tinput := &ec2.DescribeInstancesInput{\n\t\tInstanceIds: []*string{\n\t\t\taws.String(id),\n\t\t},\n\t}\n\tresult, err := provider.ec2.DescribeInstances(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinstances, err := extractEC2Instances(result.Reservations)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(instances) == 0 {\n\t\treturn nil, fmt.Errorf(\"AWS API returned no instances for ID '%s'\", id)\n\t}\n\n\tif len(instances) > 1 {\n\t\treturn nil, fmt.Errorf(\"AWS API returned more than one instance for ID '%s'\", id)\n\t}\n\n\tinstance := instances[0]\n\treturn &instance, nil\n}", "func testRunInstances(input *ec2.RunInstancesInput) (*ec2.Reservation, error) {\n\tif *input.PrivateIpAddress == \"bad\" {\n\t\treturn nil, fmt.Errorf(\"error\")\n\t}\n\treturn testEC2Reservation(), nil\n}", "func WaitForASGDesiredCapacity(ctx context.Context, f *framework.Framework, nodes []corev1.Node, desiredCapacity int) error {\n\tvar asgNames []*string\n\tvar asgs []*autoscaling.Group\n\tvar nodeNames []*string\n\tvar instanceIDs []*string\n\n\tif len(nodes) >= desiredCapacity {\n\t\treturn nil\n\t}\n\n\tfor _, node := range nodes {\n\t\tnodeName := node.Name\n\t\tnodeNames = append(nodeNames, &nodeName)\n\t}\n\n\t// Get instance IDs\n\tfilterName := \"private-dns-name\"\n\tdescribeInstancesInput := &ec2.DescribeInstancesInput{\n\t\tFilters: []*ec2.Filter{\n\t\t\t{\n\t\t\t\tName: &filterName,\n\t\t\t\tValues: nodeNames,\n\t\t\t},\n\t\t},\n\t}\n\tinstances, err := f.Cloud.EC2().DescribeInstancesAsList(aws.BackgroundContext(), describeInstancesInput)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tdefault:\n\t\t\t\tlog.Debug(aerr)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Debug(err)\n\t\t}\n\t\treturn err\n\t}\n\tif len(instances) == 0 {\n\t\treturn errors.New(\"No instances found\")\n\t}\n\n\t// Get ASGs\n\tfor _, instance := range instances {\n\t\tinstanceIDs = append(instanceIDs, instance.InstanceId)\n\t}\n\tdescribeAutoScalingInstancesInput := &autoscaling.DescribeAutoScalingInstancesInput{\n\t\tInstanceIds: instanceIDs,\n\t}\n\tasgInstanceDetails, err := 
f.Cloud.AutoScaling().DescribeAutoScalingInstancesAsList(aws.BackgroundContext(), describeAutoScalingInstancesInput)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, asgInstance := range asgInstanceDetails {\n\t\tasgNames = append(asgNames, asgInstance.AutoScalingGroupName)\n\t}\n\tdescribeASGsInput := &autoscaling.DescribeAutoScalingGroupsInput{\n\t\tAutoScalingGroupNames: asgNames,\n\t}\n\tasgs, err = f.Cloud.AutoScaling().DescribeAutoScalingGroupsAsList(aws.BackgroundContext(), describeASGsInput)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Debug(\"verifying desired capacity of ASGs\")\n\tcap := int64(math.Ceil(float64(desiredCapacity) / float64(len(asgs))))\n\tfor _, asg := range asgs {\n\t\tif *(asg.DesiredCapacity) < cap {\n\t\t\tlog.Debugf(\"\")\n\t\t\tmax := *(asg.MaxSize)\n\t\t\tif max < cap {\n\t\t\t\tmax = cap\n\t\t\t}\n\t\t\tlog.Debugf(\"increasing ASG desired capacity to %d\", cap)\n\t\t\tupdateAutoScalingGroupInput := &autoscaling.UpdateAutoScalingGroupInput{\n\t\t\t\tAutoScalingGroupName: asg.AutoScalingGroupName,\n\t\t\t\tDesiredCapacity: &cap,\n\t\t\t\tMaxSize: &max,\n\t\t\t}\n\t\t\t_, err := f.Cloud.AutoScaling().UpdateAutoScalingGroup(updateAutoScalingGroupInput)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// Wait for instances and nodes to be ready\n\treturn WaitForASGInstancesAndNodesReady(ctx, f, describeASGsInput)\n}", "func Listv2SGs(se Session) (PolicyList []interface{}) {\n\t// Create an EC2 service client.\n\tsvc := ec2.New(se.Sess)\n\n\t// Retrieve the security sg descriptions\n\tresult, err := svc.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{\n\t\tDryRun: aws.Bool(false),\n\t})\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase \"InvalidGroupId.Malformed\":\n\t\t\t\tfallthrough\n\t\t\tcase \"InvalidGroup.NotFound\":\n\t\t\t\texitErrorf(\"%s.\", aerr.Message())\n\t\t\t}\n\t\t}\n\t\texitErrorf(\"Unable to get descriptions for security sgs, %v\", err)\n\t}\n\t//\n\tpolicy := new(PolicyDetail)\n\tfor _, sg := range result.SecurityGroups {\n\t\tfor _, ippermission := range sg.IpPermissions {\n\t\t\tif *ippermission.IpProtocol == \"-1\" {\n\t\t\t\tif len(ippermission.IpRanges) != 0 {\n\t\t\t\t\tfor _, permission := range ippermission.IpRanges {\n\t\t\t\t\t\t// fmt.Println(\" ALL IpRanges information:\", *sg.GroupName, *sg.VpcId, *sg.GroupId, \"ALL PROTOCOL\", \"from port ALL\", \"end port ALL\", *permission.CidrIp)\n\t\t\t\t\t\tpolicy.GroupName = *sg.GroupName\n\t\t\t\t\t\tpolicy.VpcId = *sg.VpcId\n\t\t\t\t\t\tpolicy.GroupId = *sg.GroupId\n\t\t\t\t\t\tpolicy.Source = *permission.CidrIp\n\t\t\t\t\t\tpolicy.Protocol = \"ALL\"\n\t\t\t\t\t\tpolicy.FromPort = 0\n\t\t\t\t\t\tpolicy.ToPort = 65535\n\t\t\t\t\t\tPolicyList = append(PolicyList, *policy)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif len(ippermission.UserIdGroupPairs) != 0 {\n\t\t\t\t\tfor _, permission := range ippermission.UserIdGroupPairs {\n\t\t\t\t\t\t// fmt.Println(\" ALL GroupPairs information:\", *sg.GroupName, *sg.VpcId, *sg.GroupId, \"ALLPROTOCOL\", \"fromportALL\", \"endportALL\", *permission.GroupId)\n\t\t\t\t\t\tpolicy.GroupName = *sg.GroupName\n\t\t\t\t\t\tpolicy.VpcId = *sg.VpcId\n\t\t\t\t\t\tpolicy.GroupId = *sg.GroupId\n\t\t\t\t\t\tpolicy.Source = *permission.GroupId\n\t\t\t\t\t\tpolicy.Protocol = \"ALL\"\n\t\t\t\t\t\tpolicy.FromPort = 0\n\t\t\t\t\t\tpolicy.ToPort = 65535\n\t\t\t\t\t\tPolicyList = append(PolicyList, *policy)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif len(ippermission.PrefixListIds) != 0 {\n\t\t\t\t\tfor 
_, permission := range ippermission.PrefixListIds {\n\t\t\t\t\t\t//fmt.Println(\" ===all===Prefix information:\", permission)\n\t\t\t\t\t\tpolicy.GroupName = *sg.GroupName\n\t\t\t\t\t\tpolicy.VpcId = *sg.VpcId\n\t\t\t\t\t\tpolicy.GroupId = *sg.GroupId\n\t\t\t\t\t\tpolicy.Source = *permission.PrefixListId\n\t\t\t\t\t\tpolicy.Protocol = \"Unknown\"\n\t\t\t\t\t\tpolicy.FromPort = 0\n\t\t\t\t\t\tpolicy.ToPort = 65535\n\t\t\t\t\t\tPolicyList = append(PolicyList, *policy)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif len(ippermission.IpRanges) != 0 {\n\t\t\t\t\tfor _, permission := range ippermission.IpRanges {\n\t\t\t\t\t\t//fmt.Println(\" IpRanges information:\", *sg.GroupName, *sg.VpcId, *sg.GroupId, *ippermission.IpProtocol, *ippermission.FromPort, *ippermission.ToPort, *permission.CidrIp)\n\t\t\t\t\t\tpolicy.GroupName = *sg.GroupName\n\t\t\t\t\t\tpolicy.VpcId = *sg.VpcId\n\t\t\t\t\t\tpolicy.GroupId = *sg.GroupId\n\t\t\t\t\t\tpolicy.Source = *permission.CidrIp\n\t\t\t\t\t\tpolicy.Protocol = *ippermission.IpProtocol\n\t\t\t\t\t\tpolicy.FromPort = *ippermission.FromPort\n\t\t\t\t\t\tpolicy.ToPort = *ippermission.ToPort\n\t\t\t\t\t\tPolicyList = append(PolicyList, *policy)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif len(ippermission.UserIdGroupPairs) != 0 {\n\t\t\t\t\tfor _, permission := range ippermission.UserIdGroupPairs {\n\t\t\t\t\t\t//fmt.Println(\" GroupPairs information:\", *sg.GroupName, *sg.VpcId, *sg.GroupId, *ippermission.IpProtocol, *ippermission.FromPort, *ippermission.ToPort, *permission.GroupId )\n\t\t\t\t\t\tpolicy.GroupName = *sg.GroupName\n\t\t\t\t\t\tpolicy.VpcId = *sg.VpcId\n\t\t\t\t\t\tpolicy.GroupId = *sg.GroupId\n\t\t\t\t\t\tpolicy.Source = *permission.GroupId\n\t\t\t\t\t\tpolicy.Protocol = *ippermission.IpProtocol\n\t\t\t\t\t\tpolicy.FromPort = *ippermission.FromPort\n\t\t\t\t\t\tpolicy.ToPort = *ippermission.ToPort\n\t\t\t\t\t\tPolicyList = append(PolicyList, *policy)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif len(ippermission.PrefixListIds) != 0 {\n\t\t\t\t\tfor _, permission := range ippermission.PrefixListIds {\n\t\t\t\t\t\t//fmt.Println(\" ======Prefix information:\", permission)\n\t\t\t\t\t\tpolicy.GroupName = *sg.GroupName\n\t\t\t\t\t\tpolicy.VpcId = *sg.VpcId\n\t\t\t\t\t\tpolicy.GroupId = *sg.GroupId\n\t\t\t\t\t\tpolicy.Source = *permission.PrefixListId\n\t\t\t\t\t\tpolicy.Protocol = \"Unknown\"\n\t\t\t\t\t\tpolicy.FromPort = -1\n\t\t\t\t\t\tpolicy.ToPort = -1\n\t\t\t\t\t\tPolicyList = append(PolicyList, *policy)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn PolicyList\n}", "func (m *MockEC2API) DescribeInstances(*ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error) {\n\tout := &ec2.DescribeInstancesOutput{\n\t\tReservations: []*ec2.Reservation{},\n\t}\n\tfor instanceId, vpcID := range m.VpcIds {\n\t\tout.Reservations = append(out.Reservations, &ec2.Reservation{\n\t\t\tInstances: []*ec2.Instance{&ec2.Instance{\n\t\t\t\tInstanceId: aws.String(instanceId),\n\t\t\t\tVpcId: aws.String(vpcID),\n\t\t\t}},\n\t\t})\n\t}\n\treturn out, nil\n}", "func runAndWaitForInstance(svc *ec2.EC2, name string, params *ec2.RunInstancesInput) (ec2.Instance, error) {\n\trunResult, err := svc.RunInstances(params)\n\tif err != nil {\n\t\treturn ec2.Instance{}, gruntworkerrors.WithStackTrace(err)\n\t}\n\n\tif len(runResult.Instances) == 0 {\n\t\terr := errors.New(\"Could not create test EC2 instance\")\n\t\treturn ec2.Instance{}, gruntworkerrors.WithStackTrace(err)\n\t}\n\n\terr = svc.WaitUntilInstanceExists(&ec2.DescribeInstancesInput{\n\t\tFilters: 
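// wait only on the instance that was just launched\n\t\t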
[]*ec2.Filter{\n\t\t\t&ec2.Filter{\n\t\t\t\tName: awsgo.String(\"instance-id\"),\n\t\t\t\tValues: []*string{runResult.Instances[0].InstanceId},\n\t\t\t},\n\t\t},\n\t})\n\n\tif err != nil {\n\t\treturn ec2.Instance{}, gruntworkerrors.WithStackTrace(err)\n\t}\n\n\t// Add test tag to the created instance\n\t_, err = svc.CreateTags(&ec2.CreateTagsInput{\n\t\tResources: []*string{runResult.Instances[0].InstanceId},\n\t\tTags: []*ec2.Tag{\n\t\t\t{\n\t\t\t\tKey: awsgo.String(\"Name\"),\n\t\t\t\tValue: awsgo.String(name),\n\t\t\t},\n\t\t},\n\t})\n\n\tif err != nil {\n\t\treturn ec2.Instance{}, gruntworkerrors.WithStackTrace(err)\n\t}\n\n\t// EC2 Instance must be in a running before this function returns\n\terr = svc.WaitUntilInstanceRunning(&ec2.DescribeInstancesInput{\n\t\tFilters: []*ec2.Filter{\n\t\t\t&ec2.Filter{\n\t\t\t\tName: awsgo.String(\"instance-id\"),\n\t\t\t\tValues: []*string{runResult.Instances[0].InstanceId},\n\t\t\t},\n\t\t},\n\t})\n\n\tif err != nil {\n\t\treturn ec2.Instance{}, gruntworkerrors.WithStackTrace(err)\n\t}\n\n\treturn *runResult.Instances[0], nil\n\n}", "func WaitForASGInstancesAndNodesReady(ctx context.Context, f *framework.Framework, describeASGsInput *autoscaling.DescribeAutoScalingGroupsInput) error {\n\tvar asgInstanceIDs []*string\n\n\tBy(\"wait until ASG instances are ready\")\n\terr := f.Cloud.AutoScaling().WaitUntilAutoScalingGroupInService(aws.BackgroundContext(), describeASGsInput)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Get instance IDs\n\tasgInstances, err := f.Cloud.AutoScaling().DescribeInServiceAutoScalingGroupInstancesAsList(aws.BackgroundContext(), describeASGsInput)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tBy(\"wait nodes ready\")\n\tfor i, asgInstance := range asgInstances {\n\t\tlog.Debugf(\"Instance %d/%d (id: %s) is in service\", i+1, len(asgInstances), *(asgInstance.InstanceId))\n\t\tasgInstanceIDs = append(asgInstanceIDs, asgInstance.InstanceId)\n\t}\n\tdescribeInstancesInput := &ec2.DescribeInstancesInput{\n\t\tInstanceIds: asgInstanceIDs,\n\t}\n\tinstancesList, err := f.Cloud.EC2().DescribeInstancesAsList(aws.BackgroundContext(), describeInstancesInput)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tdefault:\n\t\t\t\tlog.Debug(aerr)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Debug(err)\n\t\t}\n\t\treturn err\n\t}\n\n\t// Wait until nodes exist and are ready\n\tfor i, instance := range instancesList {\n\t\tnodeName := instance.PrivateDnsName\n\t\tlog.Debugf(\"Wait until node %d/%d (%s) exists\", i+1, len(instancesList), *nodeName)\n\t\tnode := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: *nodeName}}\n\t\tnode, err = f.ResourceManager.WaitNodeExists(ctx, node)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Debugf(\"Wait until node %d/%d (%s) ready\", i+1, len(instancesList), *nodeName)\n\t\t_, err = f.ResourceManager.WaitNodeReady(ctx, node)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func getBkupInstances(svc *ec2.EC2, tagname string) chan *faBkupInstance {\n\n\tc := make(chan *faBkupInstance)\n\n\tgo func(c chan *faBkupInstance, tn string) {\n\t\tdefer close(c)\n\n\t\tparams := &ec2.DescribeInstancesInput{\n\t\t\tFilters: []*ec2.Filter{\n\t\t\t\t{\n\t\t\t\t\tName: aws.String(\"tag-key\"),\n\t\t\t\t\tValues: []*string{\n\t\t\t\t\t\taws.String(tn),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tresp, err := svc.DescribeInstances(params)\n\n\t\tif err != nil {\n\t\t\tc <- &faBkupInstance{ii: nil, e: err}\n\t\t\treturn\n\t\t}\n\n\t\t// for any 
instance found extract tag name and instanceid\n\t\tfor reservation := range resp.Reservations {\n\t\t\tfor _, inst := range resp.Reservations[reservation].Instances {\n\t\t\t\t// Create a new theInstance variable for each run through the loop\n\t\t\t\tnewImage := &ec2.CreateImageInput{}\n\t\t\t\tnewImage.NoReboot = aws.Bool(true)\n\n\t\t\t\tfor _, t := range inst.Tags {\n\t\t\t\t\tswitch *t.Key {\n\t\t\t\t\tcase \"Name\":\n\t\t\t\t\t\tnewImage.Name = aws.String(\n\t\t\t\t\t\t\t*t.Value + \"-\" + strconv.FormatInt(time.Now().Unix(), 10))\n\t\t\t\t\tcase \"bkupReboot\":\n\t\t\t\t\t\t// value in tag is reboot so if tag not present default is no reboot\n\t\t\t\t\t\tif b, err := strconv.ParseBool(*t.Value); err == nil {\n\t\t\t\t\t\t\t// swap value as the question is NoReboot?\n\t\t\t\t\t\t\tnewImage.NoReboot = aws.Bool(!b)\n\t\t\t\t\t\t}\n\t\t\t\t\tdefault:\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif *newImage.Name == \"\" {\n\t\t\t\t\tnewImage.Name = aws.String(\n\t\t\t\t\t\t*inst.InstanceId + \"-\" + strconv.FormatInt(time.Now().Unix(), 10))\n\t\t\t\t}\n\t\t\t\tnewImage.Description = aws.String(\"Auto backup of instance \" + *inst.InstanceId)\n\t\t\t\tnewImage.InstanceId = inst.InstanceId\n\t\t\t\t// append details on this instance to the slice\n\t\t\t\t//fmt.Printf(\"found new: %s\\n\", *newImage.InstanceId)\n\t\t\t\tc <- &faBkupInstance{ii: newImage, e: nil}\n\t\t\t}\n\t\t}\n\t}(c, tagname)\n\treturn c\n}", "func GetInstances(asgc aws.ASGAPI, asgName *string) (aws.Instances, error) {\n\tgroup, err := findByName(asgc, asgName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinstances := aws.Instances{}\n\n\tfor _, i := range group.instances {\n\t\tinstances.AddASGInstance(i)\n\t}\n\n\treturn instances, nil\n}", "func (m *EC2Client) GetInstanceIDs(filter *ec2.DescribeInstancesInput, rgxMatch string, rgxTag string) ([]*string, error) {\n\tinstanceIds := []*string{}\n\tinput := make(chan *ec2.Instance)\n\toutput := make(chan *string)\n\tfor i := 0; i < m.workers; i++ {\n\t\tm.wg.Add(1)\n\t\tgo func() {\n\t\t\tdefer m.wg.Done()\n\t\t\tfor instance := range input {\n\t\t\t\tec2Tags := make(map[string]string, 0)\n\t\t\t\tfor _, tag := range instance.Tags {\n\t\t\t\t\tec2Tags[*tag.Key] = *tag.Value\n\t\t\t\t}\n\t\t\t\tmatch, err := regexp.MatchString(rgxMatch, ec2Tags[rgxTag])\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Panic(err)\n\t\t\t\t}\n\t\t\t\tif match {\n\t\t\t\t\toutput <- instance.InstanceId\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\tresult, err := m.EC2API.DescribeInstances(filter)\n\tif err != nil {\n\t\tif awsErr, ok := err.(awserr.Error); ok {\n\t\t\treturn nil, awsErr\n\t\t}\n\t}\n\tfor _, rsvp := range result.Reservations {\n\t\tfor _, instance := range rsvp.Instances {\n\t\t\tinput <- instance\n\t\t}\n\t}\n\n\tclose(input)\n\t// close output once every worker has drained input, so the collection\n\t// loop below runs concurrently and the workers never block on send\n\tgo func() {\n\t\tm.wg.Wait()\n\t\tclose(output)\n\t}()\n\tfor instance := range output {\n\t\tinstanceIds = append(instanceIds, instance)\n\t}\n\treturn instanceIds, nil\n}", "func listEC2Ids(containerToEC2InstancesMap map[string]string) []*string {\n\tec2InstanceIds := []*string{}\n\tfor _, val := range containerToEC2InstancesMap {\n\t\tec2InstanceIds = append(ec2InstanceIds, aws.String(val))\n\t}\n\treturn ec2InstanceIds\n}", "func (*SDKGetter) EC2(session *session.Session) EC2Interface {\n\treturn ec2svc.NewService(ec2.New(session))\n}", "func (svc *AWSService) initInstances(confDir string) error {\n\tif confDir == \"\" {\n\t\treturn errors.New(\"invalid config dir (empty)\")\n\t}\n\n\tentries, err := ioutil.ReadDir(confDir)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"reading AWS 
config dir\")\n\t}\n\n\tfor _, entry := range entries {\n\t\tif entry.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\tif !strings.Contains(\".json|.toml|.yaml\", filepath.Ext(entry.Name())) {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar cfg Config\n\t\tif err := config.LoadConfigFile(path.Join(confDir, entry.Name()), &cfg); err != nil {\n\t\t\tsvc.logger.Error().Err(err).Str(\"file\", entry.Name()).Msg(\"loading config, skipping\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif cfg.ID == \"\" {\n\t\t\tsvc.logger.Error().Str(\"file\", entry.Name()).Msg(\"invalid config ID (empty), skipping\")\n\t\t\tcontinue\n\t\t}\n\t\tif strings.Contains(cfg.ID, \" \") {\n\t\t\tsvc.logger.Error().Str(\"file\", entry.Name()).Msg(\"invalid config ID (contains spaces), skipping\")\n\t\t\tcontinue\n\t\t}\n\t\tif len(cfg.Regions) == 0 {\n\t\t\tsvc.logger.Error().Str(\"file\", entry.Name()).Msg(\"invalid config regions (empty), skipping\")\n\t\t}\n\n\t\t// based on cfg.Period - collect every 1min for 'detailed' or every 5min for 'basic'\n\t\tperiod := 300\n\t\tif cfg.Period == \"detailed\" {\n\t\t\tperiod = 60\n\t\t}\n\t\t// used to control how many samples we request - calclulating start from\n\t\t// time.Now (e.g. time.Now().Add(- (interval * time.Second))). desired\n\t\t// number of samples is three. if exactly three * period is used,\n\t\t// cloudwatch sdk will often respond with only the last two samples.\n\t\t// so use 3 * period, plus a little extra cushion.\n\t\tinterval := (period * 3) + (period / 2)\n\n\t\tfor _, regionConfig := range cfg.Regions {\n\t\t\tinstance := &Instance{\n\t\t\t\tcfg: &cfg,\n\t\t\t\tregionCfg: &regionConfig,\n\t\t\t\tctx: svc.groupCtx,\n\t\t\t\tinterval: uint(interval),\n\t\t\t\tlogger: svc.logger.With().Str(\"id\", cfg.ID).Str(\"region\", regionConfig.Name).Logger(),\n\t\t\t\tperiod: int64(period),\n\t\t\t}\n\t\t\tinstance.logger.Debug().Str(\"aws_region\", regionConfig.Name).Msg(\"initialized client instance for region\")\n\n\t\t\tcheckConfig := &circonus.Config{\n\t\t\t\tID: fmt.Sprintf(\"aws_%s_%s\", cfg.ID, regionConfig.Name),\n\t\t\t\tDisplayName: fmt.Sprintf(\"aws %s %s /%s\", cfg.ID, regionConfig.Name, release.NAME),\n\t\t\t\tCheckBundleID: cfg.Circonus.CID,\n\t\t\t\tAPIKey: cfg.Circonus.Key,\n\t\t\t\tAPIApp: cfg.Circonus.App,\n\t\t\t\tAPIURL: cfg.Circonus.URL,\n\t\t\t\tDebug: cfg.Circonus.Debug,\n\t\t\t\tLogger: instance.logger,\n\t\t\t\tTags: fmt.Sprintf(\"%s:aws,aws_region:%s\", release.NAME, regionConfig.Name),\n\t\t\t}\n\t\t\tif len(cfg.Tags) > 0 { // if top-level tags are configured, add them to check\n\t\t\t\ttags := make([]string, len(cfg.Tags))\n\t\t\t\tfor idx, tag := range cfg.Tags {\n\t\t\t\t\ttags[idx] = tag.Category + \":\" + tag.Value\n\t\t\t\t}\n\t\t\t\tcheckConfig.Tags += \",\" + strings.Join(tags, \",\")\n\t\t\t}\n\n\t\t\tchk, err := circonus.NewCheck(checkConfig)\n\t\t\tif err != nil {\n\t\t\t\tinstance.logger.Error().Err(err).Msg(\"creating Circonus Check instance, skipping\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tinstance.check = chk\n\n\t\t\t// TODO: dig more for any api call(s) that can be used for auto-discovery\n\t\t\t//\n\t\t\t// // using aws credentials, get list of active services\n\t\t\t// // actually...this is an inverse viewpoint - how many aws services are available,\n\t\t\t// // not how many the credentials actually have active\n\t\t\t// sess, err := instance.createSession(regionConfig.Name)\n\t\t\t// if err != nil {\n\t\t\t// \tinstance.logger.Error().Err(err).Str(\"region\", regionConfig.Name).Msg(\"unable to create session for region as configured\")\n\t\t\t// 
\tbreak\n\t\t\t// }\n\t\t\t// svcList, err := instance.getActiveServiceList(sess)\n\t\t\t// if err != nil {\n\t\t\t// \tinstance.logger.Error().Err(err).Str(\"region\", regionConfig.Name).Msg(\"unable to get list of active services for region\")\n\t\t\t// \tcontinue\n\t\t\t// }\n\t\t\t//\n\t\t\t// ms, err := collectors.New(instance.ctx, regionConfig.Services, instance.logger, svcList)\n\n\t\t\tms, err := collectors.New(instance.ctx, instance.check, regionConfig.Services, instance.logger)\n\t\t\tif err != nil {\n\t\t\t\tinstance.logger.Warn().Err(err).Msg(\"setting up aws metric services\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tinstance.collectors = ms\n\n\t\t\tsvc.instances = append(svc.instances, instance)\n\t\t}\n\t}\n\n\tif len(svc.instances) == 0 {\n\t\treturn errors.New(\"no valid AWS configs found\")\n\t}\n\n\treturn nil\n}", "func NewEc2Instance(ctx sdutils.AppContext, dd *awsDeploymentDescription) (*Ec2Instance, error) {\n\tvar err error\n\tcustomData := \"\"\n\tif dd.customPropFile != \"\" {\n\t\tdata, err := ioutil.ReadFile(dd.customPropFile)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Invalid custom properties file: %s\", err)\n\t\t}\n\t\tcustomData = string(data)\n\t}\n\n\tcustomLog4J := \"\"\n\tif dd.customLog4J != \"\" {\n\t\tlog4JData, err := ioutil.ReadFile(dd.customLog4J)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Invalid custom properties file: %s\", err)\n\t\t}\n\t\tcustomLog4J = base64.StdEncoding.EncodeToString(log4JData)\n\t}\n\n\tvar envBuffer bytes.Buffer\n\tfor _, env := range dd.environment {\n\t\tenvBuffer.WriteString(fmt.Sprintf(\"export %s\\n\", env))\n\t}\n\t// The custom script cannot be null in terraform so make a temp one\n\tscriptData := []byte(\"#!/bin/bash\\nexit 0\\n\")\n\tif dd.CustomScript != \"\" {\n\t\tscriptData, err = ioutil.ReadFile(dd.CustomScript)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Failed to read the script %s: %s\", dd.CustomScript, err.Error())\n\t\t}\n\t}\n\tbase64CustomScriptData := base64.StdEncoding.EncodeToString(scriptData)\n\tbase64CustomScriptPath := path.Join(dd.deployDir, \"custom-stardogscript.base64\")\n\terr = ioutil.WriteFile(base64CustomScriptPath, []byte(base64CustomScriptData), 0644)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to create the base 64 encoded custom script\")\n\t}\n\n\tscriptZkData := []byte(\"#!/bin/bash\\nexit 0\\n\")\n\tif dd.CustomZkScript != \"\" {\n\t\tscriptZkData, err = ioutil.ReadFile(dd.CustomZkScript)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Failed to read the script %s: %s\", dd.CustomZkScript, err.Error())\n\t\t}\n\t}\n\tbase64CustomZkScriptData := base64.StdEncoding.EncodeToString(scriptZkData)\n\tbase64CustomZkScriptPath := path.Join(dd.deployDir, \"custom-zk-stardogscript.base64\")\n\terr = ioutil.WriteFile(base64CustomZkScriptPath, []byte(base64CustomZkScriptData), 0644)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to create the base 64 encoded custom zk script\")\n\t}\n\n\tinstance := Ec2Instance{\n\t\tDeploymentName: dd.Name,\n\t\tRegion: dd.Region,\n\t\tKeyName: dd.AwsKeyName,\n\t\tVersion: dd.Version,\n\t\tZkInstanceType: dd.ZkInstanceType,\n\t\tSdInstanceType: dd.SdInstanceType,\n\t\tAmiID: dd.AmiID,\n\t\tPrivateKey: dd.PrivateKeyPath,\n\t\tDeployDir: dd.deployDir,\n\t\tCustomScript: base64CustomScriptPath,\n\t\tCustomZkScript: base64CustomZkScriptPath,\n\t\tCtx: ctx,\n\t\tCustomPropsData: customData,\n\t\tCustomLog4JData: customLog4J,\n\t\tEnvironment: envBuffer.String(),\n\t}\n\tif dd.disableSecurity 
{\n\t\tinstance.StartOpts = \"--disable-security\"\n\t}\n\treturn &instance, nil\n}", "func getBkupInstances(svc *ec2.EC2, bkupId string, reboot bool) (bkupInstances []*ec2.CreateImageInput, err error) {\n\n\tvar instanceSlice []*string\n\tvar ec2Filter ec2.Filter\n\n\t// if instance id provided use it else search for tags autobkup\n\tif len(bkupId) > 0 {\n\t\tinstanceSlice = append(instanceSlice, &bkupId)\n\t\tec2Filter.Name = nil\n\t\tec2Filter.Values = nil\n\t} else {\n\t\tec2Filter.Name = aws.String(\"tag-key\")\n\t\tec2Filter.Values = []*string{aws.String(\"autobkup\")}\n\t\tinstanceSlice = nil\n\t}\n\n\tec2dii := ec2.DescribeInstancesInput{InstanceIds: instanceSlice, Filters: []*ec2.Filter{&ec2Filter}}\n\n\tresp, err := svc.DescribeInstances(&ec2dii)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// for any instance found extract tag name and instanceid\n\tfor reservation := range resp.Reservations {\n\t\tfor instance := range resp.Reservations[reservation].Instances {\n\t\t\t// Create a new theInstance variable for each run through the loop\n\t\t\ttheInstance := ec2.CreateImageInput{}\n\n\t\t\tfor tag := range resp.Reservations[reservation].Instances[instance].Tags {\n\t\t\t\tif *resp.Reservations[reservation].Instances[instance].Tags[tag].Key == \"Name\" {\n\t\t\t\t\t// name of the created AMI must be unique so add the Unix Epoch\n\t\t\t\t\ttheInstance.Name = aws.String(\n\t\t\t\t\t\t*resp.Reservations[reservation].Instances[instance].Tags[tag].Value +\n\t\t\t\t\t\t\t\"-\" +\n\t\t\t\t\t\t\tstrconv.FormatInt(time.Now().Unix(), 10))\n\t\t\t\t\tbreak\n\t\t\t\t} else {\n\t\t\t\t\ttheInstance.Name = aws.String(\n\t\t\t\t\t\t*resp.Reservations[reservation].Instances[instance].InstanceId +\n\t\t\t\t\t\t\t\"-\" +\n\t\t\t\t\t\t\tstrconv.FormatInt(time.Now().Unix(), 10))\n\t\t\t\t}\n\t\t\t}\n\t\t\ttheInstance.Description = aws.String(\"Auto backup of instance \" + *resp.Reservations[reservation].Instances[instance].InstanceId)\n\t\t\ttheInstance.InstanceId = resp.Reservations[reservation].Instances[instance].InstanceId\n\t\t\t// swap value as the question is NoReboot?\n\t\t\ttheInstance.NoReboot = aws.Bool(!reboot)\n\t\t\t// append details on this instance to the slice\n\t\t\tbkupInstances = append(bkupInstances, &theInstance)\n\t\t}\n\t}\n\treturn bkupInstances, nil\n}", "func (e *ec2_t) getIpAddr() {\n\tsvc := ec2.New(session.New(&aws.Config{Region: aws.String(e.Region)}))\n\tparams := &ec2.DescribeInstancesInput{\n\t\tFilters: []*ec2.Filter{\n\t\t\t/*\n\t\t\t\t&ec2.Filter{\n\t\t\t\t\tName: aws.String(\"instance-state-name\"),\n\t\t\t\t\tValues: []*string{\n\t\t\t\t\t\taws.String(\"running\"),\n\t\t\t\t\t\taws.String(\"pending\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t*/\n\t\t\t&ec2.Filter{\n\t\t\t\tName: aws.String(\"instance-id\"),\n\t\t\t\tValues: []*string{\n\t\t\t\t\taws.String(e.InstanceId),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tresp, err := svc.DescribeInstances(params)\n\tif err != nil {\n\t\tlog.Println(\"fail to get public and private IP address of ec2 instance. error: \", err)\n\t\treturn\n\t}\n\n\tinstance := resp.Reservations[0].Instances[0]\n\te.PrivateIp = *instance.PrivateIpAddress\n\te.PublicIp = *instance.PublicIpAddress\n\tlog.Println(\"successfully get ec2 instance's IP address. 
public ip: \", e.PublicIp, \" private ip: \", e.PrivateIp)\n\treturn\n\n}", "func (az *Cloud) InstancesV2() (cloudprovider.InstancesV2, bool) {\n\treturn nil, false\n}", "func (a *Client) GetInstancesEventByEventId2(params *GetInstancesEventByEventId2Params) (*GetInstancesEventByEventId2OK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetInstancesEventByEventId2Params()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getInstancesEventByEventId2\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/instances/{id}/events/{eventId}\",\n\t\tProducesMediaTypes: []string{\"\"},\n\t\tConsumesMediaTypes: []string{\"\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetInstancesEventByEventId2Reader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetInstancesEventByEventId2OK), nil\n\n}", "func (a *AwsCloudprovider) removeAWSUnusedIPs(rc *ReconcileContext) error {\n\tresults := make(chan error)\n\tdefer close(results)\n\tfor node, ips := range rc.FinallyAssignedIPsByNode {\n\t\tnodec := node\n\t\tipsc := ips\n\t\tgo func() {\n\t\t\tinstance := a.selectedAWSInstances[a.getAWSIDFromNode(rc.AllNodes[nodec])]\n\t\t\tvar awsAssignedIPs []string\n\t\t\tfor _, ipipas := range instance.NetworkInterfaces[0].PrivateIpAddresses {\n\t\t\t\tif !(*ipipas.Primary) {\n\t\t\t\t\tawsAssignedIPs = append(awsAssignedIPs, *ipipas.PrivateIpAddress)\n\t\t\t\t}\n\t\t\t}\n\t\t\ttoBeRemovedIPs := strset.Difference(strset.New(awsAssignedIPs...), strset.New(ipsc...)).List()\n\t\t\tif len(toBeRemovedIPs) > 0 {\n\t\t\t\tlog.Info(\"vm\", \"instance \", instance.InstanceId, \" will be freed from IPs \", toBeRemovedIPs)\n\t\t\t\tvar toBeRemovedIPsAWSStr []*string\n\t\t\t\tfor _, ip := range toBeRemovedIPs {\n\t\t\t\t\tcopyIP := ip\n\t\t\t\t\ttoBeRemovedIPsAWSStr = append(toBeRemovedIPsAWSStr, &copyIP)\n\t\t\t\t}\n\n\t\t\t\tinput := &ec2.UnassignPrivateIpAddressesInput{\n\t\t\t\t\tNetworkInterfaceId: instance.NetworkInterfaces[0].NetworkInterfaceId,\n\t\t\t\t\tPrivateIpAddresses: toBeRemovedIPsAWSStr,\n\t\t\t\t}\n\t\t\t\t_, err := a.awsClient.UnassignPrivateIpAddresses(input)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(err, \"unable to remove IPs from \", \"instance\", instance.InstanceId)\n\t\t\t\t\tresults <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tresults <- nil\n\t\t\treturn\n\t\t}()\n\t}\n\tresult := &multierror.Error{}\n\tfor range rc.FinallyAssignedIPsByNode {\n\t\t_ = multierror.Append(result, <-results)\n\t}\n\n\treturn result.ErrorOrNil()\n}", "func ReceiveEvals(conf *Conf, svc *Services, pool *Pool) (err error) {\n\tfor {\n\t\tvar out *sqs.ReceiveMessageOutput\n\t\tif out, err = svc.SQS.ReceiveMessage(&sqs.ReceiveMessageInput{\n\t\t\tQueueUrl: aws.String(pool.QueueURL),\n\t\t\tVisibilityTimeout: aws.Int64(1),\n\t\t\tMaxNumberOfMessages: aws.Int64(1),\n\t\t}); err != nil {\n\t\t\tsvc.Logs.Error(\"failed to receive message\", zap.Error(err))\n\t\t\treturn\n\t\t}\n\n\t\tfor _, msg := range out.Messages {\n\t\t\tsvc.Logs.Info(\"received schedule msg\", zap.String(\"msg\", msg.String()))\n\n\t\t\teval := &Eval{}\n\t\t\terr = json.Unmarshal([]byte(aws.StringValue(msg.Body)), eval)\n\t\t\tif err != nil {\n\t\t\t\tsvc.Logs.Error(\"failed to unmarshal eval\", zap.Error(err))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif eval.Size < 1 {\n\t\t\t\teval.Size = 1\n\t\t\t}\n\n\t\t\t//if the eval requires specific dataset we can 
provide locality based scheduling by finding replicas in the pool\n\t\t\treplicas := []*Replica{}\n\t\t\tif eval.Dataset != \"\" {\n\t\t\t\treplicas, err = FindReplicas(conf, svc, eval, pool)\n\t\t\t\tif err != nil {\n\t\t\t\t\tsvc.Logs.Error(\"failed to find replicas\", zap.Error(err))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t//find capacity in the pool\n\t\t\talloc, err := Schedule(conf, svc, eval, pool, replicas)\n\t\t\tif err != nil {\n\t\t\t\tsvc.Logs.Error(\"eval cannot be scheduled\", zap.Error(err))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\terr = PutNewAlloc(conf, svc.DB, alloc)\n\t\t\tif err != nil {\n\t\t\t\tsvc.Logs.Error(\"failed to put allocation\", zap.Error(err))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tallocPl := &client.Alloc{\n\t\t\t\tAllocID: alloc.AllocID,\n\t\t\t\tPoolID: pool.PoolID,\n\t\t\t\tWorkerID: alloc.WorkerID,\n\t\t\t\t//@TODO fill with information the worker needs:\n\t\t\t\t// - RAM/CPU limits\n\t\t\t\t// - Docker image\n\t\t\t\t// - DatasetID/version\n\t\t\t\t// - AllocID\n\t\t\t}\n\n\t\t\tallocPlMsg, err := json.Marshal(allocPl)\n\t\t\tif err != nil {\n\t\t\t\tsvc.Logs.Error(\"failed to encode alloc message\", zap.Error(err))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif _, err = svc.SQS.SendMessage(&sqs.SendMessageInput{\n\t\t\t\tQueueUrl: aws.String(FmtWorkerQueueURL(conf, pool.PoolID, alloc.WorkerID)),\n\t\t\t\tMessageBody: aws.String(string(allocPlMsg)),\n\t\t\t}); err != nil {\n\t\t\t\tsvc.Logs.Error(\"failed to send alloc msg\", zap.Error(err))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif _, err = svc.SQS.DeleteMessage(&sqs.DeleteMessageInput{\n\t\t\t\tQueueUrl: aws.String(pool.QueueURL),\n\t\t\t\tReceiptHandle: msg.ReceiptHandle,\n\t\t\t}); err != nil {\n\t\t\t\tsvc.Logs.Error(\"failed to delete eval msg\", zap.Error(err))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}", "func (r Virtual_ReservedCapacityGroup) GetInstances() (resp []datatypes.Virtual_ReservedCapacityGroup_Instance, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Virtual_ReservedCapacityGroup\", \"getInstances\", nil, &r.Options, &resp)\n\treturn\n}", "func (ts *tester) _createASGs() (tss tupleTimes, err error) {\n\tts.cfg.Logger.Info(\"creating ASGs\")\n\n\tfor asgName, cur := range ts.cfg.EKSConfig.AddOnNodeGroups.ASGs {\n\t\timgID := cur.ImageID\n\t\tif imgID == \"\" {\n\t\t\timgID, err = ts.fetchImageID(cur.ImageIDSSMParameter)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tts.cfg.Logger.Info(\"creating launch template\",\n\t\t\tzap.String(\"launch-template-name\", cur.LaunchTemplateName),\n\t\t\tzap.String(\"image-id\", imgID),\n\t\t)\n\n\t\tuserData, err := ts.generateUserData(asgName, cur.AMIType, cur.KubeletExtraArgs, cur.BootstrapArgs)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to create user data for %q (%v)\", asgName, err)\n\t\t}\n\t\tuserData = base64.StdEncoding.EncodeToString([]byte(userData))\n\n\t\t_, err = ts.cfg.EC2APIV2.CreateLaunchTemplate(\n\t\t\tcontext.Background(),\n\t\t\t&aws_ec2_v2.CreateLaunchTemplateInput{\n\t\t\t\tLaunchTemplateName: aws_v2.String(cur.LaunchTemplateName),\n\n\t\t\t\tLaunchTemplateData: &aws_ec2_v2_types.RequestLaunchTemplateData{\n\t\t\t\t\tIamInstanceProfile: &aws_ec2_v2_types.LaunchTemplateIamInstanceProfileSpecificationRequest{\n\t\t\t\t\t\tArn: aws_v2.String(ts.cfg.EKSConfig.AddOnNodeGroups.Role.InstanceProfileARN),\n\t\t\t\t\t},\n\n\t\t\t\t\tKeyName: aws_v2.String(ts.cfg.EKSConfig.RemoteAccessKeyName),\n\n\t\t\t\t\tImageId: aws_v2.String(imgID),\n\t\t\t\t\tInstanceType: 
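// instance type comes from the node group's configuration\n\t\t\t\t\t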
aws_ec2_v2_types.InstanceType(cur.InstanceType),\n\n\t\t\t\t\tBlockDeviceMappings: []aws_ec2_v2_types.LaunchTemplateBlockDeviceMappingRequest{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tDeviceName: aws_v2.String(\"/dev/xvda\"),\n\t\t\t\t\t\t\tEbs: &aws_ec2_v2_types.LaunchTemplateEbsBlockDeviceRequest{\n\t\t\t\t\t\t\t\tDeleteOnTermination: aws_v2.Bool(true),\n\t\t\t\t\t\t\t\tEncrypted: aws_v2.Bool(true),\n\t\t\t\t\t\t\t\tVolumeType: cur.VolumeType,\n\t\t\t\t\t\t\t\tVolumeSize: aws_v2.Int32(cur.VolumeSize),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\n\t\t\t\t\t// for public DNS + SSH access\n\t\t\t\t\tNetworkInterfaces: []aws_ec2_v2_types.LaunchTemplateInstanceNetworkInterfaceSpecificationRequest{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tAssociatePublicIpAddress: aws_v2.Bool(true),\n\t\t\t\t\t\t\tDeleteOnTermination: aws_v2.Bool(true),\n\t\t\t\t\t\t\tDeviceIndex: aws_v2.Int32(0),\n\t\t\t\t\t\t\tGroups: []string{ts.cfg.EKSConfig.VPC.NodeGroupSecurityGroupID},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\n\t\t\t\t\tUserData: aws_v2.String(userData),\n\n\t\t\t\t\tMonitoring: &aws_ec2_v2_types.LaunchTemplatesMonitoringRequest{Enabled: aws_v2.Bool(true)},\n\t\t\t\t\tInstanceInitiatedShutdownBehavior: aws_ec2_v2_types.ShutdownBehaviorTerminate,\n\t\t\t\t},\n\n\t\t\t\tTagSpecifications: []aws_ec2_v2_types.TagSpecification{\n\t\t\t\t\t{\n\t\t\t\t\t\tResourceType: aws_ec2_v2_types.ResourceTypeLaunchTemplate,\n\t\t\t\t\t\tTags: []aws_ec2_v2_types.Tag{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey: aws_v2.String(\"Name\"),\n\t\t\t\t\t\t\t\tValue: aws_v2.String(fmt.Sprintf(\"%s-instance-launch-template\", cur.Name)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to create launch template for %q (%v)\", asgName, err)\n\t\t}\n\n\t\tselect {\n\t\tcase <-time.After(10 * time.Second):\n\t\tcase <-ts.cfg.Stopc:\n\t\t\treturn nil, errors.New(\"stopped\")\n\t\t}\n\n\t\tts.cfg.Logger.Info(\"creating ASG\",\n\t\t\tzap.String(\"asg-name\", asgName),\n\t\t\tzap.String(\"image-id\", imgID),\n\t\t)\n\n\t\t// TODO: tag instance and volume\n\t\t// Valid requests must contain either LaunchTemplate, LaunchConfigurationName, InstanceId or MixedInstancesPolicy parameter\n\t\tasgInput := &aws_asg_v2.CreateAutoScalingGroupInput{\n\t\t\tAutoScalingGroupName: aws_v2.String(asgName),\n\t\t\tMaxSize: aws_v2.Int32(cur.ASGMaxSize),\n\t\t\tMinSize: aws_v2.Int32(cur.ASGMinSize),\n\t\t\tVPCZoneIdentifier: aws_v2.String(strings.Join(ts.cfg.EKSConfig.VPC.PublicSubnetIDs, \",\")),\n\t\t\tHealthCheckGracePeriod: aws_v2.Int32(300),\n\t\t\tHealthCheckType: aws_v2.String(\"EC2\"),\n\t\t\tLaunchTemplate: &aws_asg_v2_types.LaunchTemplateSpecification{\n\t\t\t\tLaunchTemplateName: aws_v2.String(cur.LaunchTemplateName),\n\t\t\t\tVersion: aws_v2.String(\"$Latest\"),\n\t\t\t},\n\t\t\tTags: []aws_asg_v2_types.Tag{\n\t\t\t\t{\n\t\t\t\t\tKey: aws_v2.String(\"Name\"),\n\t\t\t\t\tValue: aws_v2.String(cur.Name),\n\t\t\t\t\tPropagateAtLaunch: aws_v2.Bool(true),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tKey: aws_v2.String(fmt.Sprintf(\"kubernetes.io/cluster/%s\", ts.cfg.EKSConfig.Name)),\n\t\t\t\t\tValue: aws_v2.String(\"owned\"),\n\t\t\t\t\tPropagateAtLaunch: aws_v2.Bool(true),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tKey: aws_v2.String(fmt.Sprintf(\"kubernetes.io/cluster-autoscaler/%s\", ts.cfg.EKSConfig.Name)),\n\t\t\t\t\tValue: aws_v2.String(\"owned\"),\n\t\t\t\t\tPropagateAtLaunch: aws_v2.Bool(true),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tKey: 
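// opt this group into cluster-autoscaler auto-discovery\n\t\t\t\t\t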
aws_v2.String(\"kubernetes.io/cluster-autoscaler/enabled\"),\n\t\t\t\t\tValue: aws_v2.String(\"true\"),\n\t\t\t\t\tPropagateAtLaunch: aws_v2.Bool(true),\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tif cur.ASGDesiredCapacity > 0 {\n\t\t\tasgInput.DesiredCapacity = aws_v2.Int32(cur.ASGDesiredCapacity)\n\t\t}\n\t\t_, err = ts.cfg.ASGAPIV2.CreateAutoScalingGroup(context.Background(), asgInput)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to create ASG for %q (%v)\", asgName, err)\n\t\t}\n\n\t\tcur.Instances = make(map[string]ec2config.Instance)\n\t\tcur.Logs = make(map[string][]string)\n\t\tts.cfg.EKSConfig.AddOnNodeGroups.ASGs[asgName] = cur\n\t\tts.cfg.EKSConfig.AddOnNodeGroups.Created = true\n\t\tts.cfg.EKSConfig.Sync()\n\n\t\ttss = append(tss, tupleTime{ts: time.Now(), name: asgName})\n\t}\n\n\tsort.Sort(sort.Reverse(tss))\n\tts.cfg.Logger.Info(\"created ASGs\")\n\treturn tss, nil\n}", "func main() {\n\t// 创建ecsClient实例\n\tecsClient, err := ecs.NewClientWithAccessKey(\n\t\t\"cn-shenzhen\", // 地域ID\n\t\t\"LTAI4FkfkEVNFGV7S3294foA\", // 您的Access Key ID\n\t\t\"fzotL4uCygsstuie6WzUs0tIRd1Lfy\") // 您的Access Key Secret\n\tif err != nil {\n\t\t// 异常处理\n\t\tpanic(err)\n\t}\n\t/*\n\t// 创建API请求并设置参数\n\trequest := ecs.CreateDescribeInstancesRequest()\n\t// 等价于 request.PageSize = \"10\"\n\trequest.PageSize = requests.NewInteger(10)\n\t// 发起请求并处理异常\n\tresponse, err := ecsClient.DescribeInstances(request)\n\tif err != nil {\n\t\t// 异常处理\n\t\tpanic(err)\n\t}\n\tfmt.Printf(\"类型:%#\\n值:%v\\n\",response.Instances,response.Instances)\n\t */\n\n\trequest := ecs.CreateDescribeInstanceAttributeRequest()\n\trequest.InstanceId = \"i-wz962mggaelnnz3kupfw\"\n\tresponse, err := ecsClient.DescribeInstanceAttribute(request)\n\tfmt.Printf(\"值:%v \\n\", response.ImageId)\n}", "func main() {\n\tif len(os.Args) != 3 {\n\t\texitErrorf(\"AMI ID and Instance Type are required\"+\n\t\t\t\"\\nUsage: %s image_id instance_type\", os.Args[0])\n\t}\n\n\n\t//Initialize the session that the SDK uses to load credentials from the shared credentials file ~/.aws/credentials\n\tsess, err := session.NewSession(&aws.Config{Region: aws.String(\"eu-west-1\")}, )\n\tsvc := ec2.New(sess)\n\tif err != nil {\n\t\texitErrorf(\"Error creating session, %v\", err)\n\t}\n\n}", "func DescribeInstances(svc *ec2.EC2, instanceName *string) (*ec2.DescribeInstancesOutput, error) {\n\n\tinput := &ec2.DescribeInstancesInput{\n\t\tFilters: []*ec2.Filter{\n\t\t\t{\n\t\t\t\tName: aws.String(\"tag:Name\"),\n\t\t\t\tValues: []*string{\n\t\t\t\t\tinstanceName,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\toutput, err := svc.DescribeInstances(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tdefault:\n\t\t\t\tutil.CoriPrintln(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tutil.CoriPrintln(err.Error())\n\t\t}\n\t\treturn nil, err\n\t}\n\n\t//util.CoriPrintln(\"Describe instance\", result)\n\treturn output, nil\n}", "func GetInstanceInfoMaps(awsSession *session.Session, targets []string) (infomaps InstanceInfoMaps, err error) {\n\tclient := ec2.New(awsSession)\n\tname2info := make(map[string]InstanceInfo)\n\tid2info := make(map[string]InstanceInfo)\n\n\tinfomaps.Id2Info = id2info\n\tinfomaps.Name2Info = name2info\n\n\tfilters := make([]*ec2.Filter, 0)\n\n\tparams := &ec2.DescribeInstancesInput{}\n\n\tif targets != nil {\n\t\tawsnames := make([]*string, 0)\n\n\t\tfor _, name := range targets {\n\t\t\tawsnames = 
append(awsnames, aws.String(name))\n\t\t}\n\n\t\tnameFilter := ec2.Filter{\n\t\t\tName: aws.String(\"tag:Name\"),\n\t\t\tValues: awsnames,\n\t\t}\n\n\t\tfilters = append(filters, &nameFilter)\n\t}\n\n\t// add the filters if any\n\tif len(filters) > 0 {\n\t\tparams.Filters = filters\n\t}\n\n\t// Actually call aws for the information\n\tresult, err := client.DescribeInstances(params)\n\tif err != nil {\n\t\terr = errors.Wrapf(err, \"failed to call describe instances\")\n\t\treturn infomaps, err\n\t}\n\n\t// we get back reservations\n\tfor _, reservation := range result.Reservations {\n\t\t// which we step through to get instances\n\t\tfor _, instance := range reservation.Instances {\n\n\t\t\tvar name string\n\n\t\t\t// Names, however are tags, which we have to loop through\n\t\t\tfor _, tag := range instance.Tags {\n\t\t\t\tif *tag.Key == \"Name\" {\n\t\t\t\t\tname = *tag.Value\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ti := InstanceInfo{\n\t\t\t\tInstanceId: *instance.InstanceId,\n\t\t\t\tInstanceName: name,\n\t\t\t}\n\n\t\t\t// add the info to the maps for easy and cheap lookup later\n\t\t\tid2info[*instance.InstanceId] = i\n\t\t\tname2info[name] = i\n\t\t}\n\t}\n\n\treturn infomaps, err\n}", "func AWSScale() {\n\tSetClusterName()\n\t// Scale the AWS infrastructure\n\tfmt.Printf(\"\\t\\t===============Starting AWS Scaling====================\\n\\n\")\n\tsshUser, osLabel := distSelect()\n\tprepareConfigFiles(osLabel)\n\tprovisioner.ExecuteTerraform(\"apply\", \"./inventory/\"+common.Name+\"/provisioner/\")\n\tmvHost := exec.Command(\"mv\", \"./inventory/hosts\", \"./inventory/\"+common.Name+\"/provisioner/hosts\")\n\tmvHost.Run()\n\tmvHost.Wait()\n\t// waiting for Infrastructure\n\ttime.Sleep(30)\n\t// Scale the Kubernetes cluster\n\tfmt.Printf(\"\\n\\n\\t\\t===============Starting Kubernetes Scaling====================\\n\\n\")\n\t_, err := os.Stat(\"./inventory/\" + common.Name + \"/provisioner/hosts\")\n\tcommon.ErrorCheck(\"No host file found.\", err)\n\tcpHost := exec.Command(\"cp\", \"./inventory/\"+common.Name+\"/provisioner/hosts\", \"./inventory/\"+common.Name+\"/installer/hosts\")\n\tcpHost.Run()\n\tcpHost.Wait()\n\tinstaller.RunPlaybook(\"./inventory/\"+common.Name+\"/installer/\", \"scale.yml\", sshUser, osLabel)\n\n\treturn\n}", "func main() {\n\n\t// 1. Load credentials/config\n\tcfg, err := config.LoadDefaultConfig(\n\t\tcontext.TODO(),\n\t\tconfig.WithRegion(\"eu-west-1\"),\n\t\tconfig.WithSharedConfigProfile(\"frontend\"),\n\t)\n\tcheck(err, \"Error loading config\")\n\n\t// 2. Initialise client for service you want (here ec2 is used)\n\tclient := ec2.NewFromConfig(cfg)\n\n\t// 3. Build and execute requests\n\tinput := ec2.DescribeSnapshotsInput{}\n\toutput, err := client.DescribeSnapshots(context.TODO(), &input)\n\tcheck(err, \"Describe snapshots failed\")\n\n\t// (4. 
Do some extra stuff)\n\tsixtyDaysAgo := time.Now().AddDate(0, 0, -60)\n\n\tfmt.Println(\"Deleting snapshots...\")\n\tfor _, s := range output.Snapshots {\n\t\tif s.StartTime.Before(sixtyDaysAgo) && *s.OwnerId == \"SOME ID\" {\n\t\t\tfmt.Printf(\"%s %s\\n\", s.StartTime.Format(time.RFC3339), *s.SnapshotId)\n\t\t\t_, err = client.DeleteSnapshot(context.TODO(), &ec2.DeleteSnapshotInput{SnapshotId: s.SnapshotId, DryRun: false})\n\t\t\tcheck(err, fmt.Sprintf(\"Unable to delete snapshot %s\", *s.SnapshotId))\n\t\t}\n\t}\n}", "func (sd *ServerDiscovery) GetInstances() (addresses []ServerAddress) {\n\tfor _, a := range sd.list {\n\t\taddresses = append(addresses, a)\n\t}\n\treturn addresses\n}", "func (ec2Mgr *ec2InstanceManager) ListInstances(instanceIds ...string) ([]common.Instance, error) {\n\tec2InputParameters := &ec2.DescribeInstancesInput{\n\t\tInstanceIds: aws.StringSlice(instanceIds),\n\t}\n\n\tvar instances []common.Instance\n\tec2Mgr.ec2API.DescribeInstancesPages(ec2InputParameters, func(page *ec2.DescribeInstancesOutput, lastPage bool) bool {\n\t\tfor _, reservation := range page.Reservations {\n\t\t\tfor _, instance := range reservation.Instances {\n\t\t\t\tinstances = append(instances, instance)\n\t\t\t}\n\t\t}\n\t\treturn true\n\t})\n\n\treturn instances, nil\n}", "func (a *Client) GetInstancesTransformations2(params *GetInstancesTransformations2Params) (*GetInstancesTransformations2OK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetInstancesTransformations2Params()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getInstancesTransformations2\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/instances/{id}/transformations\",\n\t\tProducesMediaTypes: []string{\"\"},\n\t\tConsumesMediaTypes: []string{\"\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetInstancesTransformations2Reader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetInstancesTransformations2OK), nil\n\n}", "func NewEC2SG(sg *ec2.SecurityGroup, region string) *EC2SG {\n\tentity := &EC2SG{\n\t\tEntity: NewEntity(),\n\t}\n\tif sg == nil {\n\t\tlog.Debug(\"nil sg\")\n\t\treturn entity\n\t}\n\n\tentity.Region = region\n\n\tif sg.GroupId != nil {\n\t\tentity.ID = *sg.GroupId\n\t}\n\n\tfor _, tag := range sg.Tags {\n\t\tif tag == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif tag.Key != nil && tag.Value != nil && *tag.Key == \"Name\" {\n\t\t\tentity.Name = *tag.Value\n\t\t}\n\t\tentity.AddTag(tag.Key, tag.Value)\n\t}\n\tentity.AddLabel(vpcID, sg.VpcId)\n\n\tfor _, permission := range sg.IpPermissions {\n\t\tfor _, rang := range permission.IpRanges {\n\t\t\tif rang.CidrIp != nil && util.ContainsPublicIps(*rang.CidrIp) {\n\t\t\t\tentity.AddLabel(publicIngress, aws.String(\"true\"))\n\t\t\t}\n\t\t}\n\t}\n\n\treturn entity\n}", "func (n *NodeFactory) For(ctx context.Context, instanceIDs []*string) (map[string]*v1.Node, error) {\n\t// EC2 will return all instances if unspecified, so we must short circuit\n\tif len(instanceIDs) == 0 {\n\t\treturn nil, nil\n\t}\n\tdescribeInstancesOutput, err := n.ec2api.DescribeInstancesWithContext(ctx, &ec2.DescribeInstancesInput{InstanceIds: instanceIDs})\n\tif err == nil {\n\t\treturn n.nodesFrom(describeInstancesOutput.Reservations), nil\n\t}\n\tif aerr, ok := err.(awserr.Error); ok {\n\t\treturn nil, aerr\n\t}\n\treturn nil, fmt.Errorf(\"failed to describe ec2 instances, %w\", err)\n}", "func 
populateRunning(aws awsECSClient, state *ecsState) error {\n\t// Collect data across all clusters being queried\n\tfor _, cluster := range state.meta.clusters {\n\t\tclusterTasks, err := getRunningTasks(aws, state, cluster)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err = getContainerInstances(aws, state, cluster, clusterTasks); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// ...then fetch any associated ec2 hosts\n\treturn getEC2Hosts(aws, state)\n}", "func runInstances(message string, fn runner) {\n logger.Log(fmt.Sprintf(\"%s %v\\n\", message, process))\n for i := 0; i < cfg.Instances; i++ {\n logger.Log(fmt.Sprintf(\"...Instance %d of %d %s\\n\", i, cfg.Instances, process))\n id, _ := pid(i)\n fn(i, id)\n }\n\n}", "func (c *EC2) createAWSEC2Instance(input *RunInstancesInput) (r aws.Referencer, attr aws.Attrabuter, err error) {\n\n\truninstancesrequest := input\n\treservation, err := RunInstances(runinstancesrequest)\n\tif err == nil {\n\t\tdescribeinstancesrequest := &DescribeInstancesInput{}\n\t\tif err := awsutil.CopyValue(describeinstancesrequest, \"InstanceIds\", reservation, \"Instances[].InstanceId\"); err != nil {\n\t\t\treturn reservation, reservation, err\n\t\t}\n\t\tif err := WaitUntilInstanceRunning(describeinstancesrequest); err != nil {\n\t\t\treturn reservation, reservation, err\n\t\t}\n\n\t} else {\n\t\treturn nil, nil, err\n\t}\n\tstartinstancesrequest := &StartInstancesInput{}\n\tif err := awsutil.CopyValue(startinstancesrequest, \"InstanceIds\", reservation, \"Instances[].InstanceId\"); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tstartinstancesresult, err := StartInstances(startinstancesrequest)\n\tif err == nil {\n\t\tdescribeinstancesrequest := &DescribeInstancesInput{}\n\t\tif err := awsutil.CopyValue(describeinstancesrequest, \"InstanceIds\", reservation, \"Instances[].InstanceId\"); err != nil {\n\t\t\treturn reservation, reservation, err\n\t\t}\n\t\tif err := WaitUntilInstanceRunning(describeinstancesrequest); err != nil {\n\t\t\treturn reservation, reservation, err\n\t\t}\n\n\t} else {\n\t\treturn nil, nil, err\n\t}\n\treturn reservation, reservation, nil\n}", "func GetInstanceIdsForInstanceGroupE(t *testing.T, projectID string, zone string, groupName string) ([]string, error) {\n\tlogger.Logf(t, \"Get instances for instance group %s in zone %s\", groupName, zone)\n\n\tctx := context.Background()\n\n\tservice, err := NewComputeServiceE(t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trequestBody := &compute.InstanceGroupsListInstancesRequest{\n\t\tInstanceState: \"ALL\",\n\t}\n\n\tinstanceIDs := []string{}\n\treq := service.InstanceGroups.ListInstances(projectID, zone, groupName, requestBody)\n\tif err := req.Pages(ctx, func(page *compute.InstanceGroupsListInstances) error {\n\t\tfor _, instance := range page.Items {\n\t\t\t// For some reason service.InstanceGroups.ListInstances returns us a collection\n\t\t\t// with Instance URLs and we need only the Instance ID for the next call. 
Use\n\t\t\t// the path functions to chop the Instance ID off the end of the URL.\n\t\t\tinstanceID := path.Base(instance.Instance)\n\t\t\tinstanceIDs = append(instanceIDs, instanceID)\n\t\t}\n\t\treturn nil\n\t}); err != nil {\n\t\treturn nil, fmt.Errorf(\"InstanceGroups.ListInstances(%s) got error: %v\", groupName, err)\n\t}\n\n\treturn instanceIDs, nil\n}", "func (a *Agent) spawnInstance(ctx context.Context, c instance.Config) {\n\tinst, err := a.instanceFactory(a.cfg.Global, c, a.cfg.WALDir, a.logger)\n\tif err != nil {\n\t\tlevel.Error(a.logger).Log(\"msg\", \"failed to create instance\", \"err\", err)\n\t\treturn\n\t}\n\n\tfor {\n\t\terr = inst.Run(ctx)\n\t\tif err != nil && err != context.Canceled {\n\t\t\tinstanceAbnormalExits.WithLabelValues(c.Name).Inc()\n\t\t\tlevel.Error(a.logger).Log(\"msg\", \"instance stopped abnormally, restarting after backoff period\", \"err\", err, \"backoff\", a.cfg.InstanceRestartBackoff, \"instance\", c.Name)\n\t\t\ttime.Sleep(a.cfg.InstanceRestartBackoff)\n\t\t} else {\n\t\t\tlevel.Info(a.logger).Log(\"msg\", \"stopped instance\", \"instance\", c.Name)\n\t\t\tbreak\n\t\t}\n\t}\n}", "func SpotInstances(sf ServiceFactory, am ActivityMonitor, allRegions bool) int {\n\t// Indicate activity\n\tam.StartAction(\"Retrieving Spot instance counts\")\n\n\t// Should we get the counts for all regions?\n\tinstanceCount := 0\n\tif allRegions {\n\t\t// Get the list of all enabled regions for this account\n\t\tregionsSlice := GetEC2Regions(sf.GetEC2InstanceService(\"\"), am)\n\n\t\t// Loop through all of the regions\n\t\tfor _, regionName := range regionsSlice {\n\t\t\t// Get the EC2 counts for a specific region\n\t\t\tinstanceCount += spotInstancesForSingleRegion(sf.GetEC2InstanceService(regionName), am)\n\t\t}\n\t} else {\n\t\t// Get the EC2 counts for the region selected by this session\n\t\tinstanceCount = spotInstancesForSingleRegion(sf.GetEC2InstanceService(\"\"), am)\n\t}\n\n\t// Indicate end of activity\n\tam.EndAction(\"OK (%d)\", color.Bold(instanceCount))\n\n\treturn instanceCount\n}", "func (client *AWSClient) GetPrivateIPsOfInstancesOfAutoscalingGroup(name string) ([]string, error) {\n\tgroup, exists, err := client.getAutoscalingGroup(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, fmt.Errorf(\"autoscaling group %v doesn't exists\", name)\n\t}\n\n\tinstances, err := client.getInstancesOfAutoscalingGroup(group)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar result []string\n\tfor _, ins := range instances {\n\t\tif len(ins.NetworkInterfaces) > 0 && ins.NetworkInterfaces[0].PrivateIpAddress != nil {\n\t\t\tresult = append(result, *ins.NetworkInterfaces[0].PrivateIpAddress)\n\t\t}\n\t}\n\n\treturn result, nil\n}", "func (s ec2sessions) DeleteExpiredInstances() {\n\tfor _, session := range s.sessions {\n\t\tsession.deleteExpiredInstances()\n\t}\n}", "func callECSAgent(desiredStatus string, tasks []*string, privateIP *string) ([]*ECSContainer, error) {\n\trequestURL := fmt.Sprintf(\"%v://%v:%v/v1/tasks\", protocol, *privateIP, port)\n\n\tresp, err := http.Get(requestURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error getting running tasks from ecs agent\")\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error reading response body for request %v\", requestURL)\n\t}\n\n\tecsEnveloppe := new(ECSEnveloppe)\n\terr = json.Unmarshal(body, ecsEnveloppe)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error decoding response from request 
%v\", requestURL)\n\t}\n\n\tcontainers := []*ECSContainer{}\n\tfor _, task := range ecsEnveloppe.Tasks {\n\t\tif task.DesiredStatus != desiredStatus {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, wantedTask := range tasks {\n\t\t\tif *wantedTask == task.ARN {\n\t\t\t\tfor _, c := range task.Containers {\n\t\t\t\t\tc.PrivateIP = *privateIP\n\t\t\t\t\tc.TaskName = task.ARN\n\t\t\t\t}\n\t\t\t\tcontainers = append(containers, task.Containers...)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn containers, nil\n}", "func (client AccessGovernanceCPClient) listGovernanceInstances(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/governanceInstances\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response ListGovernanceInstancesResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\tapiReferenceLink := \"https://docs.oracle.com/iaas/api/#/en/access-governance-cp/20220518/GovernanceInstanceCollection/ListGovernanceInstances\"\n\t\terr = common.PostProcessServiceError(err, \"AccessGovernanceCP\", \"ListGovernanceInstances\", apiReferenceLink)\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func ChangeImagesForInstances(sess *session.Session, target targets.IfTarget, machineInst instance.IfInstance, firstImage *string, secondImage *string) error {\n\n\tfmt.Println(\"starting DescribeTargets\")\n\tresult, err := target.DescribeTargets()\n\tif err != nil {\n\t\treturn err\n\t}\n\tinstances := target.TakeIds(result)\n\n\t// Loop for removing and creating instances with new image\n\tfor _, inst := range instances {\n\n\t\tinfo, err := machineInst.DescribeInstance(inst)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tpresentImage := info.Reservations[0].Instances[0].ImageId\n\n\t\t// Check if image need to be changed\n\t\tif *presentImage == *firstImage {\n\t\t\tfmt.Println(\"Starting ami change for instance: \", inst)\n\n\t\t\tfmt.Println(\"Deregistring target instance from target group\")\n\n\t\t\t// Remove instance from target group before terminating instance\n\t\t\ttarget.DeregisterTarget(inst)\n\n\t\t\t// Terminate old image instance\n\t\t\tmachineInst.TerminateInstance(inst)\n\n\t\t\t// Create new instance with new image\n\t\t\tnewMachine, err := machineInst.RunInstance(*secondImage)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// Take instance id for future usage\n\t\t\tinst = *newMachine.Instances[0].InstanceId\n\t\t\tinfo, err := machineInst.DescribeInstance(inst)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// Check state of newly create instance\n\t\t\tstate := info.Reservations[0].Instances[0].State.Name\n\n\t\t\t// Waiting loop for instance be fully available\n\t\t\tfmt.Println(\"Waiting for instance to be running\")\n\t\t\tfor *state != \"running\" {\n\n\t\t\t\tinfo, err := machineInst.DescribeInstance(inst)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tstate = info.Reservations[0].Instances[0].State.Name\n\t\t\t\tfmt.Println(*state)\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t}\n\t\t\tfmt.Println(\"Registring new target instance in target group\")\n\t\t\ttarget.RegisterTarget(inst)\n\n\t\t\t// before we proceed to another instance, 
here we should add helth status checking\n\t\t\t// for target in target group\n\t\t\t// but i do not have full accesses to AWS and time to create fully working test environment\n\n\t\t}\n\t}\n\n\treturn nil\n}", "func (i *instance) Instance() *ec2.Instance {\n\treturn i.ec2inst\n}", "func getInstanceByName(ec2Service *ec2.EC2, instanceName string) (*ec2.Instance, error) {\n\tresult, err := ec2Service.DescribeInstances(nil)\n\n\tif err == nil {\n\t\tfor _, v := range result.Reservations {\n\t\t\tfor _, instance := range v.Instances {\n\t\t\t\tfor _, value := range instance.Tags {\n\t\t\t\t\tif *value.Value == instanceName {\n\t\t\t\t\t\treturn instance, nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"could not find instance\")\n}", "func newInstances(pod *Pod, prov provider.DataCenter, cfg *config.Instances) (*instances, error) {\n\tlog.Debug(\"Initializing Instances\")\n\n\ti := &instances{\n\t\tResources: resource.NewResources(),\n\t\tpod: pod,\n\t\tinstances: map[string]resource.Instance{},\n\t}\n\n\t// The reference to the network resource.\n\tnet := pod.Cluster().Compute().DataCenter().Network()\n\n\t// The availability zones available to these instances.\n\tavailabilityZones := net.AvailabilityZones()\n\n\t// The subnet group associated with these instances.\n\tsubnetGroup := net.SubnetGroups().Find(pod.SubnetGroup())\n\tif subnetGroup == nil {\n\t\treturn nil, fmt.Errorf(\"Cannot find subnet group %s configured for pod %s\", pod.SubnetGroup(), pod.Name())\n\t}\n\n\t// The keypair to be used with these instances.\n\tkeypair := pod.Cluster().Compute().KeyPair()\n\n\tn := 0\n\tfor _, conf := range *cfg {\n\t\t// Ensure the instance is uniquely named.\n\t\tif i.Find(conf.Name()) != nil {\n\t\t\treturn nil, fmt.Errorf(\"Instance name %q must be unique but is used multiple times\", conf.Name())\n\t\t}\n\n\t\t// The availability zone for this instance. Chosing via round robin. 
Always starting at 0.\n\t\taz := availabilityZones[n%len(availabilityZones)]\n\n\t\t// Get the subnet associated with the AZ.\n\t\tsubnetName := pod.SubnetGroup() + \"-\" + az\n\t\tsubnet := subnetGroup.Find(subnetName)\n\t\tif subnet == nil {\n\t\t\treturn nil, fmt.Errorf(\"Cannot find subnet %s configured for instance %s\", subnetName, conf.Name())\n\t\t}\n\n\t\tinstance, err := newInstance(pod, subnet, keypair, prov, conf)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ti.instances[instance.Name()] = instance\n\t\ti.Append(instance)\n\n\t\tn++\n\t}\n\treturn i, nil\n}", "func testDescribeInstancesOutput() *ec2.DescribeInstancesOutput {\n\treturn &ec2.DescribeInstancesOutput{\n\t\tReservations: []*ec2.Reservation{\n\t\t\ttestEC2Reservation(),\n\t\t},\n\t}\n}", "func UpdateFromEC2API(ctx context.Context, ec2Client *ec2shim.Client) error {\n\tinstanceTypeInfos, err := ec2Client.GetInstanceTypes(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlimitsOnce.Do(populateStaticENILimits)\n\n\tlimits.Lock()\n\tdefer limits.Unlock()\n\n\tfor _, instanceTypeInfo := range instanceTypeInfos {\n\t\tinstanceType := string(instanceTypeInfo.InstanceType)\n\t\tadapterLimit := aws.ToInt32(instanceTypeInfo.NetworkInfo.MaximumNetworkInterfaces)\n\t\tipv4PerAdapter := aws.ToInt32(instanceTypeInfo.NetworkInfo.Ipv4AddressesPerInterface)\n\t\tipv6PerAdapter := aws.ToInt32(instanceTypeInfo.NetworkInfo.Ipv6AddressesPerInterface)\n\t\thypervisorType := instanceTypeInfo.Hypervisor\n\n\t\tlimits.m[instanceType] = ipamTypes.Limits{\n\t\t\tAdapters: int(adapterLimit),\n\t\t\tIPv4: int(ipv4PerAdapter),\n\t\t\tIPv6: int(ipv6PerAdapter),\n\t\t\tHypervisorType: string(hypervisorType),\n\t\t}\n\t}\n\n\treturn nil\n}", "func (r Virtual_ReservedCapacityGroup) GetAvailableInstances() (resp []datatypes.Virtual_ReservedCapacityGroup_Instance, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Virtual_ReservedCapacityGroup\", \"getAvailableInstances\", nil, &r.Options, &resp)\n\treturn\n}", "func (p *ec2ProviderImpl) GetPrivateDNSName(id string) (string, error) {\n\tprivateDNSName, err := p.getPrivateDNSNameCache(id)\n\tif err == nil {\n\t\treturn privateDNSName, nil\n\t}\n\tlogrus.Debugf(\"Missed the cache for the InstanceId = %s Verifying if its already in requestQueue \", id)\n\t// check if the request for instanceId already in queue.\n\tif p.getRequestInFlightForInstanceId(id) {\n\t\tlogrus.Debugf(\"Found the InstanceId:= %s request In Queue waiting in 5 seconds loop \", id)\n\t\tfor i := 0; i < totalIterationForWaitInterval; i++ {\n\t\t\ttime.Sleep(defaultWaitInterval)\n\t\t\tprivateDNSName, err := p.getPrivateDNSNameCache(id)\n\t\t\tif err == nil {\n\t\t\t\treturn privateDNSName, nil\n\t\t\t}\n\t\t}\n\t\treturn \"\", fmt.Errorf(\"failed to find node %s in PrivateDNSNameCache returning from loop\", id)\n\t}\n\tlogrus.Debugf(\"Missed the requestQueue cache for the InstanceId = %s\", id)\n\tp.setRequestInFlightForInstanceId(id)\n\trequestQueueLength := p.getRequestInFlightSize()\n\t//The code verifies if the requestQuqueMap size is greater than max request in flight with rate\n\t//limiting then writes to the channel where we are making batch ec2:DescribeInstances API call.\n\tif requestQueueLength > maxAllowedInflightRequest {\n\t\tlogrus.Debugf(\"Writing to buffered channel for instance Id %s \", id)\n\t\tp.instanceIdsChannel <- id\n\t\treturn p.GetPrivateDNSName(id)\n\t}\n\n\tlogrus.Infof(\"Calling ec2:DescribeInstances for the InstanceId = %s \", id)\n\tmetrics.Get().EC2DescribeInstanceCallCount.Inc()\n\t// Look 
up instance from EC2 API\n\toutput, err := p.ec2.DescribeInstances(&ec2.DescribeInstancesInput{\n\t\tInstanceIds: aws.StringSlice([]string{id}),\n\t})\n\tif err != nil {\n\t\tp.unsetRequestInFlightForInstanceId(id)\n\t\treturn \"\", fmt.Errorf(\"failed querying private DNS from EC2 API for node %s: %s \", id, err.Error())\n\t}\n\tfor _, reservation := range output.Reservations {\n\t\tfor _, instance := range reservation.Instances {\n\t\t\tif aws.StringValue(instance.InstanceId) == id {\n\t\t\t\tprivateDNSName = aws.StringValue(instance.PrivateDnsName)\n\t\t\t\tp.setPrivateDNSNameCache(id, privateDNSName)\n\t\t\t\tp.unsetRequestInFlightForInstanceId(id)\n\t\t\t}\n\t\t}\n\t}\n\n\tif privateDNSName == \"\" {\n\t\treturn \"\", fmt.Errorf(\"failed to find node %s \", id)\n\t}\n\treturn privateDNSName, nil\n}", "func CreateNodes(client *rancher.Client, rolesPerPool []string, quantityPerPool []int32) (ec2Nodes []*nodes.Node, err error) {\n\tec2Client, err := client.GetEC2Client()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trunningReservations := []*ec2.Reservation{}\n\treservationConfigs := []*rancherEc2.AWSEC2Config{}\n\t// provisioning instances in reverse order to allow windows instances time to become ready\n\tfor i := len(quantityPerPool) - 1; i >= 0; i-- {\n\t\tconfig := MatchRoleToConfig(rolesPerPool[i], ec2Client.ClientConfig.AWSEC2Config)\n\t\tif config == nil {\n\t\t\treturn nil, errors.New(\"No matching nodesAndRole for AWSEC2Config with role:\" + rolesPerPool[i])\n\t\t}\n\t\tsshName := getSSHKeyName(config.AWSSSHKeyName)\n\t\trunInstancesInput := &ec2.RunInstancesInput{\n\t\t\tImageId: aws.String(config.AWSAMI),\n\t\t\tInstanceType: aws.String(config.InstanceType),\n\t\t\tMinCount: aws.Int64(int64(quantityPerPool[i])),\n\t\t\tMaxCount: aws.Int64(int64(quantityPerPool[i])),\n\t\t\tKeyName: aws.String(sshName),\n\t\t\tBlockDeviceMappings: []*ec2.BlockDeviceMapping{\n\t\t\t\t{\n\t\t\t\t\tDeviceName: aws.String(\"/dev/sda1\"),\n\t\t\t\t\tEbs: &ec2.EbsBlockDevice{\n\t\t\t\t\t\tVolumeSize: aws.Int64(int64(config.VolumeSize)),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tIamInstanceProfile: &ec2.IamInstanceProfileSpecification{\n\t\t\t\tName: aws.String(config.AWSIAMProfile),\n\t\t\t},\n\t\t\tPlacement: &ec2.Placement{\n\t\t\t\tAvailabilityZone: aws.String(config.AWSRegionAZ),\n\t\t\t},\n\t\t\tNetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{\n\t\t\t\t{\n\t\t\t\t\tDeviceIndex: aws.Int64(0),\n\t\t\t\t\tAssociatePublicIpAddress: aws.Bool(true),\n\t\t\t\t\tGroups: aws.StringSlice(config.AWSSecurityGroups),\n\t\t\t\t},\n\t\t\t},\n\t\t\tTagSpecifications: []*ec2.TagSpecification{\n\t\t\t\t{\n\t\t\t\t\tResourceType: aws.String(\"instance\"),\n\t\t\t\t\tTags: []*ec2.Tag{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: aws.String(\"Name\"),\n\t\t\t\t\t\t\tValue: aws.String(nodeBaseName),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: aws.String(\"CICD\"),\n\t\t\t\t\t\t\tValue: aws.String(config.AWSCICDInstanceTag),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\treservation, err := ec2Client.SVC.RunInstances(runInstancesInput)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// instead of waiting on each node pool to complete provisioning, add to a queue and check run status later\n\t\trunningReservations = append(runningReservations, reservation)\n\t\treservationConfigs = append(reservationConfigs, config)\n\t}\n\n\tfor i := 0; i < len(quantityPerPool); i++ {\n\t\tvar listOfInstanceIds []*string\n\n\t\tfor _, instance := range runningReservations[i].Instances 
{\n\t\t\tlistOfInstanceIds = append(listOfInstanceIds, instance.InstanceId)\n\t\t}\n\n\t\t//wait until instance is running\n\t\terr = ec2Client.SVC.WaitUntilInstanceRunning(&ec2.DescribeInstancesInput{\n\t\t\tInstanceIds: listOfInstanceIds,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t//wait until instance status is ok\n\t\terr = ec2Client.SVC.WaitUntilInstanceStatusOk(&ec2.DescribeInstanceStatusInput{\n\t\t\tInstanceIds: listOfInstanceIds,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// describe instance to get attributes\n\t\tdescribe, err := ec2Client.SVC.DescribeInstances(&ec2.DescribeInstancesInput{\n\t\t\tInstanceIds: listOfInstanceIds,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treadyInstances := describe.Reservations[0].Instances\n\n\t\tsshKey, err := nodes.GetSSHKey(reservationConfigs[i].AWSSSHKeyName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, readyInstance := range readyInstances {\n\t\t\tec2Node := &nodes.Node{\n\t\t\t\tNodeID: *readyInstance.InstanceId,\n\t\t\t\tPublicIPAddress: *readyInstance.PublicIpAddress,\n\t\t\t\tPrivateIPAddress: *readyInstance.PrivateIpAddress,\n\t\t\t\tSSHUser: reservationConfigs[i].AWSUser,\n\t\t\t\tSSHKey: sshKey,\n\t\t\t}\n\t\t\t// re-reverse the list so that the order is corrected\n\t\t\tec2Nodes = append([]*nodes.Node{ec2Node}, ec2Nodes...)\n\t\t}\n\t}\n\n\tclient.Session.RegisterCleanupFunc(func() error {\n\t\treturn DeleteNodes(client, ec2Nodes)\n\t})\n\n\treturn ec2Nodes, nil\n}", "func GetInstances(entriesBytes []byte, kubeAuth bool, threadPoolSize int) []string {\n\tvar instances []Instance\n\tif err := yaml.Unmarshal(entriesBytes, &instances); err != nil {\n\t\tlog.WithError(err).Fatal(\"[Vault Instance] failed to decode instance configuration\")\n\t}\n\n\tinstanceCreds, err := processInstances(instances, kubeAuth)\n\tif err != nil {\n\t\tlog.WithError(err).Fatal(\"[Vault Instance] failed to retrieve access credentials\")\n\t}\n\tinitClients(instanceCreds, threadPoolSize)\n\n\t// return list of addresses that clients were initialized for\n\taddresses := []string{}\n\tfor address := range vaultClients {\n\t\taddresses = append(addresses, address)\n\t}\n\treturn addresses\n}", "func testEC2Reservation() *ec2.Reservation {\n\treturn &ec2.Reservation{\n\t\tInstances: []*ec2.Instance{\n\t\t\t&ec2.Instance{\n\t\t\t\tState: &ec2.InstanceState{\n\t\t\t\t\tCode: aws.Int64(16),\n\t\t\t\t\tName: aws.String(\"running\"),\n\t\t\t\t},\n\t\t\t\tInstanceId: aws.String(\"i-1234567890abcdef0\"),\n\t\t\t\tPrivateIpAddress: aws.String(\"10.0.0.1\"),\n\t\t\t\tPublicIpAddress: aws.String(\"54.0.0.1\"),\n\t\t\t},\n\t\t},\n\t}\n}", "func (o OrganizationConfigurationDatasourcesMalwareProtectionOutput) ScanEc2InstanceWithFindings() OrganizationConfigurationDatasourcesMalwareProtectionScanEc2InstanceWithFindingsOutput {\n\treturn o.ApplyT(func(v OrganizationConfigurationDatasourcesMalwareProtection) OrganizationConfigurationDatasourcesMalwareProtectionScanEc2InstanceWithFindings {\n\t\treturn v.ScanEc2InstanceWithFindings\n\t}).(OrganizationConfigurationDatasourcesMalwareProtectionScanEc2InstanceWithFindingsOutput)\n}", "func writeEnv(out io.Writer) error {\n\tmetaSvc := ec2metadata.New(session.New(), &aws.Config{\n\t\tRegion: aws.String(\"us-east-1\"),\n\t})\n\tif !metaSvc.Available() {\n\t\treturn errors.New(\"not running on an ec2 instance\")\n\t}\n\n\tidentity, err := metaSvc.GetInstanceIdentityDocument()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to retrieve 
instance identity\")\n\t}\n\n\tec2Svc := ec2.New(session.New(), &aws.Config{\n\t\tRegion: aws.String(identity.Region),\n\t})\n\tres, err := ec2Svc.DescribeInstances(&ec2.DescribeInstancesInput{\n\t\tInstanceIds: aws.StringSlice([]string{identity.InstanceID}),\n\t})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to describe instance \")\n\t}\n\tif len(res.Reservations) == 0 {\n\t\treturn errors.Errorf(\"reservations for instance %s not found\", identity.InstanceID)\n\t}\n\tif len(res.Reservations[0].Instances) == 0 {\n\t\treturn errors.Errorf(\"instance %s not found\", identity.InstanceID)\n\t}\n\tinstance := res.Reservations[0].Instances[0]\n\n\twr := func(name string, value interface{}) {\n\t\tfmt.Fprintf(out, \"%s=%s\\n\", name, value)\n\t}\n\n\twr(\"EC2_AVAILABILITY_ZONE\", identity.AvailabilityZone)\n\twr(\"EC2_AVAILABILITY_ZONE_LETTER\", zoneLetter(identity.AvailabilityZone))\n\twr(\"EC2_REGION\", identity.Region)\n\twr(\"EC2_REGION_SHORT\", shortRegion(identity.Region))\n\twr(\"EC2_INSTANCE_ID\", identity.InstanceID)\n\twr(\"EC2_INSTANCE_TYPE\", identity.InstanceType)\n\twr(\"EC2_ACCOUNT_ID\", identity.AccountID)\n\twr(\"EC2_IMAGE_ID\", identity.ImageID)\n\n\twr(\"EC2_PRIVATE_DNS\", aws.StringValue(instance.PrivateDnsName))\n\twr(\"EC2_PRIVATE_IP\", aws.StringValue(instance.PrivateIpAddress))\n\twr(\"EC2_PUBLIC_DNS\", aws.StringValue(instance.PublicDnsName))\n\twr(\"EC2_PUBLIC_IP\", aws.StringValue(instance.PublicIpAddress))\n\twr(\"EC2_SUBNET_ID\", aws.StringValue(instance.SubnetId))\n\twr(\"EC2_VPC_ID\", aws.StringValue(instance.VpcId))\n\twr(\"EC2_KEYNAME\", aws.StringValue(instance.KeyName))\n\twr(\"EC2_NAMESERVER\", getNameserver(metaSvc))\n\n\tfor _, tag := range instance.Tags {\n\t\tname := fmt.Sprintf(\"EC2_TAG_%s\", tagEnvName(aws.StringValue(tag.Key)))\n\t\twr(name, aws.StringValue(tag.Value))\n\t}\n\treturn nil\n}", "func (ins *EC2RemoteClient) startInstance() error {\n\tlog.Printf(\"Starting EC2 Instance %s\", ins.InstanceID)\n\t_, err := ins.ec2Client.StartInstances(&ec2.StartInstancesInput{InstanceIds: aws.StringSlice([]string{ins.InstanceID})})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error starting instance : %s\", err)\n\t}\n\tlog.Printf(\"Waiting for Instance %s to become ready (may take a few minutes)\", ins.InstanceID)\n\terr = ins.ec2Client.WaitUntilInstanceStatusOk(&ec2.DescribeInstanceStatusInput{InstanceIds: aws.StringSlice([]string{ins.InstanceID})})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error waiting for instance to become available : %s\", err)\n\t}\n\treturn err\n}", "func (d *EtcdDiscoverManager) Scrape(ch chan<- prometheus.Metric, namespace, exporter string) error {\n\tinstanceDesc := prometheus.NewDesc(\n\t\tprometheus.BuildFQName(namespace, exporter, \"instance_up\"),\n\t\t\"the instance in cluster status.\",\n\t\t[]string{\"from\", \"instance\", \"status\"}, nil,\n\t)\n\tfor _, i := range d.othersInstance {\n\t\tif i.Status == \"delete\" || i.Status == \"abnormal\" {\n\t\t\tch <- prometheus.MustNewConstMetric(instanceDesc, prometheus.GaugeValue, 0, d.selfInstance.HostIP.String(), i.HostIP.String(), i.Status)\n\t\t} else {\n\t\t\tch <- prometheus.MustNewConstMetric(instanceDesc, prometheus.GaugeValue, 1, d.selfInstance.HostIP.String(), i.HostIP.String(), i.Status)\n\t\t}\n\t}\n\treturn nil\n}", "func getRDSInstances(client rdsiface.RDSAPI) []*rds.DBInstance {\n\tinput := &rds.DescribeDBInstancesInput{}\n\n\tvar result []*rds.DBInstance\n\n\terr := client.DescribeDBInstancesPages(input,\n\t\tfunc(page *rds.DescribeDBInstancesOutput, 
lastPage bool) bool {\n\t\t\tresult = append(result, page.DBInstances...)\n\t\t\treturn !lastPage\n\t\t})\n\tif err != nil {\n\t\tlog.Fatal(\"Not able to get rds instances\", err)\n\t\treturn nil\n\t}\n\treturn result\n}", "func TestGetAllInstances(t *testing.T) {\n\tctx, cancelFunc := context.WithTimeout(context.Background(), standardTimeout)\n\tdefer cancelFunc()\n\n\tinstances, err := bat.StartRandomInstances(ctx, \"\", 3)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to launch instance: %v\", err)\n\t}\n\n\tscheduled, err := bat.WaitForInstancesLaunch(ctx, \"\", instances, false)\n\tdefer func() {\n\t\t_, err := bat.DeleteInstances(ctx, \"\", scheduled)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Failed to delete instances: %v\", err)\n\t\t}\n\t}()\n\tif err != nil {\n\t\tt.Fatalf(\"Instance %s did not launch: %v\", instances[0], err)\n\t}\n\n\tinstanceDetails, err := bat.GetAllInstances(ctx, \"\")\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to retrieve instances: %v\", err)\n\t}\n\n\tfor _, instance := range instances {\n\t\tinstanceDetail, ok := instanceDetails[instance]\n\t\tif !ok {\n\t\t\tt.Fatalf(\"Failed to retrieve instance %s\", instance)\n\t\t}\n\n\t\t// Check some basic information\n\n\t\tif instanceDetail.FlavorID == \"\" || instanceDetail.HostID == \"\" ||\n\t\t\tinstanceDetail.TenantID == \"\" || instanceDetail.MacAddress == \"\" ||\n\t\t\tinstanceDetail.PrivateIP == \"\" {\n\t\t\tt.Fatalf(\"Instance missing information: %+v\", instanceDetail)\n\t\t}\n\t}\n}", "func (u *UpdateServerInput) UpdateServer(con aws.EstablishConnectionInput) ([]ServerResponse, error) {\n\n\t//get the relative sessions before proceeding further\n\tec2, sesserr := con.EstablishConnection()\n\tif sesserr != nil {\n\t\treturn nil, sesserr\n\t}\n\n\tsearchInput := CommonComputeInput{InstanceIds: u.InstanceIds}\n\tsearch, serverr := searchInput.SearchInstance(con)\n\tif serverr != nil {\n\t\treturn nil, serverr\n\t}\n\n\tif search != true {\n\t\treturn nil, fmt.Errorf(\"Could not find the entered Instances, please enter valid/existing InstanceIds\")\n\t}\n\tserverResponse := make([]ServerResponse, 0)\n\n\tswitch strings.ToLower(u.Action) {\n\tcase \"start\":\n\t\tresult, startErr := ec2.StartInstances(\n\t\t\t&aws.UpdateComputeInput{\n\t\t\t\tInstanceIds: u.InstanceIds,\n\t\t\t},\n\t\t)\n\n\t\tif startErr != nil {\n\t\t\treturn nil, startErr\n\t\t}\n\n\t\twaitErr := ec2.WaitTillInstanceRunning(\n\t\t\t&aws.DescribeComputeInput{\n\t\t\t\tInstanceIds: u.InstanceIds,\n\t\t\t},\n\t\t)\n\t\tif waitErr != nil {\n\t\t\treturn nil, waitErr\n\t\t}\n\n\t\tif u.GetRaw == true {\n\t\t\tserverResponse = append(serverResponse, ServerResponse{StartInstRaw: result, Cloud: \"Amazon\"})\n\t\t\treturn serverResponse, nil\n\t\t}\n\n\t\tfor _, inst := range result.StartingInstances {\n\t\t\tserverResponse = append(serverResponse, ServerResponse{InstanceId: *inst.InstanceId, CurrentState: \"running\", PreviousState: *inst.PreviousState.Name})\n\t\t}\n\t\treturn serverResponse, nil\n\n\tcase \"stop\":\n\t\tresult, stopErr := ec2.StopInstances(\n\t\t\t&aws.UpdateComputeInput{\n\t\t\t\tInstanceIds: u.InstanceIds,\n\t\t\t},\n\t\t)\n\n\t\tif stopErr != nil {\n\t\t\treturn nil, stopErr\n\t\t}\n\t\twaitErr := ec2.WaitTillInstanceStopped(\n\t\t\t&aws.DescribeComputeInput{\n\t\t\t\tInstanceIds: u.InstanceIds,\n\t\t\t},\n\t\t)\n\n\t\tif waitErr != nil {\n\t\t\treturn nil, waitErr\n\t\t}\n\n\t\tif u.GetRaw == true {\n\t\t\tserverResponse = append(serverResponse, ServerResponse{StopInstRaw: result, Cloud: \"Amazon\"})\n\t\t\treturn 
serverResponse, nil\n\t\t}\n\n\t\tfor _, inst := range result.StoppingInstances {\n\t\t\tserverResponse = append(serverResponse, ServerResponse{InstanceId: *inst.InstanceId, CurrentState: \"stopped\", PreviousState: *inst.PreviousState.Name})\n\t\t}\n\t\treturn serverResponse, nil\n\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"Sorry...!!!!. I am not aware of the action you asked me to perform, please enter the action which we support. The available actions are: start/stop\")\n\t}\n}", "func (c Client) GetInstances(appName, serviceName, partitionName string) (*InstanceItemsPage, error) {\n\tvar aggregateInstanceItemsPages InstanceItemsPage\n\tvar continueToken string\n\tfor {\n\t\tbasePath := \"Applications/\" + appName + \"/$/GetServices/\" + serviceName + \"/$/GetPartitions/\" + partitionName + \"/$/GetReplicas\"\n\t\tres, err := c.getHTTP(basePath, withContinue(continueToken))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvar instanceItemsPage InstanceItemsPage\n\t\terr = json.Unmarshal(res, &instanceItemsPage)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not deserialise JSON response: %+v\", err)\n\t\t}\n\n\t\taggregateInstanceItemsPages.Items = append(aggregateInstanceItemsPages.Items, instanceItemsPage.Items...)\n\n\t\tcontinueToken = getString(instanceItemsPage.ContinuationToken)\n\t\tif continueToken == \"\" {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn &aggregateInstanceItemsPages, nil\n}", "func (provider AWSProvider) GetServers(providerOptions map[string]string, logger *log.Logger) ([]string, error) {\n\n\t// Initialise Discover struct from go-discover\n\tdiscoverer := discover.Discover{\n\t\tProviders : map[string]discover.Provider{\n\t\t\t\"aws\": discover.Providers[\"aws\"],\n\t\t},\n\t}\n\n\t// Discard logs if loggger is not set\n\tif logger == nil {\n\t\tlogger = log.New(ioutil.Discard, \"\", log.LstdFlags)\n\t}\n\n\t// Create the constraint list for discovering AWS instances\n\tcfg := fmt.Sprintf(\"provider=aws region=%s access_key_id=%s secret_access_key=%s addr_type=%s tag_key=%s tag_value=%s\", providerOptions[\"region\"], providerOptions[\"accessKeyId\"], providerOptions[\"secretAccessKey\"], providerOptions[\"addrType\"], providerOptions[\"tagKey\"], providerOptions[\"tagValue\"])\n\tserverIps, err := discoverer.Addrs(cfg, logger)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn serverIps, nil\n}", "func (o DetectorDatasourcesMalwareProtectionOutput) ScanEc2InstanceWithFindings() DetectorDatasourcesMalwareProtectionScanEc2InstanceWithFindingsOutput {\n\treturn o.ApplyT(func(v DetectorDatasourcesMalwareProtection) DetectorDatasourcesMalwareProtectionScanEc2InstanceWithFindings {\n\t\treturn v.ScanEc2InstanceWithFindings\n\t}).(DetectorDatasourcesMalwareProtectionScanEc2InstanceWithFindingsOutput)\n}", "func TestListInstances(t *testing.T) {\n\tinstances := []*aws.Instance{\n\t\t{\n\t\t\tHostname: \"testHostname1\",\n\t\t\tIPAddress: \"10.10.10.1\",\n\t\t\tID: \"i-xxxxxxxxxxxxxxxx1\",\n\t\t\tPrivateDNSName: \"test1.local\",\n\t\t\tName: \"testNode1\",\n\t\t\tOSName: \"Amazon Linux\",\n\t\t\tOSType: \"Linux\",\n\t\t\tOSVersion: \"2\",\n\t\t},\n\t\t{\n\t\t\tHostname: \"testHostname2\",\n\t\t\tIPAddress: \"10.10.10.2\",\n\t\t\tID: \"i-xxxxxxxxxxxxxxxx2\",\n\t\t\tPrivateDNSName: \"test2.local\",\n\t\t\tName: \"testNode2\",\n\t\t\tOSName: \"Ubuntu\",\n\t\t\tOSType: \"Linux\",\n\t\t\tOSVersion: \"18.04\",\n\t\t},\n\t}\n\tinteractive := false\n\tformat := FormatText\n\tinput := StartInput{\n\t\tOutputFormat: &format,\n\t\tInteractive: 
&interactive,\n\t}\n\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\tm := NewMockCloudInstances(ctrl) // skipcq: SCC-compile\n\n\tm.EXPECT().ListInstances().Return(instances, nil)\n\n\tassert.NoError(t, input.listInstances(m))\n\t// TODO test integractive part\n}", "func getInstanceList(nodeNames sets.String) *compute.InstanceGroupsListInstances {\n\tinstanceNames := nodeNames.List()\n\tcomputeInstances := []*compute.InstanceWithNamedPorts{}\n\tfor _, name := range instanceNames {\n\t\tinstanceLink := getInstanceUrl(name)\n\t\tcomputeInstances = append(\n\t\t\tcomputeInstances, &compute.InstanceWithNamedPorts{\n\t\t\t\tInstance: instanceLink})\n\t}\n\treturn &compute.InstanceGroupsListInstances{\n\t\tItems: computeInstances,\n\t}\n}", "func (c *Client) GetAsgNodes(groupID string, clusterID string) ([]CceInstance, error) {\n\tif clusterID == \"\" {\n\t\treturn nil, fmt.Errorf(\"clusterID should not be nil\")\n\t}\n\n\tif groupID == \"\" {\n\t\treturn nil, fmt.Errorf(\"groupID should not be nil\")\n\t}\n\n\tparams := map[string]string{\n\t\t\"clusterUuid\": clusterID,\n\t\t\"groupId\": groupID,\n\t}\n\treq, err := bce.NewRequest(\"GET\", c.GetURL(\"/v1/cluster/group/instances\", params), nil)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := c.SendRequest(req, nil)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbodyContent, err := resp.GetBodyContent()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar insList ListInstancesResponse\n\terr = json.Unmarshal(bodyContent, &insList)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn insList.Instances, nil\n}", "func (e *EC2Mock) RunInstances(*ec2.RunInstancesInput) (*ec2.Reservation, error) {\n\t// expected input includes:\n\t// ImageId, InstanceType, MinCount, MaxCount, SecurityGroupIds, SubnetId,\n\t// IamInstanceProfile, UserData (optional)\n\treturn &ec2.Reservation{}, nil\n}", "func (dir *Dir) Instances() ([]*tengo.Instance, error) {\n\thosts, err := dir.Hostnames()\n\tif err != nil {\n\t\treturn nil, err\n\t} else if len(hosts) == 0 {\n\t\t// If no host defined in this dir (meaning this dir's .skeema, as well as\n\t\t// parent dirs' .skeema, global option files, or command-line) then nothing\n\t\t// to do\n\t\treturn nil, nil\n\t}\n\n\t// Before looping over hostnames, do a single lookup of user, password,\n\t// connect-options, port, socket.\n\tuser := dir.Config.GetAllowEnvVar(\"user\")\n\tpassword, err := dir.Password(hosts...)\n\tif err != nil {\n\t\treturn nil, err // for example, need interactive password but STDIN isn't a TTY\n\t}\n\tvar userAndPass string\n\tif password == \"\" {\n\t\tuserAndPass = user\n\t} else {\n\t\tuserAndPass = user + \":\" + password\n\t}\n\tparams, err := dir.InstanceDefaultParams()\n\tif err != nil {\n\t\treturn nil, ConfigErrorf(\"Invalid connection options: %w\", err)\n\t}\n\tportValue, portWasSupplied := dir.Port()\n\tsocketValue := dir.Config.GetAllowEnvVar(\"socket\")\n\tsocketWasSupplied := dir.Config.Supplied(\"socket\")\n\n\t// For each hostname, construct a DSN and use it to create an Instance\n\tvar instances []*tengo.Instance\n\tfor _, host := range hosts {\n\t\tvar net, addr string\n\t\tthisPortValue := portValue\n\t\tif host == \"localhost\" && (socketWasSupplied || !portWasSupplied) {\n\t\t\tnet, addr = \"unix\", socketValue\n\t\t} else {\n\t\t\tsplitHost, splitPort, err := tengo.SplitHostOptionalPort(host)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif splitPort > 0 {\n\t\t\t\tif splitPort != portValue && portWasSupplied 
{\n\t\t\t\t\treturn nil, ConfigErrorf(\"Port was supplied as %d inside hostname %s but as %d in option file\", splitPort, host, portValue)\n\t\t\t\t}\n\t\t\t\thost = splitHost\n\t\t\t\tthisPortValue = splitPort\n\t\t\t}\n\t\t\tnet, addr = \"tcp\", fmt.Sprintf(\"%s:%d\", host, thisPortValue)\n\t\t}\n\t\tdsn := fmt.Sprintf(\"%s@%s(%s)/?%s\", userAndPass, net, addr, params)\n\t\tinstance, err := util.NewInstance(\"mysql\", dsn)\n\t\tif err != nil {\n\t\t\tif password != \"\" {\n\t\t\t\tsafeUserPass := user + \":*****\"\n\t\t\t\tdsn = strings.Replace(dsn, userAndPass, safeUserPass, 1)\n\t\t\t}\n\t\t\treturn nil, ConfigErrorf(\"Invalid connection information for %s (DSN=%s): %w\", dir, dsn, err)\n\t\t}\n\t\tinstances = append(instances, instance)\n\t}\n\treturn instances, nil\n}", "func (m *AzureManager) GetAsgForInstance(instance *azureRef) (cloudprovider.NodeGroup, error) {\n\treturn m.asgCache.FindForInstance(instance, m.config.VMType)\n}", "func Updater(config *types.Config) {\n\n\tlog.Infof(\"[hostInventoryUpdater] Started\")\n\n\tupdateFrequency := 5\n\n\t// Run forever:\n\tfor {\n\n\t\t// Sleep until the next run:\n\t\tlog.Debugf(\"[hostInventoryUpdater] Sleeping for %vs ...\", updateFrequency)\n\t\ttime.Sleep(time.Duration(updateFrequency) * time.Second)\n\n\t\t// Authenticate with AWS:\n\t\tawsAuth, err := aws.GetAuth(\"\", \"\", \"\", time.Now())\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"[hostInventoryUpdater] Unable to authenticate to AWS! (%s)\", err)\n\t\t\tcontinue\n\t\t} else {\n\t\t\tlog.Debugf(\"[hostInventoryUpdater] Authenticated to AWS\")\n\t\t}\n\n\t\t// Make a new EC2 connection:\n\t\tlog.Debugf(\"[hostInventoryUpdater] Connecting to EC2 ...\")\n\t\tec2Connection := ec2.New(awsAuth, aws.Regions[config.AWSRegion])\n\n\t\t// Prepare a filter:\n\t\tfilter := ec2.NewFilter()\n\t\tfilter.Add(\"instance-state-name\", \"running\")\n\n\t\t// Make a \"DescribeInstances\" call (lists ALL instances in your account):\n\t\tdescribeInstancesResponse, err := ec2Connection.DescribeInstances([]string{}, filter)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"[hostInventoryUpdater] Failed to make describe-instances call: %v\", err)\n\t\t} else {\n\t\t\tlog.Debugf(\"[hostInventoryUpdater] Found %v instances running in your account\", len(describeInstancesResponse.Reservations))\n\n\t\t\t// Lock the host-list (so we don't change it while another goroutine is using it):\n\t\t\tlog.Tracef(\"[hostInventoryUpdater] Trying to lock config.HostInventoryMutex ...\")\n\t\t\tconfig.HostInventoryMutex.Lock()\n\t\t\tlog.Tracef(\"[hostInventoryUpdater] Locked config.HostInventoryMutex\")\n\n\t\t\t// Clear out the existing host-inventory:\n\t\t\tconfig.HostInventory = types.HostInventory{\n\t\t\t\tEnvironments: make(map[string]types.Environment),\n\t\t\t}\n\n\t\t\t// Re-populate it from the describe instances response:\n\t\t\tfor _, reservation := range describeInstancesResponse.Reservations {\n\n\t\t\t\t// Search for our role and environment tags:\n\t\t\t\tvar role, environment string\n\t\t\t\tfor _, tag := range reservation.Instances[0].Tags {\n\t\t\t\t\tif tag.Key == config.RoleTag {\n\t\t\t\t\t\trole = tag.Value\n\t\t\t\t\t}\n\t\t\t\t\tif tag.Key == config.EnvironmentTag {\n\t\t\t\t\t\tenvironment = tag.Value\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Make sure we have environment and role tags:\n\t\t\t\tif environment == \"\" || role == \"\" {\n\t\t\t\t\tlog.Debugf(\"[hostInventoryUpdater] Instance (%v) must have both 'environment' and 'role' metadata in order for DNS records to be creted!\", 
reservation.Instances[0].InstanceId)\n\n\t\t\t\t\t// Continue with the next instance:\n\t\t\t\t\tcontinue\n\t\t\t\t} else {\n\t\t\t\t\tlog.Infof(\"[hostInventoryUpdater] Building records for instance (%v) in zone (%v) ...\", reservation.Instances[0].InstanceId, reservation.Instances[0].AvailabilityZone)\n\t\t\t\t}\n\n\t\t\t\t// Add a new environment to the inventory (unless we already have it):\n\t\t\t\tif _, ok := config.HostInventory.Environments[environment]; !ok {\n\t\t\t\t\tconfig.HostInventory.Environments[environment] = types.Environment{\n\t\t\t\t\t\tDNSRecords: make(map[string][]string),\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Either create or add to the role-per-zone record:\n\t\t\t\tinternalZoneRecord := fmt.Sprintf(\"%v.%v.i.%v.%v\", role, reservation.Instances[0].AvailabilityZone, environment, config.DNSDomainName)\n\t\t\t\tif _, ok := config.HostInventory.Environments[environment].DNSRecords[internalZoneRecord]; !ok {\n\t\t\t\t\tconfig.HostInventory.Environments[environment].DNSRecords[internalZoneRecord] = []string{reservation.Instances[0].PrivateIPAddress}\n\t\t\t\t} else {\n\t\t\t\t\tconfig.HostInventory.Environments[environment].DNSRecords[internalZoneRecord] = append(config.HostInventory.Environments[environment].DNSRecords[internalZoneRecord], reservation.Instances[0].PrivateIPAddress)\n\t\t\t\t}\n\n\t\t\t\t// Either create or add to the role-per-region record:\n\t\t\t\tinternalRegionRecord := fmt.Sprintf(\"%v.%v.i.%v.%v\", role, config.AWSRegion, environment, config.DNSDomainName)\n\t\t\t\tif _, ok := config.HostInventory.Environments[environment].DNSRecords[internalRegionRecord]; !ok {\n\t\t\t\t\tconfig.HostInventory.Environments[environment].DNSRecords[internalRegionRecord] = []string{reservation.Instances[0].PrivateIPAddress}\n\t\t\t\t} else {\n\t\t\t\t\tconfig.HostInventory.Environments[environment].DNSRecords[internalRegionRecord] = append(config.HostInventory.Environments[environment].DNSRecords[internalRegionRecord], reservation.Instances[0].PrivateIPAddress)\n\t\t\t\t}\n\n\t\t\t\t// Either create or add to the external record:\n\t\t\t\tif reservation.Instances[0].IPAddress != \"\" {\n\t\t\t\t\texternalRecord := fmt.Sprintf(\"%v.%v.e.%v.%v\", role, config.AWSRegion, environment, config.DNSDomainName)\n\t\t\t\t\tif _, ok := config.HostInventory.Environments[environment].DNSRecords[externalRecord]; !ok {\n\t\t\t\t\t\tconfig.HostInventory.Environments[environment].DNSRecords[externalRecord] = []string{reservation.Instances[0].IPAddress}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tconfig.HostInventory.Environments[environment].DNSRecords[externalRecord] = append(config.HostInventory.Environments[environment].DNSRecords[externalRecord], reservation.Instances[0].IPAddress)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\n\t\t// Unlock the host-inventory:\n\t\tlog.Tracef(\"[hostInventoryUpdater] Unlocking config.HostInventoryMutex ...\")\n\t\tconfig.HostInventoryMutex.Unlock()\n\n\t\t// Now set the sleep time to the correct value:\n\t\tupdateFrequency = config.HostUpdateFrequency\n\n\t}\n\n}", "func (m *manager) listIGInstances(name string) (sets.String, error) {\n\tnodeNames := sets.NewString()\n\tzones, err := m.ListZones(utils.AllNodesPredicate)\n\tif err != nil {\n\t\treturn nodeNames, err\n\t}\n\n\tfor _, zone := range zones {\n\t\tinstances, err := m.cloud.ListInstancesInInstanceGroup(name, zone, allInstances)\n\t\tif err != nil {\n\t\t\treturn nodeNames, err\n\t\t}\n\t\tfor _, ins := range instances {\n\t\t\tname, err := utils.KeyName(ins.Instance)\n\t\t\tif err != nil 
{\n\t\t\t\treturn nodeNames, err\n\t\t\t}\n\t\t\tnodeNames.Insert(name)\n\t\t}\n\t}\n\treturn nodeNames, nil\n}", "func (s *EcsService) WaitForEcsInstance(instanceId string, status Status, timeout int) error {\n\tif timeout <= 0 {\n\t\ttimeout = DefaultTimeout\n\t}\n\tfor {\n\t\tinstance, err := s.DescribeInstance(instanceId)\n\t\tif err != nil && !NotFoundError(err) {\n\t\t\treturn err\n\t\t}\n\t\tif instance.Status == string(status) {\n\t\t\t//Sleep one more time for timing issues\n\t\t\ttime.Sleep(DefaultIntervalMedium * time.Second)\n\t\t\tbreak\n\t\t}\n\t\ttimeout = timeout - DefaultIntervalShort\n\t\tif timeout <= 0 {\n\t\t\treturn GetTimeErrorFromString(GetTimeoutMessage(\"ECS Instance\", string(status)))\n\t\t}\n\t\ttime.Sleep(DefaultIntervalShort * time.Second)\n\n\t}\n\treturn nil\n}", "func GetInstances(albc aws.ALBAPI, arn *string, instances []string) (aws.Instances, error) {\n\thealthOutput, err := albc.DescribeTargetHealth(createDescribeTargetHealthInput(arn, instances))\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttgInstances := aws.Instances{}\n\tfor _, thd := range healthOutput.TargetHealthDescriptions {\n\t\ttgInstances.AddTargetGroupInstance(thd)\n\t}\n\n\treturn tgInstances, nil\n}", "func (m *EC2Client) StartEC2Instances(instanceIDs []*string) (*ec2.StartInstancesOutput, error) {\n\tresult, err := m.EC2API.StartInstances(&ec2.StartInstancesInput{\n\t\tInstanceIds: instanceIDs,\n\t})\n\tif err != nil {\n\t\tif awsErr, ok := err.(awserr.Error); ok {\n\t\t\treturn nil, awsErr\n\t\t}\n\t}\n\treturn result, nil\n}", "func TerminateInstance() {\n\tlog.Println(\"Starting to run terminate instance process\")\n\t// Get instance id and region from metadata\n\tinstanceId, instanceRegion := getInstanceID()\n\tlog.Printf(\"Working on %v in %v region\", instanceId, instanceRegion)\n\n\t// Init aws session\n\tawsSession,_ := initSession(instanceRegion)\n\tlog.Println(\"Initialized aws session\")\n\n\t// Init Aws auto scaling session\n\tinitAutoScalingAwsSession(awsSession)\n\tlog.Println(\"Initialized auto scaling session\")\n\n\t// Get auto scaling group name\n\tinstanceAutoScaleGroupName := getAutoScalingName(instanceId)\n\tlog.Printf(\"Instance %v auto scaling group name is: %v\", instanceId, instanceAutoScaleGroupName)\n\n\t// Set instance scale in policy to false\n\tsuccess := setScaleInProtectionToInstance(instanceAutoScaleGroupName, instanceId)\n\n\t// Terminate ec2 instance after setting scale in policy to false\n\tif success{\n\t\tterminateInstance(instanceId)\n\t}\n}", "func (a *Client) ReplaceInstancesEnabled2(params *ReplaceInstancesEnabled2Params) (*ReplaceInstancesEnabled2OK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewReplaceInstancesEnabled2Params()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"replaceInstancesEnabled2\",\n\t\tMethod: \"PUT\",\n\t\tPathPattern: \"/instances/{id}/enabled\",\n\t\tProducesMediaTypes: []string{\"\"},\n\t\tConsumesMediaTypes: []string{\"\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &ReplaceInstancesEnabled2Reader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*ReplaceInstancesEnabled2OK), nil\n\n}", "func GetEC2InstanceStatus(experimentsDetails *experimentTypes.ExperimentDetails) (string, error) {\n\n\tvar err error\n\t// Load session from shared config\n\tsess := 
session.Must(session.NewSessionWithOptions(session.Options{\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t\tConfig: aws.Config{Region: aws.String(experimentsDetails.Region)},\n\t}))\n\n\tif experimentsDetails.Ec2InstanceID == \"\" {\n\t\tlog.Infof(\"[PreChaos]: Instance id is not provided, selecting a random instance from %v region\", experimentsDetails.Region)\n\t\texperimentsDetails.Ec2InstanceID, err = GetRandomInstance(experimentsDetails.Region, sess)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Errorf(\"fail to select a random running instance from %v region, err: %v\", experimentsDetails.Region, err)\n\t\t}\n\t}\n\n\t// Create new EC2 client\n\tec2Svc := ec2.New(sess)\n\n\t// Call to get detailed information on each instance\n\tresult, err := ec2Svc.DescribeInstances(nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, reservationDetails := range result.Reservations {\n\n\t\tfor _, instanceDetails := range reservationDetails.Instances {\n\n\t\t\tif *instanceDetails.InstanceId == experimentsDetails.Ec2InstanceID {\n\t\t\t\treturn *instanceDetails.State.Name, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\", errors.Errorf(\"failed to get the status of ec2 instance with instanceID %v\", experimentsDetails.Ec2InstanceID)\n\n}", "func ec2Provider(providers []ProviderPayload) *ProviderPayload {\n\tfor _, provider := range providers {\n\t\tif provider.Name == providerEc2 {\n\t\t\treturn &provider\n\t\t}\n\t}\n\tlog.Fatal(\"ec2 provider is missing\")\n\treturn nil\n}", "func (m *InfrastructureProviderAWS) Bootstrap() *CreatedInstances {\n\tinstances, err := m.createInstances()\n\tif err != nil {\n\t\tlog.Error(\"failed to create instances: %s\", err)\n\t}\n\tfor i := 0; i < 50; i++ {\n\t\ttime.Sleep(5 * time.Second)\n\t\tif m.assureRunning() {\n\t\t\tlog.Info(\"all instances are running\")\n\t\t\tbreak\n\t\t}\n\t}\n\tm.collectPublicAddresses(instances)\n\tm.dumpPrivateKeysToFile(instances)\n\t// TODO: general retry function, even if VM is in state \"running\" ssh may be unavailable,\n\t// has no flag to know it for sure\n\treturn instances\n}", "func createENI(ec2Client *ec2.EC2, cfg *config) (*ec2.NetworkInterface, error) {\n\tvar filterValuesGroupName []*string\n\tfor _, sg := range cfg.securityGroups {\n\t\tfilterValuesGroupName = append(filterValuesGroupName, aws.String(sg))\n\t}\n\t// Get security group id for the security group that the instance was\n\t// started with\n\tsecurityGroups, err := ec2Client.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{\n\t\tFilters: []*ec2.Filter{\n\t\t\t{\n\t\t\t\tName: aws.String(\"group-name\"),\n\t\t\t\tValues: filterValuesGroupName,\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: aws.String(\"vpc-id\"),\n\t\t\t\tValues: []*string{aws.String(cfg.vpc)},\n\t\t\t},\n\t\t}})\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get security group ids\")\n\t}\n\tvar securityGroupIDs []*string\n\tfor _, sg := range securityGroups.SecurityGroups {\n\t\tsecurityGroupIDs = append(securityGroupIDs, sg.GroupId)\n\t}\n\n\t// Create the ENI\n\toutput, err := ec2Client.CreateNetworkInterface(&ec2.CreateNetworkInterfaceInput{\n\t\tDescription: aws.String(\"for running end-to-end test for ECS ENI Plugin\"),\n\t\tGroups: securityGroupIDs,\n\t\tSubnetId: aws.String(cfg.subnet),\n\t})\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to create network interface\")\n\t}\n\treturn output.NetworkInterface, nil\n}" ]
[ "0.64689547", "0.6289963", "0.6155988", "0.6135144", "0.6055158", "0.60221434", "0.58921194", "0.5825013", "0.57424814", "0.56512195", "0.559661", "0.55487907", "0.5525461", "0.5500957", "0.54959786", "0.5408313", "0.538541", "0.5382405", "0.5380657", "0.53662044", "0.53608495", "0.53175294", "0.531646", "0.5308578", "0.52984554", "0.5283315", "0.5270289", "0.5263782", "0.52319443", "0.5231244", "0.5219396", "0.5215077", "0.5196634", "0.5185761", "0.51403767", "0.5134135", "0.5120724", "0.51185477", "0.5102034", "0.50787544", "0.5067903", "0.5057198", "0.50536096", "0.5053215", "0.5051905", "0.5049852", "0.50033695", "0.49682635", "0.49588162", "0.4957845", "0.4953554", "0.4948679", "0.49146506", "0.49143657", "0.49094424", "0.4894537", "0.48678613", "0.48607138", "0.4854364", "0.48516896", "0.485118", "0.4848221", "0.4844767", "0.4829053", "0.48006734", "0.47999287", "0.47941765", "0.4792243", "0.4786117", "0.47726774", "0.4761781", "0.4761367", "0.4753388", "0.47494978", "0.4749085", "0.4733828", "0.4732576", "0.47292575", "0.47280928", "0.47181836", "0.47156858", "0.4708666", "0.46907133", "0.4689835", "0.4681854", "0.46746802", "0.46700642", "0.4653931", "0.46461135", "0.4628407", "0.46188396", "0.46129364", "0.4604352", "0.46015427", "0.45936367", "0.4593487", "0.4591247", "0.459023", "0.45875", "0.458568" ]
0.6949228
0
Load parses environment variables for each field on config that has an "opt" tag. If flags is non-nil, it will be used to define command line flags.
func Load(prefix string, config interface{}, flags *flag.FlagSet) error { opts, err := inferOptions(prefix, config) if err != nil { return err } fs := flags if fs == nil { // Caller doesn't need flags, but create a FlagSet anyway to keep things simple. This // doesn't get parsed. fs = flag.NewFlagSet(prefix, flag.ExitOnError) } for _, opt := range opts { if err := opt.set(fs); err != nil { return err } } if flags != nil { args := os.Args[1:] if err := fs.Parse(args); err != nil { return err } } return nil }
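For illustration, a minimal usage sketch of the Load function above. The import path (example.com/cfgload), the exact "opt" tag syntax, and the MYAPP_ADDR / MYAPP_DEBUG variable names are assumptions inferred from the docstring, not confirmed by the source; inferOptions is not shown, so this is a sketch, not a definitive example.

package main

import (
	"flag"
	"fmt"
	"log"

	"example.com/cfgload" // hypothetical import path for the Load function above
)

type Config struct {
	Addr  string `opt:"addr"`  // assumed tag form: mapped to MYAPP_ADDR and a -addr flag
	Debug bool   `opt:"debug"` // assumed tag form: mapped to MYAPP_DEBUG and a -debug flag
}

func main() {
	var cfg Config
	// Passing a non-nil FlagSet asks Load to also define and parse
	// command line flags; passing nil would skip flag parsing entirely.
	fs := flag.NewFlagSet("myapp", flag.ExitOnError)
	if err := cfgload.Load("MYAPP", &cfg, fs); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("addr=%s debug=%v\n", cfg.Addr, cfg.Debug)
}

Under these assumptions, running MYAPP_ADDR=:9090 ./myapp -debug would populate cfg from both sources; the relative precedence of flags over environment variables depends on inferOptions and opt.set, which are not shown.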
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Load(structs ...interface{}) error {\n\tif flag.Parsed() {\n\t\treturn fmt.Errorf(\"Load must be called before a call to flag.Pars\")\n\t}\n\n\tm := make(map[string]ConfigFlag)\n\n\tnumStructs := len(structs)\n\n\tfieldSetters := make([]setters, numStructs)\n\n\tfor i := 0; i < numStructs; i++ {\n\t\ts := structs[i]\n\n\t\tt := reflect.ValueOf(s).Elem()\n\t\ttypeOfT := t.Type()\n\n\t\tfieldSetters[i].s = make([]SetValue, t.NumField())\n\n\t\tfor j := 0; j < t.NumField(); j++ {\n\t\t\tf := t.Field(j)\n\n\t\t\ttag := typeOfT.Field(j).Tag\n\n\t\t\tvar ok bool\n\t\t\tif _, ok = tag.Lookup(\"env_no\"); ok {\n\t\t\t\tcontinue\n\t\t\t} // Not a configuration field\n\n\t\t\tvar name string\n\t\t\tif name, ok = tag.Lookup(\"env_name\"); !ok {\n\t\t\t\tname = typeOfT.Field(i).Name\n\t\t\t}\n\n\t\t\tdefVal, isDefVal := tag.Lookup(\"env_def\")\n\n\t\t\tvar desc string\n\t\t\tif desc, ok = tag.Lookup(\"env_desc\"); !ok {\n\t\t\t\tdesc = name\n\t\t\t}\n\n\t\t\tvar err error\n\t\t\tfieldSetters[i].s[j], err = parseDefault(m, f, name, defVal, desc, isDefVal)\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\n\tflag.Parse()\n\tfor i := 0; i < numStructs; i++ {\n\t\tfor j := 0; j < len(fieldSetters[i].s); j++ {\n\t\t\tif fieldSetters[i].s[j] != nil {\n\t\t\t\tif err := fieldSetters[i].s[j].Set(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif v, ok := structs[i].(Validate); ok {\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif v, ok := structs[i].(Initialize); ok {\n\t\t\tv.Initialize()\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c *Conf) loadFlags(from []string) {\n\n\tif len(from) == 0 {\n\t\t// Nothing to parse.\n\t\treturn\n\t}\n\tc.Command = from[0]\n\tfs := flag.NewFlagSet(c.Command, flag.ExitOnError)\n\n\t// Define relevant flags\n\tfs.BoolVar(&c.Test.Verbose, \"verbose\", c.Test.Verbose, \"Verbose execution.\")\n\tfs.BoolVar(&c.Test.Verbose, \"v\", c.Test.Verbose, \"Verbose execution.\")\n\tfs.BoolVar(&c.Test.Verbose, \"test.v\", c.Test.Verbose, \"Verbose execution.\")\n\tfs.BoolVar(&c.Test.Verbose, \"test.verbose\", c.Test.Verbose, \"Verbose execution.\")\n\n\tfs.BoolVar(&c.Test.Short, \"test.short\", c.Test.Short, \"Verbose execution.\")\n\tfs.BoolVar(&c.Test.Short, \"short\", c.Test.Short, \"Verbose execution.\")\n\n\tfs.String(\"test.testlogfile\", c.Test.LogFile, \"DO NOT USE - used by go test.\")\n\tfs.String(\"test.timeout\", \"\", \"DO NOT USE - used by go test.\")\n\tfs.String(\"test.run\", \"\", \"DO NOT USE - used by go test.\")\n\n\tfs.StringVar(&c.Addr.Public, \"pubaddr\", c.Addr.Public, \"Public address.\")\n\tfs.StringVar(&c.Addr.Private, \"privaddr\", c.Addr.Private, \"Private address.\")\n\n\t// Actual parsing\n\tfs.Parse(from[1:])\n\n\t// Remaining args ..\n\tc.Args = fs.Args()\n\n\t// Display default flags.\n\t// fs.PrintDefaults()\n\n\t//fmt.Println(\"Flag parsed : \", *fs)\n\n\tc.Parsed.Flags = true\n}", "func Load(opts ...Option) {\n\tdefer flagOut()\n\tmtx.Lock()\n\tdefer mtx.Unlock()\n\tif loaded {\n\t\tpanic(fmt.Errorf(\"flags already loaded\"))\n\t}\n\n\tdefineAliases()\n\n\tvar flagfiles []string\n\tvar skipArgs bool\n\tvar ignoreUnknowns bool\n\tshort_usage := ShortUsage\n\tfull_usage := FullUsage\n\tfor _, opt := range opts {\n\t\tif opt.flagfilePath != \"\" {\n\t\t\tflagfiles = append(flagfiles, opt.flagfilePath)\n\t\t}\n\t\tif opt.skipArgs {\n\t\t\tskipArgs = true\n\t\t}\n\t\tif opt.ignoreUnknowns {\n\t\t\tignoreUnknowns = true\n\t\t}\n\t\tif opt.short_usage != nil 
{\n\t\t\tshort_usage = opt.short_usage\n\t\t}\n\t\tif opt.full_usage != nil {\n\t\t\tfull_usage = opt.full_usage\n\t\t}\n\t}\n\n\tcmdline_set_flags := map[string]bool{}\n\tif !skipArgs {\n\t\tflag.CommandLine.Usage = short_usage\n\t\targs := os.Args[1:]\n\t\tfor _, arg := range args {\n\t\t\tif arg == \"--\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif arg == \"--help-all\" || arg == \"-help-all\" {\n\t\t\t\tfull_usage()\n\t\t\t\tos.Exit(2)\n\t\t\t}\n\t\t}\n\t\tflag.CommandLine.Parse(os.Args[1:])\n\t\tflag.Visit(func(f *flag.Flag) {\n\t\t\tcmdline_set_flags[f.Name] = true\n\t\t\tset_flags[f.Name] = true\n\t\t})\n\t}\n\tloaded = true\n\n\tflagfiles = append(flagfiles, strings.Split(*flagfile, \",\")...)\n\n\tfor len(flagfiles) > 0 {\n\t\tfile := flagfiles[0]\n\t\tflagfiles = flagfiles[1:]\n\t\tif len(file) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tfh, err := os.Open(file)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Errorf(\"unable to open flagfile '%s': %s\", file, err))\n\t\t}\n\t\terr = parser.Parse(fh, func(name, value string) {\n\t\t\tif name == \"flagfile\" {\n\t\t\t\t// allow flagfile chaining\n\t\t\t\tflagfiles = append(flagfiles, value)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// command line flags override file flags\n\t\t\tif cmdline_set_flags[name] {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif ignoreUnknowns && flag.Lookup(name) == nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\terr := trySet(name, value)\n\n\t\t\tif err != nil {\n\t\t\t\tset_flags[name] = true\n\t\t\t}\n\t\t})\n\t\tfh.Close()\n\t\tif err != nil {\n\t\t\tpanic(fmt.Errorf(\"'%s': %s\", file, err))\n\t\t}\n\t}\n\n\tsetAliases()\n}", "func UnmarshalFromFlags(c interface{}) {\n\ttopType := reflect.TypeOf(c).Elem()\n\ttopValue := reflect.ValueOf(c)\n\tfor i := 0; i < topType.NumField(); i++ {\n\t\tfield := topType.Field(i)\n\t\tif field.Tag.Get(\"cli\") != \"\" {\n\t\t\tdesc := field.Tag.Get(\"desc\")\n\t\t\tif field.Tag.Get(\"env\") != \"\" {\n\t\t\t\tdesc = desc + \"; This variable is set by ENV \\\"\" + field.Tag.Get(\"env\") + \"\\\"\"\n\t\t\t}\n\t\t\tswitch field.Type.Kind() {\n\t\t\tcase reflect.Bool:\n\t\t\t\ttemp := flag.Bool(field.Tag.Get(\"cli\"), topValue.Elem().Field(i).Bool(), desc)\n\t\t\t\tf := topValue.Elem().Field(i)\n\t\t\t\tdefer func() { f.SetBool(*temp) }()\n\t\t\tcase reflect.Int64:\n\t\t\t\ttemp := flag.Int64(field.Tag.Get(\"cli\"), topValue.Elem().Field(i).Int(), desc)\n\t\t\t\tf := topValue.Elem().Field(i)\n\t\t\t\tdefer func() { f.SetInt(*temp) }()\n\t\t\tcase reflect.String:\n\t\t\t\ttemp := flag.String(field.Tag.Get(\"cli\"), topValue.Elem().Field(i).String(), desc)\n\t\t\t\tf := topValue.Elem().Field(i)\n\t\t\t\tdefer func() { f.SetString(*temp) }()\n\t\t\tcase reflect.Float64:\n\t\t\t\ttemp := flag.Float64(field.Tag.Get(\"cli\"), topValue.Elem().Field(i).Float(), desc)\n\t\t\t\tf := topValue.Elem().Field(i)\n\t\t\t\tdefer func() { f.SetFloat(*temp) }()\n\t\t\t}\n\t\t}\n\t}\n\tflag.Parse()\n}", "func Parse(fs *flag.FlagSet, args []string, options ...Option) error {\n\tvar c Context\n\tfor _, option := range options {\n\t\toption(&c)\n\t}\n\n\t// First priority: commandline flags (explicit user preference).\n\tif err := fs.Parse(args); err != nil {\n\t\treturn fmt.Errorf(\"error parsing commandline args: %w\", err)\n\t}\n\n\tprovided := map[string]bool{}\n\tfs.Visit(func(f *flag.Flag) {\n\t\tprovided[f.Name] = true\n\t})\n\n\t// Second priority: environment variables (session).\n\tif parseEnv := c.envVarPrefix != \"\" || c.envVarNoPrefix; parseEnv {\n\t\tvar visitErr error\n\t\tfs.VisitAll(func(f *flag.Flag) {\n\t\t\tif visitErr != nil 
{\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif provided[f.Name] {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tvar key string\n\t\t\tkey = strings.ToUpper(f.Name)\n\t\t\tkey = envVarReplacer.Replace(key)\n\t\t\tkey = maybePrefix(key, c.envVarNoPrefix, c.envVarPrefix)\n\n\t\t\tvalue := os.Getenv(key)\n\t\t\tif value == \"\" {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, v := range maybeSplit(value, c.envVarSplit) {\n\t\t\t\tif err := fs.Set(f.Name, v); err != nil {\n\t\t\t\t\tvisitErr = fmt.Errorf(\"error setting flag %q from env var %q: %w\", f.Name, key, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\tif visitErr != nil {\n\t\t\treturn fmt.Errorf(\"error parsing env vars: %w\", visitErr)\n\t\t}\n\t}\n\n\tfs.Visit(func(f *flag.Flag) {\n\t\tprovided[f.Name] = true\n\t})\n\n\t// Third priority: config file (host).\n\tif c.configFile == \"\" && c.configFileFlagName != \"\" {\n\t\tif f := fs.Lookup(c.configFileFlagName); f != nil {\n\t\t\tc.configFile = f.Value.String()\n\t\t}\n\t}\n\n\tif parseConfig := c.configFile != \"\" && c.configFileParser != nil; parseConfig {\n\t\tf, err := os.Open(c.configFile)\n\t\tswitch {\n\t\tcase err == nil:\n\t\t\tdefer f.Close()\n\t\t\tif err := c.configFileParser(f, func(name, value string) error {\n\t\t\t\tif provided[name] {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tdefined := fs.Lookup(name) != nil\n\t\t\t\tswitch {\n\t\t\t\tcase !defined && c.ignoreUndefined:\n\t\t\t\t\treturn nil\n\t\t\t\tcase !defined && !c.ignoreUndefined:\n\t\t\t\t\treturn fmt.Errorf(\"config file flag %q not defined in flag set\", name)\n\t\t\t\t}\n\n\t\t\t\tif err := fs.Set(name, value); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"error setting flag %q from config file: %w\", name, err)\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase os.IsNotExist(err) && c.allowMissingConfigFile:\n\t\t\t// no problem\n\n\t\tdefault:\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfs.Visit(func(f *flag.Flag) {\n\t\tprovided[f.Name] = true\n\t})\n\n\treturn nil\n}", "func Load(environmentVariablesWithDefaults Map) *config {\n\tparsedConfigs := Map{}\n\n\t// Load bundledConfigs into custom ones only if custom does not define them already\n\tfor environmentVariable, defaultValue := range bundledConfigs {\n\t\tif _, exists := environmentVariablesWithDefaults[environmentVariable]; !exists {\n\t\t\tenvironmentVariablesWithDefaults[environmentVariable] = defaultValue\n\t\t}\n\t}\n\n\t// Merge custom and bundledConfigs, fetching environment variables\n\tfor environmentVariable, defaultValue := range environmentVariablesWithDefaults {\n\t\tparsedConfigs[toCamelCase(environmentVariable)] = getEnvVarWithDefault(environmentVariable, defaultValue)\n\t}\n\n\treturn &config{parsedConfigs}\n}", "func (config *Config) Load() error {\n\tvar env string\n\tflag.StringVar(&env, \"env\", \"dev\", \"environment\")\n\n\tflag.Parse()\n\n\tviperRegistry := viper.New()\n\tviperRegistry.AddConfigPath(\"./config\")\n\tviperRegistry.SetConfigName(env)\n\tviperRegistry.SetConfigType(\"json\")\n\tviperRegistry.SetEnvPrefix(\"todo\")\n\tviperRegistry.AutomaticEnv()\n\n\tconfig.Env = env\n\n\tif err := viperRegistry.ReadInConfig(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := config.configureApplication(viperRegistry); err != nil {\n\t\treturn err\n\t}\n\n\tif err := config.configureDB(viperRegistry); err != nil {\n\t\treturn err\n\t}\n\n\tif err := config.configureAuth(viperRegistry); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func LoadAppFlags() {\n\tconfigFile := pflag.StringP(\"config\", 
\"c\", \"\", \"Config file for the server\")\n\tshowVersion := pflag.BoolP(\"version\", \"v\", false, \"Version of the server\")\n\tpflag.Parse()\n\t// print version with the flag: --version\n\tif *showVersion {\n\t\tfmt.Printf(\"v%s\\n\", Version)\n\t\tos.Exit(0)\n\t}\n\t// export configFile if present\n\tif *configFile != \"\" {\n\t\tConfigFile = configFile\n\t}\n}", "func (s *FlagsSource) Load(ctx context.Context, services []string) (err error) {\n\tconst (\n\t\tdelimiter = \"-\"\n\t\tassignment = \"=\"\n\t)\n\n\tfor _, svc := range uniqueStrings(services) {\n\t\ts.data[svc] = conf.NewMapConfig(sieveServiceConfig(svc, s.prefix, delimiter, assignment, s.args))\n\t}\n\n\treturn err\n}", "func (c *Config) LoadFromFlags() {\n\tfileName := flag.String(\"puppetfile\", \"\", \"Original PuppetFile\")\n\tmodule := flag.String(\"module\", \"\", \"The module whos properties to update\")\n\tkey := flag.String(\"key\", \"\", \"The key of the property to change\")\n\tvalue := flag.String(\"value\", \"\", \"Value of the property that will be set\")\n\tflag.Parse()\n\tflag.Usage = func() {\n\t\tfmt.Printf(\"Usage: %s [OPTIONS] argument ...\\n\", os.Args[0])\n\t\tflag.PrintDefaults()\n\t}\n\tif flag.Lookup(\"help\") != nil {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\n\tc.FileName = *fileName\n\tc.Module = *module\n\tc.Key = *key\n\tc.Value = *value\n}", "func (c *Conf) loadEnv() {\n\n\tfor _, e := range os.Environ() {\n\t\tpair := strings.Split(e, \"=\")\n\t\tk := strings.ToUpper(pair[0])\n\t\tv := strings.Join(pair[1:], \"=\")\n\t\tswitch k {\n\t\tcase \"HOME\":\n\t\t\tc.Env.Home = v\n\t\tcase \"USER\":\n\t\t\tc.Env.User = v\n\t\tcase \"PWD\":\n\t\t\tc.Env.PWD = v\n\t\tcase \"TRAVIS\":\n\t\t\tc.Env.Travis = true\n\t\t}\n\t}\n\tc.Parsed.Env = true\n\n}", "func (c *Config) LoadFlags(arguments []string) error {\n\tvar peers, ignoredString string\n\n\tf := flag.NewFlagSet(os.Args[0], flag.ContinueOnError)\n\tf.SetOutput(ioutil.Discard)\n\tf.StringVar(&c.EtcdAddr, \"addr\", c.EtcdAddr, \"\")\n\tf.StringVar(&c.AppsFile, \"apps-file\", c.AppsFile, \"\")\n\tf.IntVar(&c.BalanceTimeout, \"balance-timeout\", c.BalanceTimeout, \"\")\n\tf.StringVar(&c.BindAddr, \"bind-addr\", c.BindAddr, \"\")\n\tf.StringVar(&c.CAFile, \"ca-file\", c.CAFile, \"\")\n\tf.StringVar(&c.CertFile, \"cert-file\", c.CertFile, \"\")\n\tf.StringVar(&c.DataDir, \"data-dir\", c.DataDir, \"\")\n\tf.StringVar(&c.Discovery, \"discovery\", c.Discovery, \"\")\n\tf.IntVar(&c.InstanceExpirationTime, \"instance-expiration-time\", c.InstanceExpirationTime, \"\")\n\tf.StringVar(&c.KeyFile, \"key-file\", c.KeyFile, \"\")\n\tf.BoolVar(&c.Force, \"f\", false, \"\")\n\tf.BoolVar(&c.Force, \"force\", false, \"\")\n\tf.StringVar(&c.LoadBalancerAddr, \"load-balancer-addr\", c.LoadBalancerAddr, \"\")\n\tf.StringVar(&c.Name, \"name\", c.Name, \"\")\n\tf.StringVar(&peers, \"peers\", \"\", \"\")\n\tf.StringVar(&c.PrivateAddr, \"private-addr\", c.PrivateAddr, \"\")\n\tf.StringVar(&c.PublicAddr, \"public-addr\", c.PublicAddr, \"\")\n\tf.BoolVar(&c.Snapshot, \"snapshot\", true, \"\")\n\tf.IntVar(&c.SnapshotCount, \"snapshot-count\", c.SnapshotCount, \"\")\n\tf.BoolVar(&c.Verbose, \"v\", c.Verbose, \"\")\n\tf.BoolVar(&c.Verbose, \"verbose\", c.Verbose, \"\")\n\n\tf.StringVar(&c.Peer.Addr, \"peer-addr\", c.Peer.Addr, \"\")\n\tf.StringVar(&c.Peer.BindAddr, \"peer-bind-addr\", c.Peer.BindAddr, \"\")\n\tf.StringVar(&c.Peer.CAFile, \"peer-ca-file\", c.Peer.CAFile, \"\")\n\tf.StringVar(&c.Peer.CertFile, \"peer-cert-file\", c.Peer.CertFile, 
\"\")\n\tf.StringVar(&c.Peer.KeyFile, \"peer-key-file\", c.Peer.KeyFile, \"\")\n\tf.IntVar(&c.Peer.HeartbeatTimeout, \"peer-heartbeat-timeout\", c.Peer.HeartbeatTimeout, \"\")\n\tf.IntVar(&c.Peer.ElectionTimeout, \"peer-election-timeout\", c.Peer.ElectionTimeout, \"\")\n\n\t// BEGIN IGNORED FLAGS\n\tf.StringVar(&ignoredString, \"config\", \"\", \"\")\n\t// BEGIN IGNORED FLAGS\n\n\tif err := f.Parse(arguments); err != nil {\n\t\treturn err\n\t}\n\n\t// Convert some parameters to lists.\n\tif peers != \"\" {\n\t\tc.Peers = strings.Split(peers, \",\")\n\t}\n\n\treturn nil\n}", "func (cf *CoreFlags) ParseEnv() {\n\n\tif env := os.Getenv(envLogLevel); env != \"\" {\n\t\tcf.LogLevel = env\n\t}\n\tfmt.Printf(\"Environment variable '%s' setting to : %s\\n\", envLogLevel, cf.LogLevel)\n\n\tif env := os.Getenv(envMsgbusEndPoint); env != \"\" {\n\t\tcf.MsgbusEndPoint = env\n\t}\n\tfmt.Printf(\"Environment variable '%s' setting to : %s\\n\", envMsgbusEndPoint, cf.MsgbusEndPoint)\n\n\tif env := os.Getenv(envMsgbusRetryInterval); env != \"\" {\n\t\tinterval, err := strconv.Atoi(env)\n\t\tif err == nil {\n\t\t\tcf.MsgbusRetryInterval = time.Duration(interval) * time.Second\n\t\t} else {\n\t\t\tfmt.Printf(\"Invalid value '%s' passed for '%s'. Taking the default value.\\n\", env, envMsgbusRetryInterval)\n\t\t}\n\t}\n\tfmt.Printf(\"Environment variable '%s' setting to : %s\\n\", envMsgbusRetryInterval, cf.MsgbusRetryInterval)\n\n\tif env := os.Getenv(envDbEndPoint); env != \"\" {\n\t\tcf.DbEndPoint = env\n\t}\n\tfmt.Printf(\"Environment variable '%s' setting to : %s\\n\", envDbEndPoint, cf.DbEndPoint)\n\n\tif env := os.Getenv(envDbTimeout); env != \"\" {\n\t\tinterval, err := strconv.Atoi(env)\n\t\tif err == nil {\n\t\t\tcf.DbTimeout = time.Duration(interval) * time.Second\n\t\t} else {\n\t\t\tfmt.Printf(\"Invalid value '%s' passed for '%s'. Taking the default value.\\n\", env, envDbTimeout)\n\t\t}\n\t}\n\tfmt.Printf(\"Environment variable '%s' setting to : %s\\n\", envDbTimeout, cf.DbTimeout)\n\n\tif env := os.Getenv(envGrpcEndPoint); env != \"\" {\n\t\tcf.GrpcFlags.GrpcEndPoint = env\n\t}\n\tfmt.Printf(\"Environment variable '%s' setting to : %s\\n\", envGrpcEndPoint, cf.GrpcFlags.GrpcEndPoint)\n\n\tif env := os.Getenv(envGrpcRetryInterval); env != \"\" {\n\t\tinterval, err := strconv.Atoi(env)\n\t\tif err == nil {\n\t\t\tcf.GrpcFlags.GrpcRetryInterval = time.Duration(interval) * time.Second\n\t\t} else {\n\t\t\tfmt.Printf(\"Invalid value '%s' passed for '%s'. Taking the default value.\\n\", env, envGrpcRetryInterval)\n\t\t}\n\t}\n\tfmt.Printf(\"Environment variable '%s' setting to : %s\\n\", envGrpcRetryInterval, cf.GrpcFlags.GrpcRetryInterval)\n\n\tif env := os.Getenv(envGrpcBackoffMaxDelay); env != \"\" {\n\t\tinterval, err := strconv.Atoi(env)\n\t\tif err == nil {\n\t\t\tcf.GrpcFlags.GrpcBackoffMaxDelay = time.Duration(interval) * time.Second\n\t\t} else {\n\t\t\tfmt.Printf(\"Invalid value '%s' passed for '%s'. Taking the default value.\\n\", env, envGrpcBackoffMaxDelay)\n\t\t}\n\t}\n\tfmt.Printf(\"Environment variable '%s' setting to : %s\\n\", envGrpcBackoffMaxDelay, cf.GrpcFlags.GrpcBackoffMaxDelay)\n\n\tif env := os.Getenv(envGrpcMaxRetryCount); env != \"\" {\n\t\tmaxRetry, err := strconv.Atoi(env)\n\t\tif err == nil {\n\t\t\tcf.GrpcFlags.GrpcMaxRetryCount = maxRetry\n\t\t} else {\n\t\t\tfmt.Printf(\"Invalid value '%s' passed for '%s'. 
Taking the default value.\\n\", env, envGrpcMaxRetryCount)\n\t\t}\n\t}\n\tfmt.Printf(\"Environment variable '%s' setting to : %v\\n\", envGrpcMaxRetryCount, cf.GrpcFlags.GrpcMaxRetryCount)\n\n\tif env := os.Getenv(envSecureConnection); env != \"\" {\n\t\tsecureCon, err := strconv.ParseBool(env)\n\t\tif err == nil {\n\t\t\tcf.SecureConnection = secureCon\n\t\t} else {\n\t\t\tfmt.Printf(\"Invalid value '%s' passed for '%s'. Taking the default value.\\n\", env, envSecureConnection)\n\t\t}\n\t}\n\tfmt.Printf(\"Environment variable '%s' setting to : %v\\n\", envSecureConnection, cf.SecureConnection)\n\n}", "func (c *Config) setupEnvAndFlags(gCfg interface{}) error {\n\t// Supports fetching value from env for all config of type: int, float64, bool, and string\n\tc.Viper.AutomaticEnv()\n\tc.Viper.SetEnvKeyReplacer(strings.NewReplacer(\"-\", \"_\"))\n\tc.Viper.SetEnvPrefix(c.Cmd.Name())\n\treturn eachSubField(gCfg, func(parent reflect.Value, subFieldName string, crumbs []string) error {\n\t\tp := strings.Join(crumbs, \"\")\n\t\tenvStr := envString(p, subFieldName)\n\t\tflagStr := flagString(p, subFieldName)\n\t\tc.Viper.BindEnv(envStr)\n\n\t\tsubField, _ := parent.Type().FieldByName(subFieldName)\n\n\t\tdesc := subField.Tag.Get(\"desc\")\n\t\tif desc == \"\" {\n\t\t\tdesc = subField.Tag.Get(\"description\")\n\t\t}\n\t\t_def := subField.Tag.Get(\"def\")\n\t\tif _def == \"\" {\n\t\t\t_def = subField.Tag.Get(\"default\")\n\t\t}\n\t\t_, req := subField.Tag.Lookup(\"required\")\n\t\tswitch subField.Type.Kind() {\n\t\tcase reflect.Bool:\n\t\t\tc.Cmd.PersistentFlags().Bool(flagStr, false, desc)\n\t\tcase reflect.Int:\n\t\t\tvar def int\n\t\t\tif b, err := strconv.ParseInt(_def, 10, 32); err == nil {\n\t\t\t\tdef = int(b)\n\t\t\t}\n\t\t\tc.Cmd.PersistentFlags().Int(flagStr, def, desc)\n\t\tcase reflect.Int64:\n\t\t\tvar def int64\n\t\t\tif b, err := strconv.ParseInt(_def, 10, 64); err == nil {\n\t\t\t\tdef = b\n\t\t\t}\n\t\t\tc.Cmd.PersistentFlags().Int64(flagStr, def, desc)\n\t\tcase reflect.String:\n\t\t\tc.Cmd.PersistentFlags().String(flagStr, _def, desc)\n\t\tcase reflect.Float32:\n\t\t\tvar def float64\n\t\t\tif b, err := strconv.ParseFloat(_def, 32); err == nil {\n\t\t\t\tdef = b\n\t\t\t}\n\t\t\tc.Cmd.PersistentFlags().Float64(flagStr, def, desc)\n\t\tcase reflect.Float64:\n\t\t\tvar def float64\n\t\t\tif b, err := strconv.ParseFloat(_def, 64); err == nil {\n\t\t\t\tdef = b\n\t\t\t}\n\t\t\tc.Cmd.PersistentFlags().Float64(flagStr, def, desc)\n\t\tcase reflect.Slice:\n\t\t\tdef := strings.Split(_def, \",\")\n\t\t\tif len(def[0]) == 0 {\n\t\t\t\tdef = nil\n\t\t\t}\n\t\t\tif subField.Type.Elem().Kind() != reflect.String {\n\t\t\t\treturn fmt.Errorf(\"%s is unsupported by config @ %s.%s\", subField.Type.String(), p, subFieldName)\n\t\t\t}\n\t\t\tc.Cmd.PersistentFlags().StringSlice(flagStr, def, desc)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"%s is unsupported by config @ %s.%s\", subField.Type.String(), p, subFieldName)\n\t\t}\n\t\tif req {\n\t\t\tc.Cmd.MarkPersistentFlagRequired(flagStr)\n\t\t}\n\t\tc.Viper.BindPFlag(flagStr, c.Cmd.PersistentFlags().Lookup(flagStr))\n\t\treturn nil\n\t})\n\n}", "func LoadFromEnv(v interface{}, prefix string) (result []MarshalledEnvironmentVar) {\n\tpointerValue := reflect.ValueOf(v)\n\tstructValue := pointerValue.Elem()\n\tstructType := structValue.Type()\n\n\tfor i := 0; i < structValue.NumField(); i++ {\n\t\tstructField := structType.Field(i)\n\t\tfieldValue := structValue.Field(i)\n\n\t\tif fieldValue.CanSet() {\n\t\t\tenvKey := strings.ToUpper(prefix) + 
gocase.ToUpperSnake(structField.Name)\n\t\t\tenvVal := os.Getenv(envKey)\n\n\t\t\tif envVal != \"\" {\n\t\t\t\t// create a json blob with the env data\n\t\t\t\tjsonStr := \"\"\n\t\t\t\tif fieldValue.Kind() == reflect.String {\n\t\t\t\t\tjsonStr = fmt.Sprintf(`{\"%s\": \"%s\"}`, structField.Name, envVal)\n\t\t\t\t} else {\n\t\t\t\t\tjsonStr = fmt.Sprintf(`{\"%s\": %s}`, structField.Name, envVal)\n\t\t\t\t}\n\n\t\t\t\terr := json.Unmarshal([]byte(jsonStr), v)\n\t\t\t\tresult = append(result, MarshalledEnvironmentVar{envKey, envVal, structField.Name, err})\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}", "func (in *Input) LoadFromEnv() {\n\tnum := reflect.ValueOf(in).Elem().NumField()\n\tfor i := 0; i < num; i++ {\n\t\ttField := reflect.TypeOf(in).Elem().Field(i)\n\t\tvField := reflect.ValueOf(in).Elem().Field(i)\n\t\tvalue, ok := os.LookupEnv(envPrefix + tField.Tag.Get(\"env\"))\n\t\tif ok {\n\t\t\tvField.Set(reflect.ValueOf(value))\n\t\t}\n\t}\n}", "func Load(lc *LoadConfig) error {\n\t// Configure Viper and read in the configuration file.\n\tviper.SetConfigName(lc.configName)\n\tviper.SetConfigType(lc.configType)\n\tviper.AddConfigPath(lc.configPath)\n\tif err := viper.ReadInConfig(); err != nil {\n\t\treturn fmt.Errorf(\"loading env: %v\", err)\n\t}\n\n\t// Return only the variables for the target environment.\n\tenv.Lock()\n\tdefer env.Unlock()\n\tenv.vars = viper.Sub(lc.targetEnv)\n\tif env.vars == nil {\n\t\treturn ErrNoEnvKeys\n\t}\n\n\treturn nil\n}", "func ParseFlags(envVar string, parsedVal *string) {\n\tInstanceArgs[envVar] = parsedVal\n}", "func Load(cmd *cobra.Command) (*Config, error) {\n\terr := viper.BindPFlags(cmd.Flags())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// default viper configs\n\tviper.SetEnvPrefix(\"EXM\")\n\tviper.SetEnvKeyReplacer(strings.NewReplacer(\".\", \"_\"))\n\tviper.AutomaticEnv()\n\n\t// set default configs\n\tsetDefaultConfig()\n\n\tif configFile, _ := cmd.Flags().GetString(\"config\"); configFile != \"\" {\n\t\tviper.SetConfigFile(configFile)\n\t} else {\n\t\tviper.SetConfigName(\".exemplar\")\n\t\tviper.AddConfigPath(\"./\")\n\t\tviper.AddConfigPath(\"$HOME/.exemplar\")\n\t}\n\n\tif err := viper.ReadInConfig(); err != nil {\n\t\tfmt.Println(\"Warning: No configuration file found. 
Proceeding with defaults\")\n\t}\n\n\treturn populateConfig(new(Config))\n}", "func Load(path string) error {\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\tset := make(map[string]bool)\n\tflag.Visit(func(f *flag.Flag) {\n\t\tset[f.Name] = true\n\t})\n\n\tscanner := bufio.NewScanner(file)\n\tlineno := 0\n\tfor scanner.Scan() {\n\t\tlineno++\n\t\tline := strings.TrimSpace(scanner.Text())\n\t\tif len(line) == 0 || strings.HasPrefix(line, \"#\") {\n\t\t\tcontinue\n\t\t}\n\t\tm := keyValue.FindStringSubmatch(line)\n\t\tif m == nil {\n\t\t\treturn fmt.Errorf(\"%s:%d: bad format, expected 'key = value'\", path, lineno)\n\t\t}\n\t\tname := m[1]\n\t\tvalue := m[2]\n\t\tif strings.HasPrefix(value, \"\\\"\") {\n\t\t\tvar s string\n\t\t\t_, err := fmt.Sscanf(value, \"%q\", &s)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"%s:%d: bad format, unmatched doublequote\", path, lineno)\n\t\t\t}\n\t\t\tvalue = s\n\t\t}\n\t\tf := flag.Lookup(name)\n\t\tif f == nil {\n\t\t\treturn fmt.Errorf(\"%s:%d: unrecognized flag '%v'\", path, lineno, name)\n\t\t}\n\t\tif !set[name] {\n\t\t\terr = f.Value.Set(value)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"%s:%d: %s\", path, lineno, err)\n\t\t\t}\n\t\t}\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func Parse(config interface{}) { Define(config); flag.Parse() }", "func (l *Loader) Load(s konfig.Values) error {\n\tif l.cfg.Vars != nil && len(l.cfg.Vars) > 0 {\n\t\treturn l.loadVars(s)\n\t}\n\tfor _, v := range os.Environ() {\n\t\tvar spl = strings.SplitN(v, sepEnvVar, 2)\n\t\t// if has regex and key does not macth regexp we continue\n\t\tif l.r != nil && !l.r.MatchString(spl[0]) {\n\t\t\tcontinue\n\t\t}\n\t\tvar k = spl[0]\n\t\tif l.cfg.Replacer != nil {\n\t\t\tk = l.cfg.Replacer.Replace(k)\n\t\t}\n\t\tk = l.cfg.Prefix + k\n\t\ts.Set(k, spl[1])\n\t}\n\n\treturn nil\n}", "func (ctx *AppContext) Load(env string) {\n\tlog.Println(\"Load app context\")\n\n\t// Load env specific config\n\tenvConfig := viper.Sub(env)\n\tctx.Env = env\n\tctx.ProjectID = envConfig.GetString(\"project_id\")\n\tctx.SuffixOfKind = envConfig.GetString(\"datastore.kind_suffix\")\n\tctx.EtcdServers = envConfig.GetStringSlice(\"etcd\")\n\n\t// Load common config\n\tctx.CommonConfig = viper.Sub(\"common\")\n}", "func Load() {\n\t_ = env.Parse(&RedisConfig)\n\t_ = env.Parse(&MongoConfig)\n}", "func Load(filenames ...string) error {\n\tif len(filenames) == 0 {\n\t\tfilenames = envFileNames\n\t}\n\n\t// load files\n\tfiles, err := loadFiles(false, filenames...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tglobalEnvMap := NewMap()\n\n\t// parse files\n\tfor _, content := range files {\n\t\t// parse file\n\t\temap := Parse(content)\n\n\t\tglobalEnvMap.SetMap(emap)\n\t}\n\n\tif len(adapters) != 0 {\n\t\t// run pull secrets from adapters\n\t\tfor _, adapter := range adapters {\n\n\t\t\t// pulling secrets\n\t\t\temap, err := adapter.Pull()\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"error occured running adapter: %s\", err)\n\t\t\t}\n\n\t\t\t// set adapters EnvMap to global EnvMap\n\t\t\tglobalEnvMap.SetMap(emap)\n\t\t}\n\t}\n\n\t// set env map to env\n\terr = setEnvMap(globalEnvMap)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func parseFlags(c *Configuration) {\n\tif err := c.pFlag.Parse(os.Args[1:]); err != nil {\n\t\thandleError(err)\n\t}\n\tif err := c.viperFlag.BindPFlags(c.pFlag); err != nil {\n\t\thandleError(err)\n\t}\n}", "func Load(filenames ...string) error {\n\treturn 
loadenv(false, filenames...)\n}", "func (s *EnvironmentSource) Load(_ *schema.StructValidator) (err error) {\n\tkeyMap, ignoredKeys := getEnvConfigMap(schema.Keys, s.prefix, s.delimiter)\n\n\treturn s.koanf.Load(env.ProviderWithValue(s.prefix, constDelimiter, koanfEnvironmentCallback(keyMap, ignoredKeys, s.prefix, s.delimiter)), nil)\n}", "func parseFlags() error {\n\tpflag.UintVar(&params.Port, Port, 0, \"application HTTP port\")\n\n\tpflag.StringVar(&params.ConfigFileName, FileName, \"config\", \"config file name\")\n\tparams.ConfigFileName = strings.TrimSuffix(params.ConfigFileName, filepath.Ext(params.ConfigFileName))\n\n\tpflag.StringVar(&params.DSN, DSN, \"\", \"DSN data for DB access\")\n\tpflag.StringVar(&params.DBDriver, DBDriver, \"postgres\", \"DB driver name\")\n\tpflag.BoolVar(&params.Debug, Debug, false, \"enable debug mode\")\n\n\tpflag.Parse()\n\treturn viper.BindPFlags(pflag.CommandLine)\n}", "func Load() error {\n\t// read .env\n\tcontents, fileErr := ioutil.ReadFile(\"./.env\")\n\tif fileErr != nil {\n\t\treturn fileErr\n\t}\n\n\t// parse .env conents\n\tenvs, parseErr := parse(string(contents))\n\tif parseErr != nil {\n\t\treturn parseErr\n\t}\n\n\t// set parsed environment variables\n\tfor k, v := range envs {\n\t\tos.Setenv(k, v)\n\t}\n\n\treturn nil\n}", "func (c *Configuration) ParseFlags(args []string) {\n\tflag.CommandLine.Parse(args)\n}", "func ParseFlags() {\n\tconfig.ParseFlags()\n}", "func ParseFlags(monolith bool) *config.Dendrite {\n\tflag.Parse()\n\n\tif *version {\n\t\tfmt.Println(internal.VersionString())\n\t\tos.Exit(0)\n\t}\n\n\tif *configPath == \"\" {\n\t\tlogrus.Fatal(\"--config must be supplied\")\n\t}\n\n\tcfg, err := config.Load(*configPath)\n\n\tif err != nil {\n\t\tlogrus.Fatalf(\"Invalid config file: %s\", err)\n\t}\n\n\tif *enableRegistrationWithoutVerification {\n\t\tcfg.ClientAPI.OpenRegistrationWithoutVerificationEnabled = true\n\t}\n\n\treturn cfg\n}", "func LoadEnv(fn string) ([]string, error) {\n\tbuf, err := os.ReadFile(fn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlines := strings.Split(string(buf), \"\\n\")\n\tenv := []string{}\n\tfor _, x := range lines {\n\t\tif strings.Index(x, \"=\") == -1 {\n\t\t\tcontinue\n\t\t}\n\n\t\ta := strings.SplitN(x, \"=\", 2)\n\t\tk := strings.TrimSpace(a[0])\n\t\tv := strings.TrimSpace(a[1])\n\t\te := fmt.Sprintf(\"%s=%s\", k, v)\n\t\tenv = append(env, e)\n\t}\n\treturn env, nil\n}", "func ParseFeaturesFromEnv() error {\n\treturn ParseFeatures(viper.GetString(FeatureGateFlag))\n}", "func Load() error {\n\tfile, err := os.Open(\".env/config.json\")\n\tdefer file.Close()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tparser := json.NewDecoder(file)\n\terr = parser.Decode(&config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(config.Backend.MongoDB.URI) < 1 {\n\t\tconfig.Backend.MongoDB.URI = \"mongodb://127.0.0.1:27017\"\n\t}\n\n\tif len(config.Backend.MongoDB.Database) < 1 {\n\t\tconfig.Backend.MongoDB.Database = \"ikuta\"\n\t}\n\n\tif len(config.HTTP.Address) < 1 {\n\t\tconfig.HTTP.Address = \":7136\"\n\t}\n\n\treturn nil\n}", "func LoadEnv(l logger.ILog, prefix string) *Settings {\n\ts := new(Settings)\n\terr := env.Unmarshal(s, prefix)\n\tif err != nil {\n\t\tl.Fatalf(\"error getting environment variables: %v\", err.Error())\n\t}\n\n\treturn s\n}", "func (c *Config) Load() error {\n\tif err := env.Parse(c); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func flagsFromEnv(prefix string, fs *flag.FlagSet) {\n\talreadySet := make(map[string]bool)\n\tfs.Visit(func(f *flag.Flag) 
{\n\t\talreadySet[f.Name] = true\n\t})\n\tfs.VisitAll(func(f *flag.Flag) {\n\t\tif !alreadySet[f.Name] {\n\t\t\tkey := strings.ToUpper(prefix + \"_\" + strings.Replace(f.Name, \"-\", \"_\", -1))\n\t\t\tval := os.Getenv(key)\n\t\t\tif val != \"\" {\n\t\t\t\tfs.Set(f.Name, val)\n\t\t\t}\n\t\t}\n\t})\n}", "func (e *Environ) load(s store.Store, service string, collisions *[]string, noPaths bool) error {\n\trawSecrets, err := s.ListRaw(strings.ToLower(service))\n\tif err != nil {\n\t\treturn err\n\t}\n\tenvVarKeys := make([]string, 0)\n\tfor _, rawSecret := range rawSecrets {\n\t\tenvVarKey := strings.ToUpper(key(rawSecret.Key, noPaths))\n\t\tenvVarKey = strings.Replace(envVarKey, \"-\", \"_\", -1)\n\n\t\tenvVarKeys = append(envVarKeys, envVarKey)\n\n\t\tif e.IsSet(envVarKey) {\n\t\t\t*collisions = append(*collisions, envVarKey)\n\t\t}\n\t\te.Set(envVarKey, rawSecret.Value)\n\t}\n\treturn nil\n}", "func Load() {\n\tif err := viper.ReadInConfig(); err != nil {\n\t\tLoaded = false\n\n\t\treturn\n\t}\n\n\tLoaded = true\n}", "func (appConfig *AppConfiguration) Load(filename string) {\n\n\tif filename != \"\" {\n\t\t// Use config file from the flag.\n\t\tappConfig.viper.SetConfigFile(filename)\n\t} else {\n\t\t// Find home directory.\n\t\thome, err := homedir.Dir()\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Error determining home directory: %v\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\t// Search config in home directory with name \".restic-manager\" (without extension).\n\t\tappConfig.viper.AddConfigPath(home)\n\t\tappConfig.viper.SetConfigName(\".restic-manager\")\n\t}\n\n\tappConfig.viper.AutomaticEnv() // read in environment variables that match\n\n\t// If a config file is found, read it in.\n\tif err := appConfig.viper.ReadInConfig(); err != nil {\n\t\tglog.Errorf(\"Could not load application configuration from %v: %v\", filename, err)\n\t} else {\n\t\tglog.Debugf(\"Using config file: %s\", appConfig.viper.ConfigFileUsed())\n\t}\n}", "func ReadFlag(c interface{}) {\r\n\tvar err error\r\n\tinfo := configinfos[c]\r\n\tfor _,v := range os.Args[1:] {\r\n\t\tif !strings.HasPrefix(v, \"--\") {\r\n\t\t\tfmt.Println(\"invalid args\",v)\r\n\t\t\tcontinue\r\n\t\t}\r\n\t\tkv := strings.SplitN(v[2:],\"=\",2)\r\n\t\tswitch kv[0]{\r\n\t\tcase \"test\",\"help\",\"enable\",\"disable\",\"mode\":\r\n\t\t\terr = SetData(info, v[2:])\r\n\t\tdefault:\r\n\t\t\terr = SetData(c, v[2:])\r\n\t\t}\r\n\t\tif err != nil {\r\n\t\t\tfmt.Println(\"error:\",err,v)\r\n\t\t}\r\n\t}\r\n}", "func LoadByFlag(config interface{}, flagPath string) error {\n\tvar filePath string\n\tflag.StringVar(&filePath, flagPath, \"config.yaml\", \"Path of config file\")\n\tflag.Parse()\n\n\treturn Load(config, filePath)\n}", "func Load() (Config, error) {\n\tvar c Config\n\terr := envconfig.Process(\"ferrum\", &c)\n\tif err != nil {\n\t\treturn Config{}, fmt.Errorf(\"failed to parse configuration env vars: %v\", err)\n\t}\n\n\tc.LogLevel, err = log.ParseLevel(c.LogLevelRaw)\n\tif err != nil {\n\t\treturn Config{}, fmt.Errorf(\"failed to parse log level: %v\", err)\n\t}\n\n\tc.Version = version\n\tc.BuildDate = buildDate\n\n\treturn 
c, nil\n}", "func LoadArgs(argConfs []snlapi.Arg, cliArgs []string) (map[string]*string, error) {\n\targRes := map[string]*string{}\n\n\tam := map[string]snlapi.Arg{}\n\tposSl := []snlapi.Arg{}\n\tfor _, ac := range argConfs {\n\t\tif ac.Type == \"\" || ac.Type == \"bool\" || ac.Type == \"named\" {\n\t\t\tam[ac.Name] = ac\n\t\t\tcontinue\n\t\t}\n\t\tif ac.Type == \"pos\" {\n\t\t\tposSl = append(posSl, ac)\n\t\t\tcontinue\n\t\t}\n\t\t//TODO: Validation\n\t\treturn nil, fmt.Errorf(\"unknown argument type: name: '%s', type: '%s' should be one of: pos, bool, named\", ac.Name, ac.Type)\n\t}\n\n\tprevHandled := false\n\tpassedPosArg := 0\n\tfor i, cliArg := range cliArgs {\n\t\tif prevHandled {\n\t\t\tprevHandled = false\n\t\t\tcontinue\n\t\t}\n\t\t// Whitespace separated named or bool flag\n\t\tif match := argWsSeparatedRegex.MatchString(cliArg); match {\n\t\t\targName := strings.TrimLeft(cliArg, \"-\")\n\t\t\tc, exists := am[argName]\n\t\t\tif !exists {\n\t\t\t\treturn nil, fmt.Errorf(\"named argument does not exist: name '%s'\", argName)\n\t\t\t}\n\t\t\t// Bool flag\n\t\t\tif c.Type != \"\" && c.Type == \"bool\" {\n\t\t\t\ttrueVal := \"1\"\n\t\t\t\targRes[argName] = &trueVal\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif c.Type == \"\" || c.Type == \"named\" {\n\t\t\t\t// Named flag whitespace separated\n\t\t\t\tif i+1 >= len(cliArgs) {\n\t\t\t\t\treturn nil, fmt.Errorf(\"missing value after last named argument: name '%s'\", cliArg)\n\t\t\t\t}\n\t\t\t\targRes[argName] = &cliArgs[i+1]\n\t\t\t\tprevHandled = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\t// Equal sign separated named argument\n\t\tif match := argEqSeparatedRegex.MatchString(cliArg); match {\n\t\t\ttmpS := strings.TrimLeft(cliArg, \"-\")\n\t\t\tspl := strings.SplitN(tmpS, \"=\", 2)\n\t\t\targName, argValue := spl[0], spl[1]\n\t\t\tc, exists := am[argName]\n\t\t\tif !exists {\n\t\t\t\treturn nil, fmt.Errorf(\"named argument does not exist: '%s'\", argName)\n\t\t\t}\n\t\t\tif !(c.Type == \"\" || c.Type == \"named\") {\n\t\t\t\treturn nil, fmt.Errorf(\"value provided for non-named argument %s: '%s'\", argName, cliArg)\n\t\t\t}\n\t\t\targRes[argName] = &argValue\n\t\t\tcontinue\n\t\t}\n\n\t\t// Positional arguments\n\t\tif len(posSl) > passedPosArg {\n\t\t\ta := posSl[passedPosArg]\n\t\t\targRes[a.Name] = &cliArgs[i]\n\t\t\tpassedPosArg++\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"too many positional arguments given: '%s'\", cliArg)\n\t\t}\n\t}\n\n\tfor i := range argConfs {\n\t\targName := argConfs[i].Name\n\t\tif _, exists := argRes[argName]; !exists {\n\t\t\tif argConfs[i].FromEnvVar != nil {\n\t\t\t\tvalue, isSet := os.LookupEnv(argName)\n\t\t\t\tif isSet {\n\t\t\t\t\targRes[argName] = &value\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tif argConfs[i].Default != nil {\n\t\t\t\targRes[argName] = argConfs[i].Default\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif argConfs[i].Type == \"bool\" {\n\t\t\t\tfalseVal := \"0\"\n\t\t\t\targRes[argName] = &falseVal\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif argConfs[i].Type == \"pos\" {\n\t\t\t\treturn nil, fmt.Errorf(\"value for positional argument missing: '%s'\", argName)\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(\"value for argument missing: not given via parameter, environment variable or default value: '%s'\", argName)\n\t\t}\n\t}\n\tlogrus.Trace(\"parsed args\", argRes)\n\n\treturn argRes, nil\n}", "func init() {\n\tprepareOptionsFromCommandline(&configFromInit)\n\tparseConfigFromEnvironment(&configFromInit)\n}", "func (f *FlagSet) parseEnv(environ []string) error {\n\tm := f.formal\n\n\tenv := 
make(map[string]string)\n\tfor _, s := range environ {\n\t\ti := strings.Index(s, \"=\")\n\t\tif i < 1 {\n\t\t\tcontinue\n\t\t}\n\t\tenv[s[0:i]] = s[i+1:]\n\t}\n\n\tfor _, flag := range m {\n\t\tname := flag.Name\n\t\t_, set := f.actual[name]\n\t\tif set {\n\t\t\tcontinue\n\t\t}\n\n\t\tflag, alreadythere := m[name]\n\t\tif !alreadythere {\n\t\t\tif name == \"help\" || name == \"h\" { // special case for nice help message.\n\t\t\t\tf.usage()\n\t\t\t\treturn ErrHelp\n\t\t\t}\n\t\t\treturn f.failf(\"environment variable provided but not defined: %s\", name)\n\t\t}\n\n\t\tenvKey := normalizeNameForEnv(f.envPrefix, flag.Name)\n\t\tvalue, isSet := env[envKey]\n\t\tif !isSet {\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := flag.Value.Set(value); err != nil {\n\t\t\treturn f.failf(\"invalid value %q for environment variable %s: %v\", value, name, err)\n\t\t}\n\n\t\t// update f.actual\n\t\tif f.actual == nil {\n\t\t\tf.actual = make(map[string]*Flag)\n\t\t}\n\t\tf.actual[name] = flag\n\t}\n\treturn nil\n}", "func loadEnvVars(envars []string) []EnvVar {\n\tenvs := []EnvVar{}\n\tfor _, e := range envars {\n\t\tcharacter := \"\"\n\t\tequalPos := strings.Index(e, \"=\")\n\t\tcolonPos := strings.Index(e, \":\")\n\t\tswitch {\n\t\tcase equalPos == -1 && colonPos == -1:\n\t\t\tcharacter = \"\"\n\t\tcase equalPos == -1 && colonPos != -1:\n\t\t\tcharacter = \":\"\n\t\tcase equalPos != -1 && colonPos == -1:\n\t\t\tcharacter = \"=\"\n\t\tcase equalPos != -1 && colonPos != -1:\n\t\t\tif equalPos > colonPos {\n\t\t\t\tcharacter = \":\"\n\t\t\t} else {\n\t\t\t\tcharacter = \"=\"\n\t\t\t}\n\t\t}\n\n\t\tif character == \"\" {\n\t\t\tenvs = append(envs, EnvVar{\n\t\t\t\tName: e,\n\t\t\t\tValue: os.Getenv(e),\n\t\t\t})\n\t\t} else {\n\t\t\tvalues := strings.SplitN(e, character, 2)\n\t\t\t// try to get value from os env\n\t\t\tif values[1] == \"\" {\n\t\t\t\tvalues[1] = os.Getenv(values[0])\n\t\t\t}\n\t\t\tenvs = append(envs, EnvVar{\n\t\t\t\tName: values[0],\n\t\t\t\tValue: values[1],\n\t\t\t})\n\t\t}\n\t}\n\n\treturn envs\n}", "func Load(filename string, useDefault bool) error {\n\tdefOptions := []byte(`\n{\n\t\"Env\": \"DEV\",\n\t\"Address\": \":7070\",\n\t\"HTTPAddress\": \":80\",\n\t\"ExternalAddress\": \"http://localhost:7070\",\n\t\"DB\": {\n\t\t\"ConnectString\": \"alfred.db\"\n\t},\n\t\"Web\": true,\n\t\"Bot\": true,\n\t\"Worker\": true,\n\t\"ClamCtl\": \"/var/run/clamav/clamd.ctl\",\n\t\"QueuePoll\": 10,\n\t\"Security\": {\n\t\t\"SessionKey\": \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\",\n\t\t\"Timeout\": 525600,\n\t\t\"Recaptcha\": \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_xx_xxx\",\n\t\t\"DBKey\": \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\n\t}\n}`)\n\t// Start the options with the defaults and override with the file\n\terr := json.Unmarshal(defOptions, &Options)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif filename != \"\" {\n\t\toptions, err := ioutil.ReadFile(filename)\n\t\tif err != nil {\n\t\t\tif !useDefault {\n\t\t\t\tlogrus.WithError(err).Warn(\"Could not open config file and not using default\")\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlogrus.WithError(err).Info(\"Could not open config file - using defaults\")\n\t\t} else {\n\t\t\terr = json.Unmarshal(options, &Options)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else if !useDefault {\n\t\tlogrus.Warn(\"no file provided and we are not using default\")\n\t\treturn errors.New(\"no file and no default\")\n\t}\n\tfinalOptions, err := json.MarshalIndent(&Options, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\tlogrus.Infof(\"Using options:\\n%s\\n\", 
string(finalOptions))\n\treturn nil\n}", "func Load() {\n\t// Load config file\n\tpath := GetHomeDir()\n\tviper.AddConfigPath(GetHomeDir())\n\tviper.SetConfigName(viper.GetString(\"filename_config\"))\n\tviper.ReadInConfig()\n\n\t// Alias for platform\n\tviper.RegisterAlias(\"platform_addrport\", \"platform\")\n\n\t// Setup file paths\n\tviper.Set(\"home_dir\", path)\n\tviper.Set(\"file_ca\", filepath.Join(path, viper.GetString(\"filename_ca\")))\n\tviper.Set(\"file_cert\", filepath.Join(path, viper.GetString(\"filename_cert\")))\n\tviper.Set(\"file_key\", filepath.Join(path, viper.GetString(\"filename_key\")))\n\tviper.Set(\"file_config\", filepath.Join(path, viper.GetString(\"filename_config\"))+\".json\")\n\n\t// Fill virtual-only fields\n\tviper.Set(\"registered\", isFileValid(viper.GetString(\"file_key\")))\n\tviper.Set(\"authenticated\", isFileValid(viper.GetString(\"file_cert\")))\n\tviper.Set(\"local_port\", 9005)\n\n\t// Configure timeout\n\tif t := viper.GetDuration(\"timeout\"); t > 0 {\n\t\tnet.DefaultTimeout = t\n\t}\n\n\treturn\n}", "func Parse() (*EnvFlags, error) {\n\treturn ParseArgs(os.Args[1:])\n}", "func LoadEnv(filenames ...string) error {\n\tfor _, filename := range setFilename(filenames) {\n\t\tlines, err := readFile(filename)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tsetEnv(lines)\n\t}\n\n\treturn nil\n}", "func LoadEnvs() {\n\tManifestsPath = loadw(\"BROADWAY_MANIFESTS_PATH\")\n\tif ManifestsPath == \"\" {\n\t\tManifestsPath = defaultManifestsPath\n\t}\n\tPlaybooksPath = loadw(\"BROADWAY_PLAYBOOKS_PATH\")\n\tif PlaybooksPath == \"\" {\n\t\tPlaybooksPath = defaultPlaybooksPath\n\t}\n\tAuthBearerToken = loadw(\"BROADWAY_AUTH_TOKEN\")\n\n\tServerHost = loadw(\"HOST\")\n\n\tSlackWebhook = loadw(\"SLACK_WEBHOOK\")\n\tSlackToken = loadw(\"SLACK_VERIFICATION_TOKEN\")\n\n\tK8sServiceHost = loadw(\"KUBERNETES_SERVICE_HOST\")\n\tK8sServicePort = loadw(\"KUBERNETES_PORT_443_TCP_PORT\")\n\tK8sNamespace = loadf(\"KUBERNETES_NAMESPACE\")\n\n\tK8sCertFile = loadw(\"KUBERNETES_CERT_FILE\")\n\tK8sKeyFile = loadw(\"KUBERNETES_KEY_FILE\")\n\tK8sCAFile = loadw(\"KUBERNETES_CA_FILE\")\n\n\tEtcdEndpoints = loadw(\"ETCD_ENDPOINTS\")\n\tEtcdPath = loadw(\"ETCD_PATH\")\n}", "func ParseFlags() {\n\tvar err error\n\tvar s, n, v, backupDriverName, specDir, logLoc, logLevel, appListCSV, provisionerName, configMapName string\n\tvar schedulerDriver scheduler.Driver\n\tvar volumeDriver volume.Driver\n\tvar nodeDriver node.Driver\n\tvar backupDriver backup.Driver\n\tvar appScaleFactor int\n\tvar volUpgradeEndpointURL string\n\tvar volUpgradeEndpointVersion string\n\tvar minRunTimeMins int\n\tvar chaosLevel int\n\tvar storageNodesPerAZ int\n\tvar destroyAppTimeout time.Duration\n\tvar driverStartTimeout time.Duration\n\tvar autoStorageNodeRecoveryTimeout time.Duration\n\tvar licenseExpiryTimeoutHours time.Duration\n\tvar meteringIntervalMins time.Duration\n\tvar bundleLocation string\n\tvar customConfigPath string\n\tvar customAppConfig map[string]scheduler.AppConfig\n\tvar enableStorkUpgrade bool\n\tvar secretType string\n\tvar pureVolumes bool\n\tvar vaultAddress string\n\tvar vaultToken string\n\tvar schedUpgradeHops string\n\tvar autopilotUpgradeImage string\n\tvar csiGenericDriverConfigMapName string\n\n\tflag.StringVar(&s, schedulerCliFlag, defaultScheduler, \"Name of the scheduler to use\")\n\tflag.StringVar(&n, nodeDriverCliFlag, defaultNodeDriver, \"Name of the node driver to use\")\n\tflag.StringVar(&v, storageDriverCliFlag, defaultStorageDriver, \"Name of the storage driver to 
use\")\n\tflag.StringVar(&backupDriverName, backupCliFlag, \"\", \"Name of the backup driver to use\")\n\tflag.StringVar(&specDir, specDirCliFlag, defaultSpecsRoot, \"Root directory containing the application spec files\")\n\tflag.StringVar(&logLoc, logLocationCliFlag, defaultLogLocation,\n\t\t\"Path to save logs/artifacts upon failure. Default: /mnt/torpedo_support_dir\")\n\tflag.StringVar(&logLevel, logLevelCliFlag, defaultLogLevel, \"Log level\")\n\tflag.IntVar(&appScaleFactor, scaleFactorCliFlag, defaultAppScaleFactor, \"Factor by which to scale applications\")\n\tflag.IntVar(&minRunTimeMins, minRunTimeMinsFlag, defaultMinRunTimeMins, \"Minimum Run Time in minutes for appliation deletion tests\")\n\tflag.IntVar(&chaosLevel, chaosLevelFlag, defaultChaosLevel, \"Application deletion frequency in minutes\")\n\tflag.StringVar(&volUpgradeEndpointURL, storageUpgradeEndpointURLCliFlag, defaultStorageUpgradeEndpointURL,\n\t\t\"Endpoint URL link which will be used for upgrade storage driver\")\n\tflag.StringVar(&volUpgradeEndpointVersion, storageUpgradeEndpointVersionCliFlag, defaultStorageUpgradeEndpointVersion,\n\t\t\"Endpoint version which will be used for checking version after upgrade storage driver\")\n\tflag.BoolVar(&enableStorkUpgrade, enableStorkUpgradeFlag, false, \"Enable stork upgrade during storage driver upgrade\")\n\tflag.StringVar(&appListCSV, appListCliFlag, \"\", \"Comma-separated list of apps to run as part of test. The names should match directories in the spec dir.\")\n\tflag.StringVar(&provisionerName, provisionerFlag, defaultStorageProvisioner, \"Name of the storage provisioner Portworx or CSI.\")\n\tflag.IntVar(&storageNodesPerAZ, storageNodesPerAZFlag, defaultStorageNodesPerAZ, \"Maximum number of storage nodes per availability zone\")\n\tflag.DurationVar(&destroyAppTimeout, \"destroy-app-timeout\", defaultTimeout, \"Maximum \")\n\tflag.DurationVar(&driverStartTimeout, \"driver-start-timeout\", defaultDriverStartTimeout, \"Maximum wait volume driver startup\")\n\tflag.DurationVar(&autoStorageNodeRecoveryTimeout, \"storagenode-recovery-timeout\", defaultAutoStorageNodeRecoveryTimeout, \"Maximum wait time in minutes for storageless nodes to transition to storagenodes in case of ASG\")\n\tflag.DurationVar(&licenseExpiryTimeoutHours, licenseExpiryTimeoutHoursFlag, defaultLicenseExpiryTimeoutHours, \"Maximum wait time in hours after which force expire license\")\n\tflag.DurationVar(&meteringIntervalMins, meteringIntervalMinsFlag, defaultMeteringIntervalMins, \"Metering interval in minutes for metering agent\")\n\tflag.StringVar(&configMapName, configMapFlag, \"\", \"Name of the config map to be used.\")\n\tflag.StringVar(&bundleLocation, \"bundle-location\", defaultBundleLocation, \"Path to support bundle output files\")\n\tflag.StringVar(&customConfigPath, \"custom-config\", \"\", \"Path to custom configuration files\")\n\tflag.StringVar(&secretType, \"secret-type\", scheduler.SecretK8S, \"Path to custom configuration files\")\n\tflag.BoolVar(&pureVolumes, \"pure-volumes\", false, \"To enable using Pure backend for shared volumes\")\n\tflag.StringVar(&vaultAddress, \"vault-addr\", \"\", \"Path to custom configuration files\")\n\tflag.StringVar(&vaultToken, \"vault-token\", \"\", \"Path to custom configuration files\")\n\tflag.StringVar(&schedUpgradeHops, \"sched-upgrade-hops\", \"\", \"Comma separated list of versions scheduler upgrade to take hops\")\n\tflag.StringVar(&autopilotUpgradeImage, autopilotUpgradeImageCliFlag, \"\", \"Autopilot version which will be used for 
checking version after upgrade autopilot\")\n\tflag.StringVar(&csiGenericDriverConfigMapName, csiGenericDriverConfigMapFlag, \"\", \"Name of config map that stores provisioner details when CSI generic driver is being used\")\n\tflag.StringVar(&testrailuttils.MilestoneName, milestoneFlag, \"\", \"Testrail milestone name\")\n\tflag.StringVar(&testrailuttils.RunName, testrailRunNameFlag, \"\", \"Testrail run name, this run will be updated in testrail\")\n\tflag.StringVar(&testrailuttils.JobRunID, testrailRunIDFlag, \"\", \"Run ID for the testrail run\")\n\tflag.StringVar(&testrailuttils.JenkinsBuildURL, testrailJenkinsBuildURLFlag, \"\", \"Jenins job url for testrail update\")\n\tflag.StringVar(&testRailHostname, testRailHostFlag, \"\", \"Testrail server hostname\")\n\tflag.StringVar(&testRailUsername, testRailUserNameFlag, \"\", \"Username to be used for adding entries to testrail\")\n\tflag.StringVar(&testRailPassword, testRailPasswordFlag, \"\", \"Password to be used for testrail update\")\n\tflag.StringVar(&jiraUserName, jiraUserNameFlag, \"\", \"Username to be used for JIRA client\")\n\tflag.StringVar(&jiraToken, jiraTokenFlag, \"\", \"API token for accessing the JIRA\")\n\tflag.StringVar(&jirautils.AccountID, jiraAccountIDFlag, \"\", \"AccountID for issue assignment\")\n\tflag.Parse()\n\n\tappList, err := splitCsv(appListCSV)\n\tif err != nil {\n\t\tlogrus.Fatalf(\"failed to parse app list: %v. err: %v\", appListCSV, err)\n\t}\n\n\tsched.Init(time.Second)\n\n\tif schedulerDriver, err = scheduler.Get(s); err != nil {\n\t\tlogrus.Fatalf(\"Cannot find scheduler driver for %v. Err: %v\\n\", s, err)\n\t} else if volumeDriver, err = volume.Get(v); err != nil {\n\t\tlogrus.Fatalf(\"Cannot find volume driver for %v. Err: %v\\n\", v, err)\n\t} else if nodeDriver, err = node.Get(n); err != nil {\n\t\tlogrus.Fatalf(\"Cannot find node driver for %v. Err: %v\\n\", n, err)\n\t} else if err = os.MkdirAll(logLoc, os.ModeDir); err != nil {\n\t\tlogrus.Fatalf(\"Cannot create path %s for saving support bundle. Error: %v\", logLoc, err)\n\t} else {\n\t\tif _, err = os.Stat(customConfigPath); err == nil {\n\t\t\tvar data []byte\n\n\t\t\tlogrus.Infof(\"Using custom app config file %s\", customConfigPath)\n\t\t\tdata, err = ioutil.ReadFile(customConfigPath)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatalf(\"Cannot read file %s. Error: %v\", customConfigPath, err)\n\t\t\t}\n\t\t\terr = yaml.Unmarshal(data, &customAppConfig)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatalf(\"Cannot unmarshal yml %s. Error: %v\", customConfigPath, err)\n\t\t\t}\n\t\t\tlogrus.Infof(\"Parsed custom app config file: %+v\", customAppConfig)\n\t\t}\n\t\tlogrus.Infof(\"Backup driver name %s\", backupDriverName)\n\t\tif backupDriverName != \"\" {\n\t\t\tif backupDriver, err = backup.Get(backupDriverName); err != nil {\n\t\t\t\tlogrus.Fatalf(\"cannot find backup driver for %s. 
Err: %v\\n\", backupDriverName, err)\n\t\t\t} else {\n\t\t\t\tlogrus.Infof(\"Backup driver found %v\", backupDriver)\n\t\t\t}\n\t\t}\n\n\t\tonce.Do(func() {\n\t\t\tinstance = &Torpedo{\n\t\t\t\tInstanceID: time.Now().Format(\"01-02-15h04m05s\"),\n\t\t\t\tS: schedulerDriver,\n\t\t\t\tV: volumeDriver,\n\t\t\t\tN: nodeDriver,\n\t\t\t\tSpecDir: specDir,\n\t\t\t\tLogLoc: logLoc,\n\t\t\t\tLogLevel: logLevel,\n\t\t\t\tGlobalScaleFactor: appScaleFactor,\n\t\t\t\tMinRunTimeMins: minRunTimeMins,\n\t\t\t\tChaosLevel: chaosLevel,\n\t\t\t\tStorageDriverUpgradeEndpointURL: volUpgradeEndpointURL,\n\t\t\t\tStorageDriverUpgradeEndpointVersion: volUpgradeEndpointVersion,\n\t\t\t\tEnableStorkUpgrade: enableStorkUpgrade,\n\t\t\t\tAppList: appList,\n\t\t\t\tProvisioner: provisionerName,\n\t\t\t\tMaxStorageNodesPerAZ: storageNodesPerAZ,\n\t\t\t\tDestroyAppTimeout: destroyAppTimeout,\n\t\t\t\tDriverStartTimeout: driverStartTimeout,\n\t\t\t\tAutoStorageNodeRecoveryTimeout: autoStorageNodeRecoveryTimeout,\n\t\t\t\tConfigMap: configMapName,\n\t\t\t\tBundleLocation: bundleLocation,\n\t\t\t\tCustomAppConfig: customAppConfig,\n\t\t\t\tBackup: backupDriver,\n\t\t\t\tSecretType: secretType,\n\t\t\t\tPureVolumes: pureVolumes,\n\t\t\t\tVaultAddress: vaultAddress,\n\t\t\t\tVaultToken: vaultToken,\n\t\t\t\tSchedUpgradeHops: schedUpgradeHops,\n\t\t\t\tAutopilotUpgradeImage: autopilotUpgradeImage,\n\t\t\t\tCsiGenericDriverConfigMap: csiGenericDriverConfigMapName,\n\t\t\t\tLicenseExpiryTimeoutHours: licenseExpiryTimeoutHours,\n\t\t\t\tMeteringIntervalMins: meteringIntervalMins,\n\t\t\t}\n\t\t})\n\t}\n\n\t// Set log level\n\tlogLvl, err := logrus.ParseLevel(instance.LogLevel)\n\tif err != nil {\n\t\tlogrus.Fatalf(\"Failed to set log level due to Err: %v\", err)\n\t}\n\tlogrus.SetLevel(logLvl)\n\n}", "func (el *EnvVars) load(key string) (string, bool) {\n\tfor _, env := range el.path {\n\t\trv, found := env(key)\n\t\tif found {\n\t\t\treturn rv, true\n\t\t}\n\t}\n\treturn \"\", false\n}", "func (o *Options) Parse() error {\n\terr := o.flags.Parse(os.Args)\n\treturn err\n}", "func (o *options) parseFlags() {\n\tflag.StringVar(&o.context, \"context\", \"\", \"The name of the kubeconfig context to use.\")\n\tflag.StringVarP(&o.name, \"name\", \"n\", defaultContextName, \"Context name for the kubeconfig entry.\")\n\tflag.StringVarP(&o.output, \"output\", \"o\", defaultConfigFileName, \"Output path for generated kubeconfig file.\")\n\tflag.BoolVarP(&o.certificate, \"certificate\", \"c\", false, \"Authorize with a client certificate and key.\")\n\tflag.BoolVarP(&o.serviceaccount, \"serviceaccount\", \"s\", false, \"Authorize with a service account.\")\n\tflag.BoolVar(&o.overwrite, \"overwrite\", false, \"Overwrite (rather than merge) output file if exists.\")\n\n\tflag.Parse()\n}", "func parseArgv(l *linter) {\n\tconst enableAll = \"all\"\n\n\tflag.Usage = func() {\n\t\tlog.Printf(\"usage: [flags] package...\")\n\t\tflag.PrintDefaults()\n\t}\n\n\tenable := flag.String(\"enable\", enableAll,\n\t\t`comma-separated list of enabled checkers`)\n\tflag.BoolVar(&l.withExperimental, `withExperimental`, false,\n\t\t`only for -enable=all, include experimental checks`)\n\tflag.BoolVar(&l.withOpinionated, `withOpinionated`, false,\n\t\t`only for -enable=all, include very opinionated checks`)\n\tflag.IntVar(&l.failureExitCode, \"failcode\", 1,\n\t\t`exit code to be used when lint issues are found`)\n\tflag.BoolVar(&l.checkGenerated, \"checkGenerated\", false,\n\t\t`whether to check machine-generated files`)\n\n\tflag.Parse()\n\n\tl.packages = 
flag.Args()\n\n\tif len(l.packages) == 0 {\n\t\tblame(\"no packages specified\\n\")\n\t}\n\tif *enable != enableAll && l.withExperimental {\n\t\tblame(\"-withExperimental used with -enable=%q\", *enable)\n\t}\n\tif *enable != enableAll && l.withOpinionated {\n\t\tblame(\"-withOpinionated used with -enable=%q\", *enable)\n\t}\n\n\tswitch *enable {\n\tcase enableAll:\n\t\t// Special case. l.enabledCheckers remains nil.\n\tcase \"\":\n\t\t// Empty slice. Semantically \"disable-all\".\n\t\t// Can be used to run all pipelines without actual checkers.\n\t\tl.enabledCheckers = []string{}\n\tdefault:\n\t\t// Comma-separated list of names.\n\t\tl.enabledCheckers = strings.Split(*enable, \",\")\n\t}\n}", "func Load() (config *Config, err error) {\n\tconfig = &Config{}\n\n\tif err = env.Set(config); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn\n}", "func Load() (*Config, error) {\n\tcfg := &Config{}\n\terr := envconfig.Process(\"\", cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif cfg.TranslationsPath == \"\" {\n\t\tv, err := promtParameter(\"TRANSLATIONS_PATH\", true)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tcfg.TranslationsPath = v\n\t}\n\n\tif cfg.TargetAPIAuthorizationKey == \"\" {\n\t\tv, err := promtParameter(\"TARGET_API_AUTHORIZATION_KEY\", true)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcfg.TargetAPIAuthorizationKey = v\n\t}\n\n\tif cfg.TargetAPIHost == \"\" {\n\t\tv, err := promtParameter(\"TARGET_API_HOST\", true)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcfg.TargetAPIHost = v\n\t}\n\n\tif cfg.OrgIDSNCF == \"\" {\n\t\tv, err := promtParameter(\"ORGID_SNCF\", false)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif v == \"\" {\n\t\t\tfmt.Println(fmt.Sprintf(\"Note! Translations won't be uploaded for SNCF\"))\n\t\t}\n\n\t\tcfg.OrgIDSNCF = v\n\t}\n\n\tif cfg.OrgIDThalys == \"\" {\n\t\tv, err := promtParameter(\"ORGID_THALYS\", false)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif v == \"\" {\n\t\t\tfmt.Println(fmt.Sprintf(\"Note! Translations won't be uploaded for THALYS\"))\n\t\t}\n\n\t\tcfg.OrgIDThalys = v\n\t}\n\n\treturn cfg, nil\n}", "func LoadFromEnvironmentVariables(Enabled bool, EnvPrefix string) ConfigOptions {\n\treturn func(h *Config) {\n\t\th.envLoad = Enabled\n\t\th.envPrefix = strings.ToUpper(EnvPrefix)\n\t}\n}", "func ReadVars(envarprefixes ...string) error {\n\tvar (\n\t\ti interface{}\n\t)\n\tif len(envarprefixes) == 0 {\n\t\tfor k := range envFiles {\n\t\t\tenvarprefixes = append(envarprefixes, k)\n\t\t}\n\t}\n\tfmt.Println(\"envarprefixes\", envarprefixes)\n\tfor _, prefix := range envarprefixes {\n\t\tswitch prefix {\n\t\tcase \"go_crawler\":\n\t\t\ti = &crawlerEnvVars\n\t\tcase \"aws\":\n\t\t\ti = &awsEnvVars\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"Configuration %s does not exist\", prefix)\n\t\t}\n\t\terr := envconfig.Process(prefix, i)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error reading variables: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}", "func LoadEnvs() {\n\tvar err error\n\n\tif err = godotenv.Load(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tPort, err = strconv.Atoi(os.Getenv(\"APP_PORT\"))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tAPIURL = os.Getenv(\"API_URL\")\n\tHashKey = []byte(os.Getenv(\"HASH_KEY\"))\n\tBlockKey = []byte(os.Getenv(\"BLOCK_KEY\"))\n}", "func (m *Main) ParseFlags(ctx context.Context, args []string) error {\n\t// Our flag set is very simple. 
It only includes a config path.\n\tfs := flag.NewFlagSet(\"booking\", flag.ContinueOnError)\n\tfs.StringVar(&m.ConfigPath, \"config\", DefaultConfigPath, \"config path\")\n\tif err := fs.Parse(args); err != nil {\n\t\treturn err\n\t}\n\n\t// The expand() function is here to automatically expand \"~\" to the user's\n\t// home directory. This is a common task as configuration files are typing\n\t// under the home directory during local development.\n\tconfigPath, err := expand(m.ConfigPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Read our TOML formatted configuration file.\n\tconfig, err := ReadConfigFile(configPath)\n\tif os.IsNotExist(err) {\n\t\treturn fmt.Errorf(\"config file not found: %s\", m.ConfigPath)\n\t} else if err != nil {\n\t\treturn err\n\t}\n\tm.Config = config\n\n\treturn nil\n}", "func Load(env string) *Configuration {\n\t_, filePath, _, _ := runtime.Caller(0)\n\tconfigName := \"config.\" + env + \".yaml\"\n\tconfigPath := filePath[:len(filePath)-9] + \"files\" + string(filepath.Separator)\n\n\tviper.SetConfigName(configName)\n\tviper.AddConfigPath(configPath)\n\tviper.SetConfigType(\"yaml\")\n\n\terr := viper.ReadInConfig()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar config Configuration\n\tviper.Unmarshal(&config)\n\tsetGinMode(config.Server.Mode)\n\n\treturn &config\n}", "func loadConfig(l log.Logger) *operator.Config {\n\tfs := flag.NewFlagSet(os.Args[0], flag.ExitOnError)\n\n\tvar (\n\t\tprintVersion bool\n\t)\n\n\tcfg, err := operator.NewConfig(fs)\n\tif err != nil {\n\t\tlevel.Error(l).Log(\"msg\", \"failed to parse flags\", \"err\", err)\n\t\tos.Exit(1)\n\t}\n\n\tfs.BoolVar(&printVersion, \"version\", false, \"Print this build's version information\")\n\n\tif err := fs.Parse(os.Args[1:]); err != nil {\n\t\tlevel.Error(l).Log(\"msg\", \"failed to parse flags\", \"err\", err)\n\t\tos.Exit(1)\n\t}\n\n\tif printVersion {\n\t\tfmt.Println(build.Print(\"agent-operator\"))\n\t\tos.Exit(0)\n\t}\n\n\treturn cfg\n}", "func Load() {\n\n\tAppCfg = appConfig{\n\t\tElasticSearchURL: \"localhost:9200\",\n\t\tItemServiceURL: \"localhost:4040\",\n\t}\n\n\t/*\n\t\tAppCfg = appConfig{\n\t\t\tElasticSearchURL: os.Getenv(\"ES_HOST\"),\n\t\t\tItemServiceURL: os.Getenv(\"ITEM_SERVICE_URL\"),\n\t\t}\n\t*/\n}", "func (m *Main) ParseFlags(ctx context.Context, args []string) error {\n\tvar host, port, user, password, dbname, sslmode string\n\tflag.StringVar(&host, \"db-host\", \"localhost\", \"PostgreSQL database: host\")\n\tflag.StringVar(&port, \"db-port\", \"5432\", \"PostgreSQL database: port\")\n\tflag.StringVar(&user, \"db-user\", \"main_user\", \"PostgreSQL database: user\")\n\tflag.StringVar(&password, \"db-password\", \"mysecretuserpassword\", \"PostgreSQL database: password\")\n\tflag.StringVar(&dbname, \"db-name\", \"easysubmit_db\", \"PostgreSQL database: db name\")\n\tflag.StringVar(&m.Config.FS.HashKey, \"fs-hash-key\", \"00000000000000000000000000000000000000000000000000\", \"Hash key for naming files\")\n\tflag.StringVar(&m.Config.HTTP.Addr, \"addr\", \":8080\", \"HTTP network address\")\n\tflag.StringVar(&m.Config.HTTP.Domain, \"domain\", \"\", \"HTTP network address\")\n\tflag.StringVar(&m.Config.SignKey, \"sign-key\", \"000000000000000000000000000000000000000000000000000000000000000\", \"Sign key for JWT\")\n\tflag.StringVar(&m.Config.VerifyKey, \"verify-key\", \"000000000000000000000000000000000000000000000000000000000000000\", \"Verification key for JWT\")\n\tflag.Parse()\n\n\tif host != \"localhost\" {\n\t\tsslmode = \"require\"\n\t} else {\n\t\tsslmode 
= \"disable\"\n\t}\n\n\tm.Config.DB.DSN = fmt.Sprintf(\"host=%s port=%s user=%s password=%s dbname=%s sslmode=%s\", host, port, user, password, dbname, sslmode)\n\treturn nil\n}", "func Fill(config interface{}, envPrefix string) []cli.Flag {\n\tconfigValue := reflect.Indirect(reflect.ValueOf(config))\n\tvar flags []cli.Flag\n\tfor i := 0; i < configValue.NumField(); i++ {\n\t\tfieldValue := configValue.Field(i)\n\t\tfieldType := configValue.Type().Field(i)\n\t\tname := snaker.CamelToSnake(fieldType.Name)\n\t\tflagName := fieldType.Tag.Get(\"flag\")\n\t\tif flagName == \"\" {\n\t\t\tflagName = name\n\t\t}\n\t\tenvName := fieldType.Tag.Get(\"env\")\n\t\tif envName == \"\" {\n\t\t\tenvName = strings.ToUpper(flagName)\n\t\t}\n\t\tenvName = envPrefix + envName\n\t\tswitch fieldType.Type.Kind() {\n\t\tcase reflect.String:\n\t\t\tflag := cli.StringFlag{\n\t\t\t\tName: flagName,\n\t\t\t\tEnvVar: envName,\n\t\t\t\tDestination: fieldValue.Addr().Interface().(*string),\n\t\t\t\tValue: fieldType.Tag.Get(\"default\"),\n\t\t\t}\n\t\t\tflags = append(flags, flag)\n\t\tcase reflect.Int:\n\t\t\tflag := cli.IntFlag{\n\t\t\t\tName: flagName,\n\t\t\t\tEnvVar: envName,\n\t\t\t\tDestination: fieldValue.Addr().Interface().(*int),\n\t\t\t\tValue: intFromString(fieldType.Tag.Get(\"default\")),\n\t\t\t}\n\t\t\tflags = append(flags, flag)\n\t\tcase reflect.Slice:\n\t\t\tif fieldType.Type.Elem().Kind() == reflect.String {\n\t\t\t\tvalues := strings.Split(fieldType.Tag.Get(\"default\"), \",\")\n\t\t\t\tvalues2 := cli.StringSlice(values)\n\t\t\t\tfieldValue.Set(reflect.ValueOf(values))\n\t\t\t\tflag := cli.StringSliceFlag {\n\t\t\t\t\tName: flagName,\n\t\t\t\t\tEnvVar: envName,\n//\t\t\t\t\tDestination: fieldValue.Addr().Interface().(*[]string),\n\t\t\t\t\tValue: &values2,\n\t\t\t\t}\n\t\t\t\tflags = append(flags, flag)\n\t\t\t}\n\t\t}\n\t}\n\treturn flags\n}", "func (c *Cli) ParseEnv() error {\n\tvar (\n\t\terr error\n\t\tu64 uint64\n\t)\n\tfor k, e := range c.env {\n\t\ts := strings.TrimSpace(os.Getenv(k))\n\t\t// NOTE: we only parse the environment if it is not an emprt string\n\t\tif s != \"\" {\n\t\t\tswitch e.Type {\n\t\t\tcase \"bool\":\n\t\t\t\te.BoolValue, err = strconv.ParseBool(s)\n\t\t\tcase \"int\":\n\t\t\t\te.IntValue, err = strconv.Atoi(s)\n\t\t\tcase \"int64\":\n\t\t\t\te.Int64Value, err = strconv.ParseInt(s, 10, 64)\n\t\t\tcase \"uint\":\n\t\t\t\tu64, err = strconv.ParseUint(s, 10, 32)\n\t\t\t\te.UintValue = uint(u64)\n\t\t\tcase \"uint64\":\n\t\t\t\te.Uint64Value, err = strconv.ParseUint(s, 10, 64)\n\t\t\tcase \"float64\":\n\t\t\t\te.Float64Value, err = strconv.ParseFloat(s, 64)\n\t\t\tcase \"time.Duration\":\n\t\t\t\te.DurationValue, err = time.ParseDuration(s)\n\t\t\tdefault:\n\t\t\t\te.StringValue = s\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"%q should be type %q, %s\", e.Name, e.Type, err)\n\t\t\t}\n\t\t}\n\t\tc.env[k] = e\n\t}\n\treturn err\n}", "func loadConfig() (*config, error) {\n\tloadConfigError := func(err error) (*config, error) {\n\t\treturn nil, err\n\t}\n\n\t// Default config.\n\tcfg := defaultConfig\n\n\t// A config file in the current directory takes precedence.\n\tif _, err := os.Stat(defaultConfigFilename); !os.IsNotExist(err) {\n\t\tcfg.ConfigFile = defaultConfigFile\n\t}\n\n\t// Pre-parse the command line options to see if an alternative config\n\t// file or the version flag was specified.\n\tpreCfg := cfg\n\tpreParser := flags.NewParser(&preCfg, flags.Default)\n\t_, err := preParser.Parse()\n\tif err != nil {\n\t\te, ok := err.(*flags.Error)\n\t\tif !ok || 
e.Type != flags.ErrHelp {\n\t\t\tpreParser.WriteHelp(os.Stderr)\n\t\t}\n\t\tif e.Type == flags.ErrHelp {\n\t\t\tos.Exit(0)\n\t\t}\n\t\treturn loadConfigError(err)\n\t}\n\n\t// Show the version and exit if the version flag was specified.\n\tappName := filepath.Base(os.Args[0])\n\tappName = strings.TrimSuffix(appName, filepath.Ext(appName))\n\tif preCfg.ShowVersion {\n\t\tfmt.Println(appName, \"version\", currentVersion)\n\t\tos.Exit(0)\n\t}\n\n\t// Load additional config from file.\n\tvar configFileError error\n\tparser := flags.NewParser(&cfg, flags.Default)\n\terr = flags.NewIniParser(parser).ParseFile(preCfg.ConfigFile)\n\tif err != nil {\n\t\tif _, ok := err.(*os.PathError); !ok {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tparser.WriteHelp(os.Stderr)\n\t\t\treturn loadConfigError(err)\n\t\t}\n\t\tconfigFileError = err\n\t}\n\n\t// Parse command line options again to ensure they take precedence.\n\t_, err = parser.Parse()\n\tif err != nil {\n\t\tif e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp {\n\t\t\tparser.WriteHelp(os.Stderr)\n\t\t}\n\t\treturn loadConfigError(err)\n\t}\n\n\t// Warn about missing config file after the final command line parse\n\t// succeeds. This prevents the warning on help messages and invalid\n\t// options.\n\tif configFileError != nil {\n\t\tlog.Warnf(\"%v\", configFileError)\n\t\t//fmt.Printf(\"%v\\n\",configFileError)\n\t\treturn loadConfigError(configFileError)\n\t}\n\n\t// Choose the active network params based on the selected network.\n\t// Multiple networks can't be selected simultaneously.\n\tnumNets := 0\n\tactiveNet = &netparams.MainNetParams\n\tif cfg.TestNet {\n\t\tactiveNet = &netparams.TestNetParams\n\t\tnumNets++\n\t}\n\tif cfg.SimNet {\n\t\tactiveNet = &netparams.SimNetParams\n\t\tnumNets++\n\t}\n\tif numNets > 1 {\n\t\tstr := \"%s: The testnet and simnet params can't be used \" +\n\t\t\t\"together -- choose one\"\n\t\terr := fmt.Errorf(str, \"loadConfig\")\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tparser.WriteHelp(os.Stderr)\n\t\treturn loadConfigError(err)\n\t}\n\n\t// Set the host names and ports to the default if the\n\t// user does not specify them.\n\tif cfg.DcrdServ == \"\" {\n\t\tcfg.DcrdServ = defaultHost + \":\" + activeNet.RPCClientPort\n\t}\n\tif cfg.DcrwServ == \"\" {\n\t\tcfg.DcrwServ = defaultHost + \":\" + activeNet.RPCServerPort\n\t}\n\n\t// The HTTP server port can not be beyond a uint16's size in value.\n\t// if cfg.HttpSvrPort > 0xffff {\n\t// \tstr := \"%s: Invalid HTTP port number for HTTP server\"\n\t// \terr := fmt.Errorf(str, \"loadConfig\")\n\t// \tfmt.Fprintln(os.Stderr, err)\n\t// \tparser.WriteHelp(os.Stderr)\n\t// \treturn loadConfigError(err)\n\t// }\n\n\t// Append the network type to the log directory so it is \"namespaced\"\n\t// per network.\n\tcfg.LogDir = cleanAndExpandPath(cfg.LogDir)\n\tcfg.LogDir = filepath.Join(cfg.LogDir, activeNet.Name)\n\n\t// Special show command to list supported subsystems and exit.\n\tif cfg.DebugLevel == \"show\" {\n\t\tfmt.Println(\"Supported subsystems\", supportedSubsystems())\n\t\tos.Exit(0)\n\t}\n\n\t// Initialize logging at the default logging level.\n\tinitSeelogLogger(filepath.Join(cfg.LogDir, defaultLogFilename))\n\tsetLogLevels(defaultLogLevel)\n\n\t// Parse, validate, and set debug log level(s).\n\tif err := parseAndSetDebugLevels(cfg.DebugLevel); err != nil {\n\t\terr := fmt.Errorf(\"%s: %v\", \"loadConfig\", err.Error())\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tparser.WriteHelp(os.Stderr)\n\t\treturn loadConfigError(err)\n\t}\n\n\t//csvPath = 
cfg.HttpUIPath\n\n\treturn &cfg, nil\n}", "func init() {\n\tfs := &flag.FlagSet{}\n\tklog.InitFlags(fs)\n\tkf := strings.Split(os.Getenv(\"KLOG_FLAGS\"), \" \")\n\tfs.Parse(kf)\n}", "func TestConfig_Parse_EnvOverride(t *testing.T) {\n\t// Parse configuration.\n\tvar c run.Config\n\tif _, err := toml.Decode(`\n[meta]\ndir = \"/tmp/meta\"\n\n[data]\ndir = \"/tmp/data\"\n\n[cluster]\n\n[admin]\nbind-address = \":8083\"\n\n[http]\nbind-address = \":8087\"\n\n[[graphite]]\nprotocol = \"udp\"\ntemplates = [\n \"default.* .template.in.config\"\n]\n\n[[graphite]]\nprotocol = \"tcp\"\n\n[[collectd]]\nbind-address = \":1000\"\n\n[[collectd]]\nbind-address = \":1010\"\n\n[[opentsdb]]\nbind-address = \":2000\"\n\n[[opentsdb]]\nbind-address = \":2010\"\n\n[[udp]]\nbind-address = \":4444\"\n\n[[udp]]\n\n[monitoring]\nenabled = true\n\n[continuous_queries]\nenabled = true\n\n[tls]\nmin-version = \"tls1.0\"\n`, &c); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tgetenv := func(s string) string {\n\t\tswitch s {\n\t\tcase \"FREETSDB_UDP_BIND_ADDRESS\":\n\t\t\treturn \":1234\"\n\t\tcase \"FREETSDB_UDP_0_BIND_ADDRESS\":\n\t\t\treturn \":5555\"\n\t\tcase \"FREETSDB_GRAPHITE_0_TEMPLATES_0\":\n\t\t\treturn \"override.* .template.0\"\n\t\tcase \"FREETSDB_GRAPHITE_1_TEMPLATES\":\n\t\t\treturn \"override.* .template.1.1,override.* .template.1.2\"\n\t\tcase \"FREETSDB_GRAPHITE_1_PROTOCOL\":\n\t\t\treturn \"udp\"\n\t\tcase \"FREETSDB_COLLECTD_1_BIND_ADDRESS\":\n\t\t\treturn \":1020\"\n\t\tcase \"FREETSDB_OPENTSDB_0_BIND_ADDRESS\":\n\t\t\treturn \":2020\"\n\t\tcase \"FREETSDB_DATA_CACHE_MAX_MEMORY_SIZE\":\n\t\t\t// uint64 type\n\t\t\treturn \"1000\"\n\t\tcase \"FREETSDB_LOGGING_LEVEL\":\n\t\t\t// logging type\n\t\t\treturn \"warn\"\n\t\tcase \"FREETSDB_COORDINATOR_QUERY_TIMEOUT\":\n\t\t\t// duration type\n\t\t\treturn \"1m\"\n\t\tcase \"FREETSDB_TLS_MIN_VERSION\":\n\t\t\treturn \"tls1.2\"\n\t\t}\n\t\treturn \"\"\n\t}\n\n\tif err := c.ApplyEnvOverrides(getenv); err != nil {\n\t\tt.Fatalf(\"failed to apply env overrides: %v\", err)\n\t}\n\n\tif c.UDPInputs[0].BindAddress != \":5555\" {\n\t\tt.Fatalf(\"unexpected udp bind address: %s\", c.UDPInputs[0].BindAddress)\n\t}\n\n\tif c.UDPInputs[1].BindAddress != \":1234\" {\n\t\tt.Fatalf(\"unexpected udp bind address: %s\", c.UDPInputs[1].BindAddress)\n\t}\n\n\tif len(c.GraphiteInputs[0].Templates) != 1 || c.GraphiteInputs[0].Templates[0] != \"override.* .template.0\" {\n\t\tt.Fatalf(\"unexpected graphite 0 templates: %+v\", c.GraphiteInputs[0].Templates)\n\t}\n\n\tif len(c.GraphiteInputs[1].Templates) != 2 || c.GraphiteInputs[1].Templates[1] != \"override.* .template.1.2\" {\n\t\tt.Fatalf(\"unexpected graphite 1 templates: %+v\", c.GraphiteInputs[1].Templates)\n\t}\n\n\tif c.GraphiteInputs[1].Protocol != \"udp\" {\n\t\tt.Fatalf(\"unexpected graphite protocol: %s\", c.GraphiteInputs[1].Protocol)\n\t}\n\n\tif c.CollectdInputs[1].BindAddress != \":1020\" {\n\t\tt.Fatalf(\"unexpected collectd bind address: %s\", c.CollectdInputs[1].BindAddress)\n\t}\n\n\tif c.OpenTSDBInputs[0].BindAddress != \":2020\" {\n\t\tt.Fatalf(\"unexpected opentsdb bind address: %s\", c.OpenTSDBInputs[0].BindAddress)\n\t}\n\n\tif c.Data.CacheMaxMemorySize != 1000 {\n\t\tt.Fatalf(\"unexpected cache max memory size: %v\", c.Data.CacheMaxMemorySize)\n\t}\n\n\tif c.Logging.Level != zapcore.WarnLevel {\n\t\tt.Fatalf(\"unexpected logging level: %v\", c.Logging.Level)\n\t}\n\n\tif c.Cluster.QueryTimeout != freetstoml.Duration(time.Minute) {\n\t\tt.Fatalf(\"unexpected query timeout: %v\", 
c.Cluster.QueryTimeout)\n\t}\n\n\tif c.TLS.MinVersion != \"tls1.2\" {\n\t\tt.Fatalf(\"unexpected tls min version: %q\", c.TLS.MinVersion)\n\t}\n}", "func ParseCLIParametersFromFlags(flags *flag.FlagSet, prefix string) *CLIOptions {\n\toptions := CLIOptions{}\n\n\tif f := flags.Lookup(prefix + \"encryptor_config_file\"); f != nil {\n\t\toptions.EncryptorConfigFile = f.Value.String()\n\t}\n\n\treturn &options\n}", "func Load(ctx context.Context, filename string, opts ...LoadOption) (*Config, error) {\n\tparsedOpts := &loadOptions{\n\t\tglobals: starlark.StringDict{},\n\t\tfileReader: LocalFileReader(filepath.Dir(filename)),\n\t}\n\tfor _, opt := range opts {\n\t\topt.applyLoad(parsedOpts)\n\t}\n\n\toverriddenGlobals := parsedOpts.globals\n\tparsedOpts.globals = UnstablePredeclaredModules(parsedOpts.protoRegistry)\n\tfor key, value := range overriddenGlobals {\n\t\tparsedOpts.globals[key] = value\n\t}\n\tconfigLocals, tests, err := loadImpl(ctx, parsedOpts, filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Config{\n\t\tfilename: filename,\n\t\tglobals: parsedOpts.globals,\n\t\tlocals: configLocals,\n\t\ttests: tests,\n\t}, nil\n}", "func (cfg *Config) ParseFlags(flags Flags, f *flag.FlagSet, arguments []string) error {\n\tvar (\n\t\tworkspaces string\n\t\tuserServers serverFlag\n\t\tdialServers serverFlag\n\t)\n\n\tf.StringVar(&cfg.AcmeNetwork, \"acme.net\", cfg.AcmeNetwork,\n\t\t\"network where acme is serving 9P file system\")\n\tf.StringVar(&cfg.AcmeAddress, \"acme.addr\", cfg.AcmeAddress,\n\t\t\"address where acme is serving 9P file system\")\n\tf.BoolVar(&cfg.Verbose, \"v\", cfg.Verbose, \"Verbose output\")\n\tf.BoolVar(&cfg.ShowConfig, \"showconfig\", false, \"show configuration values and exit\")\n\n\tif flags&ProxyFlags != 0 {\n\t\tf.StringVar(&cfg.ProxyNetwork, \"proxy.net\", cfg.ProxyNetwork,\n\t\t\t\"network used for communication between acme-lsp and L\")\n\t\tf.StringVar(&cfg.ProxyAddress, \"proxy.addr\", cfg.ProxyAddress,\n\t\t\t\"address used for communication between acme-lsp and L\")\n\t}\n\tif flags&LangServerFlags != 0 {\n\t\tf.BoolVar(&cfg.Verbose, \"debug\", cfg.Verbose, \"turn on debugging prints (deprecated: use -v)\")\n\t\tf.StringVar(&cfg.RootDirectory, \"rootdir\", cfg.RootDirectory, \"root directory used for LSP initialization\")\n\t\tf.BoolVar(&cfg.HideDiagnostics, \"hidediag\", false, \"hide diagnostics sent by LSP server\")\n\t\tf.BoolVar(&cfg.RPCTrace, \"rpc.trace\", false, \"print the full rpc trace in lsp inspector format\")\n\t\tf.StringVar(&workspaces, \"workspaces\", \"\", \"colon-separated list of initial workspace directories\")\n\t\tf.Var(&userServers, \"server\", `map filename to language server command. The format is\n'handlers:cmd' where cmd is the LSP server command and handlers is\na comma separated list of 'regexp[@lang]'. The regexp matches the\nfilename and lang is a language identifier. (e.g. '\\.go$:gopls' or\n'[email protected],[email protected],\\.go$@go:gopls')`)\n\t\tf.Var(&dialServers, \"dial\", `map filename to language server address. The format is\n'handlers:host:port'. See -server flag for format of\nhandlers. (e.g. 
'\\.go$:localhost:4389')`)\n\t}\n\tif err := f.Parse(arguments); err != nil {\n\t\treturn err\n\t}\n\n\tif flags&LangServerFlags != 0 {\n\t\tif len(workspaces) > 0 {\n\t\t\tcfg.WorkspaceDirectories = strings.Split(workspaces, \":\")\n\t\t}\n\t\tif cfg.Servers == nil {\n\t\t\tcfg.Servers = make(map[string]*Server)\n\t\t}\n\t\thandlers := make([]FilenameHandler, 0)\n\t\tfor i, sa := range userServers {\n\t\t\tkey := fmt.Sprintf(\"_userCmdServer%v\", i)\n\t\t\tcfg.Servers[key] = &Server{\n\t\t\t\tCommand: strings.Fields(sa.args),\n\t\t\t}\n\t\t\tfor _, h := range sa.handlers {\n\t\t\t\th.ServerKey = key\n\t\t\t\thandlers = append(handlers, h)\n\t\t\t}\n\t\t}\n\t\tfor i, sa := range dialServers {\n\t\t\tkey := fmt.Sprintf(\"_userDialServer%v\", i)\n\t\t\tcfg.Servers[key] = &Server{\n\t\t\t\tAddress: sa.args,\n\t\t\t}\n\t\t\tfor _, h := range sa.handlers {\n\t\t\t\th.ServerKey = key\n\t\t\t\thandlers = append(handlers, h)\n\t\t\t}\n\t\t}\n\t\t// Prepend to give higher priority to command line flags.\n\t\tcfg.FilenameHandlers = append(handlers, cfg.FilenameHandlers...)\n\t}\n\treturn nil\n}", "func init() {\n\tflag.StringVar(&KubectlPath, \"kubectl-path\", \"\", \"Path to the kubectl binary\")\n\tflag.StringVar(&ClusterctlPath, \"clusterctl-path\", \"\", \"Path to the clusterctl binary\")\n\tflag.StringVar(&DumpPath, \"dump-path\", \"\", \"Path to the kubevirt artifacts dump cmd binary\")\n\tflag.StringVar(&WorkingDir, \"working-dir\", \"\", \"Path used for e2e test files\")\n}", "func ParseArgs(args []string) (*EnvFlags, error) {\n\tvar (\n\t\tfeature string\n\t\tassess string\n\t\tnamespace string\n\t\tkubeconfig string\n\t\tskipFeature string\n\t\tskipAssessment string\n\t\tparallelTests bool\n\t\tdryRun bool\n\t\tfailFast bool\n\t\tdisableGracefulTeardown bool\n\t\tkubeContext string\n\t)\n\n\tlabels := make(LabelsMap)\n\tskipLabels := make(LabelsMap)\n\n\tif flag.Lookup(featureFlag.Name) == nil {\n\t\tflag.StringVar(&feature, featureFlag.Name, featureFlag.DefValue, featureFlag.Usage)\n\t}\n\n\tif flag.Lookup(assessFlag.Name) == nil {\n\t\tflag.StringVar(&assess, assessFlag.Name, assessFlag.DefValue, assessFlag.Usage)\n\t}\n\n\tif flag.Lookup(kubecfgFlag.Name) == nil {\n\t\tflag.StringVar(&kubeconfig, kubecfgFlag.Name, kubecfgFlag.DefValue, kubecfgFlag.Usage)\n\t}\n\n\tif flag.Lookup(kubeNSFlag.Name) == nil {\n\t\tflag.StringVar(&namespace, kubeNSFlag.Name, kubeNSFlag.DefValue, kubeNSFlag.Usage)\n\t}\n\n\tif flag.Lookup(labelsFlag.Name) == nil {\n\t\tflag.Var(&labels, labelsFlag.Name, labelsFlag.Usage)\n\t}\n\n\tif flag.Lookup(skipLabelsFlag.Name) == nil {\n\t\tflag.Var(&skipLabels, skipLabelsFlag.Name, skipLabelsFlag.Usage)\n\t}\n\n\tif flag.Lookup(skipAssessmentFlag.Name) == nil {\n\t\tflag.StringVar(&skipAssessment, skipAssessmentFlag.Name, skipAssessmentFlag.DefValue, skipAssessmentFlag.Usage)\n\t}\n\n\tif flag.Lookup(skipFeatureFlag.Name) == nil {\n\t\tflag.StringVar(&skipFeature, skipFeatureFlag.Name, skipFeatureFlag.DefValue, skipFeatureFlag.Usage)\n\t}\n\n\tif flag.Lookup(parallelTestsFlag.Name) == nil {\n\t\tflag.BoolVar(&parallelTests, parallelTestsFlag.Name, false, parallelTestsFlag.Usage)\n\t}\n\n\tif flag.Lookup(dryRunFlag.Name) == nil {\n\t\tflag.BoolVar(&dryRun, dryRunFlag.Name, false, dryRunFlag.Usage)\n\t}\n\n\tif flag.Lookup(failFastFlag.Name) == nil {\n\t\tflag.BoolVar(&failFast, failFastFlag.Name, false, failFastFlag.Usage)\n\t}\n\n\tif flag.Lookup(disableGracefulTeardownFlag.Name) == nil {\n\t\tflag.BoolVar(&disableGracefulTeardown, disableGracefulTeardownFlag.Name, 
false, disableGracefulTeardownFlag.Usage)\n\t}\n\n\tif flag.Lookup(contextFlag.Name) == nil {\n\t\tflag.StringVar(&kubeContext, contextFlag.Name, contextFlag.DefValue, contextFlag.Usage)\n\t}\n\n\t// Enable klog/v2 flag integration\n\tklog.InitFlags(nil)\n\n\tif err := flag.CommandLine.Parse(args); err != nil {\n\t\treturn nil, fmt.Errorf(\"flags parsing: %w\", err)\n\t}\n\n\t// Hook into the default test.list of the `go test` and integrate that with the `--dry-run` behavior. Treat them the same way\n\tif !dryRun && flag.Lookup(\"test.list\") != nil && flag.Lookup(\"test.list\").Value.String() == \"true\" {\n\t\tklog.V(2).Info(\"Enabling dry-run mode as the tests were invoked in list mode\")\n\t\tdryRun = true\n\t}\n\n\tif failFast && parallelTests {\n\t\tpanic(fmt.Errorf(\"--fail-fast and --parallel are mutually exclusive options\"))\n\t}\n\n\treturn &EnvFlags{\n\t\tfeature: feature,\n\t\tassess: assess,\n\t\tlabels: labels,\n\t\tnamespace: namespace,\n\t\tkubeconfig: kubeconfig,\n\t\tskiplabels: skipLabels,\n\t\tskipFeatures: skipFeature,\n\t\tskipAssessments: skipAssessment,\n\t\tparallelTests: parallelTests,\n\t\tdryRun: dryRun,\n\t\tfailFast: failFast,\n\t\tdisableGracefulTeardown: disableGracefulTeardown,\n\t\tkubeContext: kubeContext,\n\t}, nil\n}", "func LoadEnvironment(object interface{}, metaDataKey string) error {\n\tvar values = func(key string) (string, bool) {\n\t\treturn os.LookupEnv(key)\n\t}\n\treturn commonLoad(values, object, metaDataKey)\n}", "func ParseFlags() (*Config, error) {\n\thomeDir, err := os.UserHomeDir()\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err, \"failed to get user's home directory path\")\n\t}\n\n\tconfig := &Config{}\n\tflag.StringVar(&config.artifactPath, \"artifact-path\", \"/artifacts\", \"Path to the filesystem tree containing the *.deb files to add to the APT repos\")\n\tflag.StringVar(&config.artifactVersion, \"artifact-version\", \"\", \"The version of the artifacts that will be added to the APT repos\")\n\tflag.StringVar(&config.releaseChannel, \"release-channel\", \"\", \"The release channel of the APT repos that the artifacts should be added to\")\n\tflag.StringVar(&config.bucketName, \"bucket\", \"\", \"The name of the S3 bucket where the repo should be synced to/from\")\n\tflag.StringVar(&config.localBucketPath, \"local-bucket-path\", \"/bucket\", \"The local path where the bucket should be synced to\")\n\tflag.StringVar(&config.aptlyPath, \"aptly-root-dir\", homeDir, \"The Aptly \\\"rootDir\\\" (see https://www.aptly.info/doc/configuration/ for details)\")\n\tflag.UintVar(&config.logLevel, \"log-level\", uint(logrus.InfoLevel), \"Log level from 0 to 6, 6 being the most verbose\")\n\tflag.BoolVar(&config.logJSON, \"log-json\", false, \"True if the log entries should use JSON format, false for text logging\")\n\n\tflag.Parse()\n\tif err := Check(config); err != nil {\n\t\treturn nil, trace.Wrap(err, \"failed to validate flags\")\n\t}\n\n\treturn config, nil\n}", "func (g *Gonf) load(cfg map[string]interface{}, urlAcc []string, flagsAcc []otoFlag) error {\n\tif err := legalOtoFlags(flagsAcc); err != nil {\n\t\treturn err\n\t}\n\n\tfor key, v := range cfg {\n\t\tk := otoFlag(key)\n\t\tif m, ok := v.(map[string]interface{}); ok {\n\t\t\tvar err error\n\t\t\tif isOtoFlag(k) {\n\t\t\t\terr = g.load(m, urlAcc, append(flagsAcc, k))\n\t\t\t} else {\n\t\t\t\terr = g.load(m, append(urlAcc, string(k)), flagsAcc)\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else if !isOtoFlag(k) {\n\t\t\troutes := 
buildRoutes(append(urlAcc, string(k)), flagsAcc, g.methodMap)\n\t\t\tg.configRoutes = append(g.configRoutes, routes...)\n\t\t}\n\t}\n\n\treturn nil\n}", "func Parse(config interface{}) {\n\t// load config to flag\n\tloadRawConfig(pflag.CommandLine, config)\n\n\t// parse flags\n\tutil.InitFlags()\n\tif err := goflag.CommandLine.Parse([]string{}); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\t// parse config file if exists\n\tif err := decJSON(pflag.CommandLine, config); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\tos.Exit(1)\n\t}\n}", "func SetAllConfig(v *viper.Viper, flags *pflag.FlagSet, envPrefix string) error { // nolint: unparam\n\t// add cmd line flag def to viper\n\terr := v.BindPFlags(flags)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif envPrefix == \"\" {\n\t\tenvPrefix = \"PILOSA\"\n\t\tif v.GetBool(\"future.rename\") {\n\t\t\tenvPrefix = \"FEATUREBASE\"\n\t\t}\n\t}\n\n\t// add env to viper\n\tv.SetEnvPrefix(envPrefix)\n\tv.SetEnvKeyReplacer(strings.NewReplacer(\"-\", \"_\", \".\", \"_\"))\n\tv.AutomaticEnv()\n\n\tc := v.GetString(\"config\")\n\tvar flagErr error\n\tvalidTags := make(map[string]bool)\n\tflags.VisitAll(func(f *pflag.Flag) {\n\t\tvalidTags[f.Name] = true\n\t})\n\n\t// add config file to viper\n\tif c != \"\" {\n\t\tv.SetConfigFile(c)\n\t\tv.SetConfigType(\"toml\")\n\t\terr := v.ReadInConfig()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error reading configuration file '%s': %v\", c, err)\n\t\t}\n\n\t\tfor _, key := range v.AllKeys() {\n\t\t\tif _, ok := validTags[key]; !ok {\n\t\t\t\tif key == \"future.rename\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\treturn fmt.Errorf(\"invalid option in configuration file: %v\", key)\n\t\t\t}\n\t\t}\n\t}\n\n\t// set all values from viper\n\tflags.VisitAll(func(f *pflag.Flag) {\n\t\tif flagErr != nil {\n\t\t\treturn\n\t\t}\n\t\tvar value string\n\t\tif f.Value.Type() == \"stringSlice\" {\n\t\t\t// special handling is needed for stringSlice as v.GetString will\n\t\t\t// always return \"\" in the case that the value is an actual string\n\t\t\t// slice from a config file rather than a comma separated string\n\t\t\t// from a flag or env var.\n\t\t\tvss := v.GetStringSlice(f.Name)\n\t\t\tvalue = strings.Join(vss, \",\")\n\t\t} else {\n\t\t\tvalue = v.GetString(f.Name)\n\t\t}\n\n\t\tif f.Changed {\n\t\t\t// If f.Changed is true, that means the value has already been set\n\t\t\t// by a flag, and we don't need to ask viper for it since the flag\n\t\t\t// is the highest priority. 
This works around a problem with string\n\t\t\t// slices where f.Value.Set(csvString) would cause the elements of\n\t\t\t// csvString to be appended to the existing value rather than\n\t\t\t// replacing it.\n\t\t\treturn\n\t\t}\n\t\tflagErr = f.Value.Set(value)\n\t})\n\treturn flagErr\n}", "func (c *Config) Parse(arguments []string) error {\n\t// Parse first to get config file.\n\terr := c.flagSet.Parse(arguments)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\t// Load config file if specified.\n\tvar meta *toml.MetaData\n\tif c.configFile != \"\" {\n\t\tmeta, err = configutil.ConfigFromFile(c, c.configFile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Parse again to replace with command line options.\n\terr = c.flagSet.Parse(arguments)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tif len(c.flagSet.Args()) != 0 {\n\t\treturn errors.Errorf(\"'%s' is an invalid flag\", c.flagSet.Arg(0))\n\t}\n\n\tc.Adjust(meta)\n\treturn nil\n}", "func initConfig() {\n\tlogging.LogLevel = logging.InfoLevel\n\tif verboseLogging && quietLogging {\n\t\tfmt.Println(\"Verbose logging and quiet output are mutually exclusive flags. Please use only one.\")\n\t\tos.Exit(1)\n\t}\n\tif verboseLogging {\n\t\tlogging.LogLevel = logging.VerboseLevel\n\t}\n\tif quietLogging {\n\t\tlogging.LogLevel = logging.QuietLevel\n\t}\n\n\tif cfgFile != \"\" {\n\t\t// Use config file from the flag.\n\t\tviper.SetConfigFile(cfgFile)\n\t} else {\n\t\t// Find home directory.\n\t\thome, err := homedir.Dir()\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\t// Search config in home directory with name \".cli\" (without extension).\n\t\tviper.AddConfigPath(home)\n\t\tviper.SetConfigName(\".cli\")\n\t}\n\n\tviper.AutomaticEnv() // read in environment variables that match\n\n\t// If a config file is found, read it in.\n\tif err := viper.ReadInConfig(); err == nil {\n\t\tlogging.PrintLog(fmt.Sprintf(\"Using config file: %s\", viper.ConfigFileUsed()), logging.InfoLevel)\n\t}\n}", "func loadEnvConfig(filenames ...string) bool {\n\tfor _, filename := range filenames {\n\t\tif util.Exists(filename) {\n\t\t\terr := godotenv.Load(filename)\n\n\t\t\t// if the config file cannot be read we want to know about it\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err.Error())\n\t\t\t} else {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func ParseEnv(fs *flag.FlagSet, prefix string) error {\n\tvar err error\n\talreadySet := make(map[string]bool)\n\tfs.Visit(func(f *flag.Flag) {\n\t\talreadySet[f.Name] = true\n\t})\n\n\tfs.VisitAll(func(f *flag.Flag) {\n\t\tif !alreadySet[f.Name] {\n\t\t\tkey := prefix + toEnvName(f.Name)\n\t\t\tval := os.Getenv(key)\n\t\t\tif val != \"\" {\n\t\t\t\tif serr := fs.Set(f.Name, val); serr != nil {\n\t\t\t\t\terr = fmt.Errorf(\"invalid value %q for %s: %v\", val, key, serr)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\treturn err\n}", "func initConfig() {\n\tif debug {\n\t\tlogrus.SetLevel(logrus.DebugLevel)\n\t}\n\tif cfgFile != \"\" {\n\t\t// Use config file from the flag.\n\t\tviper.SetConfigFile(cfgFile)\n\t} else {\n\t\t// Find home directory.\n\t\thome, err := homedir.Dir()\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\t// Search config in home directory with name \".izlyctl\" (without extension).\n\t\tviper.AddConfigPath(home)\n\t\tviper.SetConfigName(\".izlyctl\")\n\t}\n\n\tviper.AutomaticEnv() // read in environment variables that match\n\n\t// If a config file is found, read it in.\n\tif err := 
viper.ReadInConfig(); err == nil {\n\t\tlogrus.Debugf(\"Using config file: %s\", viper.ConfigFileUsed())\n\t}\n\tviper.BindPFlag(\"api.auth.login\", rootCmd.PersistentFlags().Lookup(\"user\"))\n\tviper.BindPFlag(\"api.auth.password\", rootCmd.PersistentFlags().Lookup(\"password\"))\n\tviper.BindPFlag(\"api.url\", rootCmd.PersistentFlags().Lookup(\"server\"))\n\tviper.BindPFlag(\"user.rate\", rootCmd.PersistentFlags().Lookup(\"rate\"))\n\tviper.BindPFlag(\"user.dueDate\", rootCmd.PersistentFlags().Lookup(\"dueDate\"))\n}", "func loadEnvironmentConfig(env string) types.Options {\n\tconfigFile := \"config/\" + ServiceName + \"/\" + env + \".json\"\n\tif _, err := os.Stat(configFile); os.IsNotExist(err) {\n\t\tpanic(err)\n\t}\n\treturn parseConfigFile(configFile)\n}", "func setupEnvConfigViper(environmentFlag string) *viper.Viper {\n\tconfig := viper.New()\n\tconfig.SetConfigName(environmentFlag)\n\tconfig.AddConfigPath(\"./.argo/environments\")\n\tif err := config.ReadInConfig(); err != nil {\n\t\tcolor.Red(\"%s\",err)\n\t\tcolor.Red(\"Error locating or parsing %s env's helm value file (should be ./argo/environments/%s.yaml!\", environmentFlag)\n\t\tos.Exit(1)\n\t}\n\treturn config\n}", "func Load(config interface{}, filename string) error {\n\tv := reflect.ValueOf(config).Elem()\n\tif err := applyDefaults(reflect.StructField{}, v); err != nil {\n\t\treturn fmt.Errorf(\"init config with default values: %s\", err)\n\t}\n\n\tif err := mergeJSONConfig(config, filename); err != nil {\n\t\treturn err\n\t}\n\n\tif err := applyEnv(config); err != nil {\n\t\treturn err\n\t}\n\n\treturn validate(config)\n}", "func LoadEnvVars() {\n\tenv := GetEnv(\"GIN_ENV\", \"development\")\n\n\tif env == \"production\" || env == \"staging\" {\n\t\tlog.Println(\"Not using .env file in production or staging.\")\n\t\treturn\n\t}\n\n\tfilename := \".env.\" + env\n\n\tif _, err := os.Stat(filename); os.IsNotExist(err) {\n\t\tfilename = \".env\"\n\t}\n\n\terr := godotenv.Load(filename)\n\tif err != nil {\n\t\tlog.Println(\".env file not loaded\")\n\t}\n}", "func (parser envParser) Parse(r io.Reader) (interface{}, error) {\n\tcnf, ok := r.(*conf)\n\tif !ok {\n\t\treturn nil, errs.New(0, \"provided reader must be a a *conf pointer\")\n\t}\n\tcnf.values = ValueMap{}\n\tfor _, e := range os.Environ() {\n\t\tp1 := strings.Split(e, \"\\n\")\n\t\tif len(p1) > 0 {\n\t\t\tp2 := strings.Split(p1[0], \"=\")\n\t\t\tswitch {\n\t\t\tcase \"_\" == p2[0]:\n\t\t\t\tcontinue\n\t\t\tcase strings.HasPrefix(p2[0], \"BASH_FUNC_\"):\n\t\t\t\tcontinue\n\t\t\tcase \"\" != cnf.envPrefix && !strings.HasPrefix(p2[0], cnf.envPrefix):\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcnf.values[p2[0]] = NewVal(String, os.Getenv(p2[0]))\n\t\t}\n\t}\n\treturn cnf, nil\n}", "func (c *Configuration) LoadEnvironmentVars(prefix string) error {\n\tif prefix != \"\" {\n\t\tprefix += \"_\"\n\t}\n\n\treturn c.config.Load(env.Provider(prefix, \".\", func(s string) string {\n\t\tmapKey := strings.Replace(strings.ToLower(strings.TrimPrefix(s, prefix)), \"_\", \".\", -1)\n\t\tif !c.config.Exists(mapKey) {\n\t\t\t// only accept values from env vars that already exist in the config\n\t\t\treturn \"\"\n\t\t}\n\t\treturn mapKey\n\t}), nil)\n}", "func Load(configPaths ...string) error {\n\tv := viper.New()\n\t// look for a config file named server.yaml\n\tv.SetConfigName(\"server\")\n\tv.SetConfigType(\"yaml\")\n\t// look for env variables that start with \"DOITNEXT_\".\n\tv.SetEnvPrefix(\"doitnext\")\n\tv.AutomaticEnv()\n\t// Add paths to look for\n\tfor _, path := range configPaths 
{\n\t\tv.AddConfigPath(path)\n\t}\n\t// Find and read the config file\n\tif err := v.ReadInConfig(); err != nil {\n\t\tlog.Error(err, \"Failed to read the configuration file\")\n\t\treturn err\n\t}\n\t// Fill ServerConfig variable with data from server.yaml\n\tif err := v.Unmarshal(&Configuration); err != nil {\n\t\tlog.Error(err, \"Failed to unmarshal to ServerConfig\")\n\t\treturn err\n\t}\n\t//\n\tlog.Debug(\"Server configuration loaded\")\n\t// Validate imported data\n\treturn Configuration.validate()\n}", "func ParseEnv(opts *Options) {\n\topts.ParseEnv = true\n}", "func ConfigFromFlags() Config {\n\tvar config Config\n\tconfig = make(Config)\n\tconfig[\"dbpath\"] = flag.String(\"dbpath\", \"/tmp/GO.DB\", \"the path to DB\")\n\tconfig[\"httpuri\"] = flag.String(\"uri\", \"0.0.0.0\", \"what Port to Run HTTP Server at\")\n\tconfig[\"httpport\"] = flag.String(\"port\", \"9999\", \"what Port to Run HTTP Server at\")\n\tconfig[\"req_port\"] = flag.String(\"req-port\", \"9797\", \"what PORT to run ZMQ REQ at\")\n\tconfig[\"rep_port\"] = flag.String(\"rep-port\", \"9898\", \"what PORT to run ZMQ REP at\")\n\tconfig[\"cpuprofile\"] = flag.String(\"cpuprofile\", \"\", \"write cpu profile to file\")\n\n\t//flag.Parse()\n\treturn config\n}", "func UnmarshalFromEnv(c interface{}) {\n\ttopType := reflect.TypeOf(c).Elem()\n\ttopValue := reflect.ValueOf(c)\n\tfor i := 0; i < topType.NumField(); i++ {\n\t\tfield := topType.Field(i)\n\t\tif field.Tag.Get(\"env\") != \"\" {\n\t\t\tenvVar := os.Getenv(field.Tag.Get(\"env\"))\n\t\t\tif envVar == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tswitch field.Type.Kind() {\n\t\t\tcase reflect.Bool:\n\t\t\t\tb, err := strconv.ParseBool(envVar)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"didn't set from \", field.Tag.Get(\"env\"), \" due to \", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tf := topValue.Elem().Field(i)\n\t\t\t\tf.SetBool(b)\n\t\t\tcase reflect.Int64:\n\t\t\t\tinteger, err := strconv.ParseInt(envVar, 0, 64)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"didn't set from \", field.Tag.Get(\"env\"), \" due to \", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tf := topValue.Elem().Field(i)\n\t\t\t\tf.SetInt(integer)\n\t\t\tcase reflect.String:\n\t\t\t\tf := topValue.Elem().Field(i)\n\t\t\t\tf.SetString(envVar)\n\t\t\tcase reflect.Float64:\n\t\t\t\tfloat, err := strconv.ParseFloat(envVar, 64)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"didn't set from \", field.Tag.Get(\"env\"), \" due to \", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tf := topValue.Elem().Field(i)\n\t\t\t\tf.SetFloat(float)\n\t\t\t}\n\t\t}\n\t}\n}" ]
[ "0.68992835", "0.6791181", "0.65859896", "0.62089425", "0.6188783", "0.60993916", "0.607084", "0.6027738", "0.6001844", "0.5999913", "0.5981991", "0.59760606", "0.5927655", "0.58649254", "0.5816481", "0.581598", "0.5798837", "0.5781175", "0.5767561", "0.57588875", "0.5752042", "0.5749703", "0.57485235", "0.5733691", "0.5686242", "0.5677668", "0.56593406", "0.5565155", "0.5538162", "0.55305004", "0.5511833", "0.55033964", "0.5500983", "0.54784024", "0.5469456", "0.54665065", "0.5464927", "0.5454501", "0.54457253", "0.54457253", "0.5436653", "0.5433165", "0.54128134", "0.54105324", "0.5408059", "0.5395268", "0.53856796", "0.5379336", "0.53741044", "0.53607154", "0.5322478", "0.53175735", "0.5312335", "0.52992463", "0.5287389", "0.5283896", "0.52665156", "0.5257943", "0.52505296", "0.52383417", "0.52337843", "0.5228554", "0.5225477", "0.52253956", "0.52235186", "0.5220231", "0.5216714", "0.5212604", "0.52085316", "0.5207534", "0.5204392", "0.51920193", "0.5188385", "0.518336", "0.51821506", "0.5180488", "0.5173752", "0.5168611", "0.51669294", "0.515926", "0.51525253", "0.514771", "0.513016", "0.51282436", "0.5123849", "0.51236284", "0.51213014", "0.51187354", "0.51147956", "0.5111342", "0.5101012", "0.5099361", "0.5093055", "0.50914496", "0.5088909", "0.5080721", "0.50775", "0.50649685", "0.50635517", "0.50557965" ]
0.65618354
3
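The negatives in the entry above all follow the same Go pattern: register a -config flag on a flag.NewFlagSet, parse the arguments, expand a leading "~" in the path, then load the file and fail clearly if it is missing. A minimal self-contained sketch of that shared pattern, with hypothetical names (app, parseConfigFlag) standing in for the project-specific ones:

package main

import (
	"flag"
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// parseConfigFlag mirrors the flag-then-load pattern shown above:
// it registers a -config flag, parses args, expands a leading "~",
// and reports a clear error when the file does not exist.
func parseConfigFlag(args []string) (string, error) {
	fs := flag.NewFlagSet("app", flag.ContinueOnError)
	path := fs.String("config", "~/.app/config.toml", "config path")
	if err := fs.Parse(args); err != nil {
		return "", err
	}
	p := *path
	if p == "~" || strings.HasPrefix(p, "~/") { // expand to the home directory
		home, err := os.UserHomeDir()
		if err != nil {
			return "", err
		}
		p = filepath.Join(home, strings.TrimPrefix(p, "~"))
	}
	if _, err := os.Stat(p); os.IsNotExist(err) {
		return "", fmt.Errorf("config file not found: %s", p)
	}
	return p, nil
}

func main() {
	p, err := parseConfigFlag(os.Args[1:])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("using config:", p)
}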
NewResponse initializes a Response object that is used to test the expected output against the actual HTTP response
func NewResponse(expectedCode int, expectedJSONBody string) *Response {\n\treturn &Response{\n\t\texpectedCode: expectedCode,\n\t\texpectedBody: expectedJSONBody,\n\t\trecorder: httptest.NewRecorder(),\n\t}\n}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func newResponse(w http.ResponseWriter) *response {\n\treturn &response{\n\t\tResponseWriter: w,\n\t\tsize: 0,\n\t\tstatus: http.StatusOK,\n\t\theadersSend: false,\n\t}\n}", "func newResponse(r *http.Response) *Response {\n\tresponse := Response{Response: r}\n\n\treturn &response\n}", "func newResponse(r *http.Response) *Response {\n\tresponse := &Response{Response: r}\n\treturn response\n}", "func newResponse(r *http.Response) *Response {\n\tresponse := &Response{Response: r}\n\treturn response\n}", "func newResponse(r *http.Response) *Response {\n\tresponse := &Response{Response: r}\n\treturn response\n}", "func newResponse(r *http.Response) *Response {\n\treturn &Response{Response: r}\n}", "func newResponse(r *http.Response) *Response {\n\tresponse := &Response{Response: r}\n\tresponse.populatePageValues()\n\treturn response\n}", "func newResponse(r *http.Response) *Response {\n\tresponse := &Response{Response: r}\n\tresponse.populatePageValues()\n\n\tdata, err := ioutil.ReadAll(r.Body)\n\tif err == nil && data != nil {\n\t\tjson.Unmarshal(data, response)\n\t}\n\n\treturn response\n}", "func newResponse() *Response {\n\treturn &Response{\n\t\t// Header is always 5 bytes\n\t\tHeader: make([]byte, 5),\n\t}\n}", "func newResponse(r *http.Response) *Response {\n\tresp := &Response{\n\t\tResponse: r,\n\t}\n\tif v := r.Header.Get(headerXRemaining); v != \"\" {\n\t\tresp.Remaining, _ = strconv.Atoi(v)\n\t}\n\tif v := r.Header.Get(headerXReset); v != \"\" {\n\t\tresp.Reset, _ = strconv.Atoi(v)\n\t}\n\tif v := r.Header.Get(headerXTotal); v != \"\" {\n\t\tresp.Total, _ = strconv.Atoi(v)\n\t}\n\treturn resp\n}", "func NewResponse(request Request) Response {\n\tr := Response{\n\t\tRequest: request,\n\t\tVersion: \"1.0\",\n\t\tBody: responseBody{},\n\t}\n\treturn r.EndSession(true)\n}", "func newResponse(r *http.Response) *Response {\n\tresponse := &Response{Response: r}\n\tresponse.Rate = parseRate(r)\n\treturn response\n}", "func newResponse(total, perPage, currentPage, lastPage int) *Response {\n\tr := &Response{\n\t\tTotal: total,\n\t\tPerPage: perPage,\n\t\tCurrentPage: currentPage,\n\t\tLastPage: lastPage,\n\t}\n\n\t// Set the next page.\n\tif r.LastPage > r.CurrentPage {\n\t\tnextPage := r.CurrentPage + 1\n\t\tr.NextPage = &nextPage\n\t}\n\n\t// Set the previous page.\n\tif r.CurrentPage > 1 {\n\t\tprevPage := r.CurrentPage - 1\n\t\tr.PrevPage = &prevPage\n\t}\n\n\treturn r\n}", "func NewResponse(w http.ResponseWriter) *Response {\n\treturn &Response{ResponseWriter: w, Status: http.StatusOK}\n}", "func newResponse(code int, body io.Reader, req *http.Request) *http.Response {\n\tif body == nil {\n\t\tbody = &bytes.Buffer{}\n\t}\n\n\trc, ok := body.(io.ReadCloser)\n\tif !ok {\n\t\trc = ioutil.NopCloser(body)\n\t}\n\n\tres := &http.Response{\n\t\tStatusCode: code,\n\t\tStatus: fmt.Sprintf(\"%d %s\", code, http.StatusText(code)),\n\t\tProto: \"HTTP/1.1\",\n\t\tProtoMajor: 1,\n\t\tProtoMinor: 1,\n\t\tHeader: http.Header{},\n\t\tBody: rc,\n\t\tRequest: req,\n\t}\n\n\tif req != nil {\n\t\tres.Close = req.Close\n\t\tres.Proto = req.Proto\n\t\tres.ProtoMajor = req.ProtoMajor\n\t\tres.ProtoMinor = req.ProtoMinor\n\t}\n\n\treturn res\n}", "func NewResponse(ctx iris.Context) Response {\n\treturn Response{ctx: ctx}\n}", "func NewResponse(pt *influx.Point, tr *Tracer) Response {\r\n\treturn Response{\r\n\t\tPoint: pt,\r\n\t\tTracer: tr,\r\n\t}\r\n}", "func NewResponse(status int, headers, body string, start time.Time) *Response {\n\treturn &Response{\n\t\tStatus: status,\n\t\tHeaders: 
headers,\n\t\tBody: body,\n\t\tStart: start,\n\t\tEnd: time.Now(),\n\t}\n}", "func newResponse(data map[string]string) (*AMIResponse, error) {\n\tr, found := data[\"Response\"]\n\tif !found {\n\t\treturn nil, errors.New(\"Not Response\")\n\t}\n\tresponse := &AMIResponse{ID: data[\"ActionID\"], Status: r, Params: make(map[string]string)}\n\tfor k, v := range data {\n\t\tif k == \"Response\" {\n\t\t\tcontinue\n\t\t}\n\t\tresponse.Params[k] = v\n\t}\n\treturn response, nil\n}", "func NewResponse(p interface{}) *Response {\n\treturn &Response{Payload: p}\n}", "func NewResponse(req *Request) *Response {\n\treturn &Response{Request: req, items: []map[string]interface{}{}}\n}", "func NewResponse(req Request) Response {\n\treturn &response{\n\t\treq: req,\n\t\tstdout: os.Stdout,\n\t\tstderr: os.Stderr,\n\t}\n}", "func NewResponse(code int, body interface{}) Response {\n\treturn Response{\n\t\tcode: code,\n\t\tbody: body,\n\t}\n}", "func NewResponse(id string, ret Output, errMsg string) *Response {\n\tvar err *string\n\tif len(errMsg) != 0 {\n\t\terr = &errMsg\n\t}\n\n\treturn &Response{ID: id, Output: ret, Error: err}\n}", "func newResponse(regex string, code int) Response {\n\tr := Response{Code: code}\n\tr.Regex = regexp.MustCompile(regex)\n\treturn r\n}", "func NewResponse() *Response {\n\n\tresp := http.Response{\n\t\tHeader: http.Header{},\n\t}\n\treturn &Response{\n\t\tResp: &resp,\n\t}\n}", "func NewResponse(err error, statusCode int) *Response {\n\treturn &Response{\n\t\tError: err,\n\t\tStatusCode: statusCode,\n\t}\n}", "func NewResponse(status int, body string) Response {\n\treturn &response{\n\t\tstatus: status,\n\t\tbody: body,\n\t}\n}", "func NewResponse(eventID string) *Response {\n\treturn &Response{\n\t\tEventID: eventID,\n\t\tStatus: shared.StatusOK,\n\t\tStarted: time.Now(),\n\t}\n}", "func (p *Processor) newResponse(load Load, accepted bool) *Response {\n\tr := Response{\n\t\tID: load.ID,\n\t\tCustomerID: load.CustomerID,\n\t\tAccepted: accepted,\n\t}\n\n\t// store load into processed Loads\n\tp.processedLoads[load.ID] = r\n\n\treturn &r\n}", "func NewResponse(msg string, code int) *Response {\n\treturn &Response{\n\t\tStatus: http.StatusText(code),\n\t\tMessage: msg,\n\t\tStatusCode: code,\n\t}\n}", "func NewResponse(i int64, r string, e Error) Response {\n\treturn Response{\n\t\tID: i,\n\t\tJSONRpc: JSONRPCVersion,\n\t\tResult: r,\n\t\tError: e,\n\t}\n}", "func NewResponse(statusCode int, data interface{}, headers map[string]string) *Response {\n\treturn &Response{\n\t\tStatusCode: statusCode,\n\t\tData: data,\n\t\tHeaders: headers,\n\t}\n}", "func createResponse(req *http.Request) *http.Response {\n\treturn &http.Response{\n\t\tStatusCode: http.StatusOK,\n\t\tRequest: req,\n\t\tHeader: make(http.Header),\n\t\tBody: ioutil.NopCloser(bytes.NewBuffer([]byte{})),\n\t}\n}", "func NewResponse(statusCode int16, reqID int32) *Response {\n\treturn &Response{\n\t\tProtocolVersionMajor: ProtocolVersionMajor,\n\t\tProtocolVersionMinor: ProtocolVersionMinor,\n\t\tStatusCode: statusCode,\n\t\tRequestId: reqID,\n\t\tOperationAttributes: make(Attributes),\n\t\tPrinterAttributes: make([]Attributes, 0),\n\t\tJobAttributes: make([]Attributes, 0),\n\t}\n}", "func NewResponse(body []byte, rawResponse *http.Response) *Response {\n\treturn &Response{\n\t\tbody: body,\n\t\trawResponse: rawResponse,\n\t}\n}", "func NewResponse(statusCode int, contentType string, value interface{}) *Response {\n\tr := makeResponse(statusCode, contentType, value)\n\treturn &r\n}", "func newResponseWriter(w 
http.ResponseWriter) *responseWriter {\n\treturn &responseWriter{\n\t\tResponseWriter: w,\n\t\tstatus: 200,\n\t}\n}", "func NewResponse() *Response {\n\tresponse := Response{\n\t\tHeaders: make(map[string][]string),\n\t\tSelectors: make(map[string][]*element),\n\t}\n\treturn &response\n}", "func NewResponse(version string) *Response {\n\treturn &Response{\n\t\tVersion: version,\n\t\tItems: []Item{},\n\t}\n}", "func NewResponse(req *reqres.Req, data interface{}) []byte {\n\n\tres := &reqres.Res{\n\t\tData: data,\n\t}\n\n\tswitch req.ResponseMarshalingMode {\n\tcase \"DataOnly\":\n\t\treturn res.MarshalDataOnly(req)\n\tdefault:\n\t\treturn res.Marshal(req)\n\t}\n}", "func NewResponse(call *Call) *Response {\n\tresp := &Response{\n\t\tVersion: call.Version,\n\t\tID: call.ID,\n\t}\n\treturn resp\n}", "func NewResponse(id string, result interface{}) *Response {\n\treturn &Response{JsonRPC: \"2.0\", Id: id, Result: result}\n}", "func NewResponse() *Response {\n\tr := &Response{}\n\treturn r\n}", "func NewResponse(c echo.Context, success bool, code int, message string, content echo.Map) Response {\n\t// no custom context defined, returns basic response api\n\treturn NewResponseAPI(success, code, message, content)\n}", "func NewResponse(speech string) Response {\n\treturn Response{\n\t\tVersion: \"1.0\",\n\t\tBody: ResBody{\n\t\t\tOutputSpeech: Payload{\n\t\t\t\tType: \"PlainText\",\n\t\t\t\tText: speech,\n\t\t\t},\n\t\t\tShouldEndSession: true,\n\t\t},\n\t}\n}", "func newHTTPResponse(res *http.Response, responseBody []byte) *ApiResponse {\n\treturn &ApiResponse{\n\t\tStatus: res.Status,\n\t\tStatusCode: res.StatusCode,\n\t\tProto: res.Proto,\n\t\tHeader: res.Header,\n\t\tRawBody: responseBody,\n\t\tRequest: res.Request,\n\t}\n}", "func newEmptyResponse() *Response {\n\treturn &Response{\n\t\tBody: &HTTPResponse{},\n\t\tError: &HTTPResponse{},\n\t}\n}", "func NewApiResponse() *ApiResponse {\n this := ApiResponse{}\n return &this\n}", "func NewResponse(group string) *Response {\n\treturn &Response{\n\t\tGroup: group,\n\t\tErrors: make([]string, 0),\n\t\tData: make(map[string]interface{}, 0),\n\t}\n}", "func NewResponse() (*Response, error) {\n\treturn &Response{\n\t\tMoves: make([]*ResponseMove, 0, 10),\n\t}, nil\n}", "func NewResponse(resp *http.Response) (*Response, error) {\n\tjson, err := ParseJSON(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Response{\n\t\tCode: resp.StatusCode,\n\t\tBody: json,\n\t}, err\n}", "func New() *Response {\n\tresponse := Response{\n\t\tresult: nil,\n\t\tsuccess: true,\n\t\terrors: []*Error{},\n\t\tstatusCode: 0,\n\t}\n\n\treturn &response\n}", "func NewResponse() (*Response) {\n\tr := new(Response)\n\tr.TableNames = make([]string,0)\n\treturn r\n}", "func NewResponse(status int, data json.RawMessage, err error) Response {\n\terrs := \"\"\n\n\tif err != nil {\n\t\terrs = err.Error()\n\t}\n\n\treturn Response(JSONResponse{status: status, Body: &data, Err: errs})\n}", "func NewResponse(raw []byte) (res *Response, err error) {\n\tif len(raw) == 0 {\n\t\treturn nil, nil\n\t}\n\n\t// The minimum length is 3 + CRLF\n\tif len(raw) < 5 {\n\t\treturn nil, errors.New(\"invalid response length\")\n\t}\n\n\tres = &Response{}\n\n\tcode, isMultiline, err := res.parseCode(raw)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar parser = libbytes.NewParser(raw[4:], []byte{'-', ' ', '\\n'})\n\n\terr = res.parseMessage(parser, isMultiline)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !isMultiline {\n\t\treturn res, nil\n\t}\n\n\terr = 
res.parseBody(parser, code)\n\n\treturn res, err\n}", "func NewResponse() *Response {\n\tresponse := new(Response)\n\tresponse.Channel = make(chan interface{})\n\tresponse.Done = func() handlerComplete {\n\t\treturn handlerComplete{}\n\t}\n\tresponse.Headers = make(map[string][]string)\n\tresponse.SetStatusCode(200)\n\treturn response\n}", "func (s *Server) NewResponse() *Response {\n\tr := NewResponse(s.Storage)\n\tr.ErrorStatusCode = s.Config.ErrorStatusCode\n\treturn r\n}", "func NewResponse(payload []byte) (*Response, error) {\n\tvar err error\n\tvar resp *Response = &Response{}\n\n\tif err = json.Unmarshal(payload, resp); err != nil {\n\t\tlog.Fatalln(\"Error on JSON marchal:\", err)\n\t\treturn nil, err\n\t}\n\n\treturn resp, nil\n}", "func newErrResponse(code int, err error) *ErrResponse {\n\treturn &ErrResponse{Code: code, Err: err}\n}", "func NewResponseWriter(w http.ResponseWriter) *ResponseWriter {\n\treturn &ResponseWriter{w, 200}\n}", "func NewResponseStatus()(*ResponseStatus) {\n m := &ResponseStatus{\n }\n m.SetAdditionalData(make(map[string]interface{}));\n return m\n}", "func NewResponse(botCtx BotContext) ResponseWriter {\n\treturn &response{botCtx: botCtx}\n}", "func NewResponse(status int, message, errCode string) *Response {\n\treturn &Response{\n\t\tStatus: status,\n\t\tMessage: message,\n\t\tErrorCode: errCode,\n\t}\n}", "func WriteNewResponse(w *http.ResponseWriter, r *http.Request, response JSONResponse) {\n\t// Echo back message\n\tresponse.Write(w, r)\n}", "func newHTTPResponse(resp *http.Response) (*httpResponse, error) {\n\tswitch code := resp.StatusCode; code {\n\tcase http.StatusOK, http.StatusAccepted, http.StatusNoContent:\n\tcase http.StatusBadRequest:\n\tdefault:\n\t\tresp.Body.Close()\n\t\treturn nil, NewError(HTTPError, \"The status is unexpected.\", map[string]interface{}{\n\t\t\t\"status\": fmt.Sprintf(\"%d %s\", code, http.StatusText(code)),\n\t\t})\n\t}\n\t// Read the leading bytes to get the response header.\n\tbuf := make([]byte, httpBufferSize)\n\tn, err := io.ReadFull(resp.Body, buf)\n\tif err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {\n\t\tresp.Body.Close()\n\t\treturn nil, NewError(NetworkError, \"io.ReadFull failed.\", map[string]interface{}{\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t}\n\tdata := bytes.TrimLeft(buf[:n], \" \\t\\r\\n\")\n\tif bytes.HasPrefix(data, []byte(\"[\")) {\n\t\t// The response must be JSON-encoded.\n\t\tr, err := parseHTTPResponseHeader(resp, data)\n\t\tif err != nil {\n\t\t\tresp.Body.Close()\n\t\t\treturn nil, err\n\t\t}\n\t\treturn r, nil\n\t}\n\treturn &httpResponse{\n\t\tresp: resp,\n\t\tplain: true,\n\t\tleft: data,\n\t}, nil\n}", "func NewResponse(\n\treporter Reporter, response *http.Response, rtt ...time.Duration,\n) *Response {\n\tconfig := Config{Reporter: reporter}\n\tconfig = config.withDefaults()\n\n\treturn newResponse(responseOpts{\n\t\tconfig: config,\n\t\tchain: newChainWithConfig(\"Response()\", config),\n\t\thttpResp: response,\n\t\trtt: rtt,\n\t})\n}", "func NewResponse(content ResponseContent, req *http.Request, opts *ResponseOptions) (*http.Response, error) {\n\tresp, err := exported.NewResponse(content, req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif opts != nil {\n\t\tif opts.Body != nil {\n\t\t\tresp.Body = opts.Body\n\t\t}\n\t\tif opts.ContentType != \"\" {\n\t\t\tresp.Header.Set(shared.HeaderContentType, opts.ContentType)\n\t\t}\n\t}\n\treturn resp, nil\n}", "func NewResp(r *ghttp.Request, code int, msg string, data ...interface{}) *apiResp {\n\tvar d interface{}\n\tif 
len(data) > 0 {\n\t\td = data[0]\n\t}\n\n\treturn &apiResp{\n\t\tResp: Resp{\n\t\t\tCode: code,\n\t\t\tMsg: msg,\n\t\t\tData: d,\n\t\t},\n\t\tr: r,\n\t}\n}", "func newWriter(w http.ResponseWriter) *responseWriter {\n\trw := writerPool.Get().(*responseWriter)\n\trw.status = http.StatusOK\n\trw.ResponseWriter = w\n\trw.wroteHeader = false\n\trw.n = 0\n\n\treturn rw\n}", "func NewResponse(description string) *spec.Response {\n\tresp := new(spec.Response)\n\tresp.Description = description\n\treturn resp\n}", "func newResult(resp *internal.Response) Result {\n\treturn &result{resp: resp}\n}", "func NewResponse(in *yaml.Node, context *compiler.Context) (*Response, error) {\n\terrors := make([]error, 0)\n\tx := &Response{}\n\tm, ok := compiler.UnpackMap(in)\n\tif !ok {\n\t\tmessage := fmt.Sprintf(\"has unexpected value: %+v (%T)\", in, in)\n\t\terrors = append(errors, compiler.NewError(context, message))\n\t} else {\n\t\tallowedKeys := []string{\"$ref\"}\n\t\tvar allowedPatterns []*regexp.Regexp\n\t\tinvalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)\n\t\tif len(invalidKeys) > 0 {\n\t\t\tmessage := fmt.Sprintf(\"has invalid %s: %+v\", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, \", \"))\n\t\t\terrors = append(errors, compiler.NewError(context, message))\n\t\t}\n\t\t// string _ref = 1;\n\t\tv1 := compiler.MapValueForKey(m, \"$ref\")\n\t\tif v1 != nil {\n\t\t\tx.XRef, ok = compiler.StringForScalarNode(v1)\n\t\t\tif !ok {\n\t\t\t\tmessage := fmt.Sprintf(\"has unexpected value for $ref: %s\", compiler.Display(v1))\n\t\t\t\terrors = append(errors, compiler.NewError(context, message))\n\t\t\t}\n\t\t}\n\t}\n\treturn x, compiler.NewErrorGroupOrNil(errors)\n}", "func NewResponse(input sarah.Input, msg string, options ...RespOption) (*sarah.CommandResponse, error) {\n\ttyped, ok := input.(*Input)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"%T is not currently supported to automatically generate response\", input)\n\t}\n\n\tstash := &respOptions{\n\t\tattachments: []*webapi.MessageAttachment{},\n\t\tuserContext: nil,\n\t\tlinkNames: 1, // Linkify channel names and usernames. ref. https://api.slack.com/docs/message-formatting#parsing_modes\n\t\tparseMode: webapi.ParseModeFull,\n\t\tunfurlLinks: true,\n\t\tunfurlMedia: true,\n\t}\n\tfor _, opt := range options {\n\t\topt(stash)\n\t}\n\n\tpostMessage := webapi.NewPostMessage(typed.channelID, msg).\n\t\tWithAttachments(stash.attachments).\n\t\tWithLinkNames(stash.linkNames).\n\t\tWithParse(stash.parseMode).\n\t\tWithUnfurlLinks(stash.unfurlLinks).\n\t\tWithUnfurlMedia(stash.unfurlMedia)\n\tif replyInThread(typed, stash) {\n\t\tpostMessage.\n\t\t\tWithThreadTimeStamp(threadTimeStamp(typed).String()).\n\t\t\tWithReplyBroadcast(stash.replyBroadcast)\n\t}\n\treturn &sarah.CommandResponse{\n\t\tContent: postMessage,\n\t\tUserContext: stash.userContext,\n\t}, nil\n}", "func NewResponseModifier(req *http.Request, res *http.Response) *ResponseModifier {\n\treturn &ResponseModifier{Request: req, Response: res, Header: res.Header}\n}", "func TestNewResponse(t *testing.T) {\n\n\tyml := `description: this is a response\nheaders:\n someHeader:\n description: a header\ncontent:\n something/thing:\n description: a thing\nx-pizza-man: pizza!\nlinks:\n someLink:\n description: a link! 
`\n\n\tvar idxNode yaml.Node\n\t_ = yaml.Unmarshal([]byte(yml), &idxNode)\n\tidx := index.NewSpecIndexWithConfig(&idxNode, index.CreateOpenAPIIndexConfig())\n\n\tvar n v3.Response\n\t_ = low.BuildModel(idxNode.Content[0], &n)\n\t_ = n.Build(idxNode.Content[0], idx)\n\n\tr := NewResponse(&n)\n\n\tassert.Len(t, r.Headers, 1)\n\tassert.Len(t, r.Content, 1)\n\tassert.Equal(t, \"pizza!\", r.Extensions[\"x-pizza-man\"])\n\tassert.Len(t, r.Links, 1)\n\tassert.Equal(t, 1, r.GoLow().Description.KeyNode.Line)\n\n}", "func newSuccessResp(key, msg string) *ResponseType {\n\treturn &ResponseType{Ok: true, Message: msg, Key: key}\n}", "func TestNewSimpleResponse(t *testing.T) {\n\t// Create without format.\n\tr := NewSimpleResponse(\"TAG\", \"XXX\")\n\tif r.Status != \"TAG\" {\n\t\tt.Fatalf(\"Unexpected status. Expected: %s - Found: %s.\", \"TAG\", r.Status)\n\t}\n\tif r.Message != \"XXX\" {\n\t\tt.Fatalf(\"Unexpected message. Expected: %s - Found: %s.\", \"XXX\", r.Message)\n\t}\n\t// Create with format.\n\tr = NewSimpleResponsef(\"TAG2\", \"the%s\", \"message\")\n\tif r.Status != \"TAG2\" {\n\t\tt.Fatalf(\"Unexpected status. Expected: %s - Found: %s.\", \"TAG2\", r.Status)\n\t}\n\tif r.Message != \"themessage\" {\n\t\tt.Fatalf(\"Unexpected message. Expected: %s - Found: %s.\", \"themessage\", r.Message)\n\t}\n}", "func buildResponse(httpReq *Request, httpResp *http.Response) (*Response, error) {\n\tcontent, err := ioutil.ReadAll(httpResp.Body)\n\tif err != nil {\n\t\tif !errors.Is(err, io.ErrUnexpectedEOF) { // Ignore Unexpected EOF error\n\t\t\treturn nil, WrapErr(err, \"read Response.Body failed\")\n\t\t}\n\t}\n\treturn &Response{\n\t\tURL: httpReq.URL,\n\t\tStatusCode: httpResp.StatusCode,\n\t\tProto: httpResp.Proto,\n\t\tHeaders: httpResp.Header,\n\t\tCookies: httpResp.Cookies(),\n\t\tRequest: httpReq,\n\t\tContentLength: httpResp.ContentLength,\n\t\tContent: content,\n\t\tencoding: \"UTF-8\",\n\t}, nil\n}", "func buildResponse(rawResponse *http.Response, err error) *Response {\n\treturn &Response{\n\t\tResponse: rawResponse,\n\t\tError: err,\n\t}\n}", "func CreateResponse(w *gin.Context, payload interface{}) {\n\tw.JSON(200, payload)\n}", "func newErrorResp(key, msg string, err error) *ResponseType {\n\treturn &ResponseType{Ok: false, Message: msg, Error: err.Error(), Key: key}\n}", "func MockCreateResponse(t *testing.T) {\n\tth.Mux.HandleFunc(shareEndpoint, func(w http.ResponseWriter, r *http.Request) {\n\t\tth.TestMethod(t, r, \"POST\")\n\t\tth.TestHeader(t, r, \"X-Auth-Token\", fake.TokenID)\n\t\tth.TestHeader(t, r, \"Content-Type\", \"application/json\")\n\t\tth.TestHeader(t, r, \"Accept\", \"application/json\")\n\t\tth.TestJSONRequest(t, r, createRequest)\n\t\tw.Header().Add(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tfmt.Fprintf(w, createResponse)\n\t})\n}", "func NewResponseWriter(w http.ResponseWriter, r *http.Request) ResponseWriter {\n\tpretty := r.URL.Query().Get(\"pretty\") == \"true\"\n\trw := &responseWriter{ResponseWriter: w}\n\tswitch r.Header.Get(\"Accept\") {\n\tcase \"application/json\":\n\t\tfallthrough\n\tdefault:\n\t\tw.Header().Add(\"Content-Type\", \"application/json\")\n\t\trw.formatter = &jsonFormatter{Pretty: pretty, Writer: w}\n\t}\n\treturn rw\n}", "func NewBindResponse(i interface{}, description string) *spec.Response {\n\tresp := new(spec.Response)\n\tresp.Description = description\n\n\tt := reflect.TypeOf(i)\n\n\tif t.Kind() == reflect.Ptr {\n\t\tt = t.Elem()\n\t}\n\n\tslice := false\n\tif t.Kind() == reflect.Slice {\n\t\tslice = 
true\n\t\tt = t.Elem()\n\t}\n\n\tif t.Kind() == reflect.Struct {\n\t\tschema := spec.RefSchema(\"#/definitions/\" + t.Name())\n\t\tif slice {\n\t\t\tresp.WithSchema(spec.ArrayProperty(schema))\n\t\t} else {\n\t\t\tresp.WithSchema(schema)\n\t\t}\n\t}\n\n\treturn resp\n}", "func normalResponse(w http.ResponseWriter, r *http.Request){\n\trespStr := `<html>\n<head><title> My Custom Response </title> </head>\n<body> <h1> Testing the response headers ...... </h1></body>\n</html>`\nw.Write([]byte(respStr))\n}", "func NewResponseC(\n\tconfig Config, response *http.Response, rtt ...time.Duration,\n) *Response {\n\tconfig = config.withDefaults()\n\n\treturn newResponse(responseOpts{\n\t\tconfig: config,\n\t\tchain: newChainWithConfig(\"Response()\", config),\n\t\thttpResp: response,\n\t\trtt: rtt,\n\t})\n}", "func (ccr ContainersCreateResponse) Response() *http.Response {\n\treturn ccr.rawResponse\n}", "func NewResponseWriter() *ResponseWriter {\n\treturn &ResponseWriter{\n\t\theader: make(http.Header),\n\t\tcloseNotifyChan: make(chan bool),\n\t}\n}", "func newReply(ctx *Context) *Reply {\n\treturn &Reply{\n\t\tCode: http.StatusOK,\n\t\tgzip: true,\n\t\tctx: ctx,\n\t}\n}", "func NewGenericResponse(stsCd, isError int, messages []string, data interface{}) *GenericResponse {\n\n\treturn &GenericResponse{\n\t\tStatus: stsCd,\n\t\tSuccess: isError == 0,\n\t\tMessages: messages,\n\t\tData: data,\n\t}\n}", "func CreateResponse() *application.HTTPResponse {\n\tresponse := &application.HTTPResponse{}\n\tresponse.Headers = make(map[string]*application.HTTPResponse_HTTPHeaderParameter)\n\treturn response\n}", "func NewResponseWriter(rw http.ResponseWriter) ResponseWriter {\n\treturn &responseWriter{\n\t\tResponseWriter: rw,\n\t}\n}", "func (sb *SetBalancer) NewResponse() proto.Message {\n\treturn &pb.SetBalancerRunningResponse{}\n}", "func NewResponse(channel chat1.ChatChannel, client KeybaseChatAPIClient) ResponseWriter {\n\treturn &response{channel: channel, client: client}\n}", "func CreateTestSeeResponse() (response *TestSeeResponse) {\n\tresponse = &TestSeeResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (m *Mutate) NewResponse() proto.Message {\n\treturn &pb.MutateResponse{}\n}", "func CreateResponse(result interface{}, err error) *Response {\n\tif err == nil {\n\t\treturn CreateSuccessResponse(result)\n\t}\n\treturn CreateErrorResponse(err)\n}", "func Response(statusCode int, body []byte, w http.ResponseWriter) {\n\tw.WriteHeader(statusCode)\n\t_, err := w.Write(body)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func generateResponse(h *header, recvTime ntpTime, authErr error) *Response {\n\tr := &Response{\n\t\tTime: h.TransmitTime.Time(),\n\t\tClockOffset: offset(h.OriginTime, h.ReceiveTime, h.TransmitTime, recvTime),\n\t\tRTT: rtt(h.OriginTime, h.ReceiveTime, h.TransmitTime, recvTime),\n\t\tPrecision: toInterval(h.Precision),\n\t\tStratum: h.Stratum,\n\t\tReferenceID: h.ReferenceID,\n\t\tReferenceTime: h.ReferenceTime.Time(),\n\t\tRootDelay: h.RootDelay.Duration(),\n\t\tRootDispersion: h.RootDispersion.Duration(),\n\t\tLeap: h.getLeap(),\n\t\tMinError: minError(h.OriginTime, h.ReceiveTime, h.TransmitTime, recvTime),\n\t\tPoll: toInterval(h.Poll),\n\t\tauthErr: authErr,\n\t}\n\n\t// Calculate values depending on other calculated values\n\tr.RootDistance = rootDistance(r.RTT, r.RootDelay, r.RootDispersion)\n\n\t// If a kiss of death was received, interpret the reference ID as\n\t// a kiss code.\n\tif r.Stratum == 0 {\n\t\tr.KissCode = 
kissCode(r.ReferenceID)\n\t}\n\n\treturn r\n}" ]
[ "0.85427314", "0.8370056", "0.83428556", "0.83428556", "0.83428556", "0.8293683", "0.8232112", "0.7997844", "0.78990436", "0.7838708", "0.7819803", "0.7797125", "0.7781591", "0.77441895", "0.77202547", "0.7505292", "0.7461293", "0.7445837", "0.7443147", "0.7376085", "0.7363521", "0.7355918", "0.7327234", "0.7284536", "0.72777563", "0.72726846", "0.7247689", "0.7228647", "0.71869767", "0.71869004", "0.7182776", "0.71404195", "0.712796", "0.71209127", "0.7107411", "0.70962733", "0.70861936", "0.7076238", "0.7058618", "0.7017707", "0.69793504", "0.69784", "0.6977157", "0.69581896", "0.6957892", "0.695541", "0.6935018", "0.6933808", "0.6924573", "0.6918884", "0.6915829", "0.68908733", "0.6887528", "0.68820167", "0.6877444", "0.6856542", "0.6820968", "0.6787003", "0.6775596", "0.6769972", "0.6766773", "0.67434543", "0.6727367", "0.6700836", "0.6669607", "0.6661979", "0.66293323", "0.6625099", "0.6584074", "0.6563691", "0.6544064", "0.6501211", "0.6463974", "0.645854", "0.6456966", "0.64409554", "0.64322054", "0.6425286", "0.6397943", "0.63979256", "0.6372019", "0.6339475", "0.6338345", "0.6325089", "0.6317378", "0.63016284", "0.62929934", "0.62702346", "0.6265733", "0.6250555", "0.62450737", "0.62385476", "0.62106514", "0.6150544", "0.6143263", "0.6129438", "0.6128943", "0.6126059", "0.6124929", "0.61189705" ]
0.72748876
25
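The tail of the row above lists response-construction helpers. A minimal sketch of exercising the simplest of them, the Response(statusCode, body, w) writer, against an httptest recorder; the helper body mirrors the snippet above, while the harness around it is illustrative only:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// Response writes a status code and body to the writer, panicking on a
// write failure, exactly as in the snippet listed above.
func Response(statusCode int, body []byte, w http.ResponseWriter) {
	w.WriteHeader(statusCode)
	if _, err := w.Write(body); err != nil {
		panic(err)
	}
}

func main() {
	// httptest.NewRecorder captures whatever the helper writes.
	rec := httptest.NewRecorder()
	Response(http.StatusCreated, []byte(`{"ok":true}`), rec)

	fmt.Println(rec.Code)          // 201
	fmt.Println(rec.Body.String()) // {"ok":true}
}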
WithSessionCookie sets an expected session cookie in the response object
func (r *Response) WithSessionCookie(val string) *Response {
	r.cookie = &http.Cookie{
		Name:  "ses",
		Value: val,
		Path:  "/",
	}
	return r
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func setSession(userName string, response http.ResponseWriter) {\n value := map[string]string{\n \"name\": userName,\n }\n if encoded, err := cookieHandler.Encode(\"session\", value); err == nil {\n cookie := &http.Cookie{\n Name: \"session\",\n Value: encoded,\n Path: \"/\",\n }\n http.SetCookie(response, cookie)\n }\n }", "func setSession(userName, token string, response http.ResponseWriter) {\n\tvalue := map[string]string{\n\t\t\"name\": userName,\n\t\t\"token\": token,\n\t}\n\tif encoded, err := cookieHandler.Encode(\"session\", value); err == nil {\n\t\tcookie := &http.Cookie{\n\t\t\tName: \"session\",\n\t\t\tValue: encoded,\n\t\t\tPath: \"/\",\n\t\t}\n\t\thttp.SetCookie(response, cookie)\n\t} else {\n\t\tlog.Println(err)\n\t}\n}", "func setSession(c echo.Context, r *http.Response) {\r\n\r\n\tfor _, cookie := range r.Cookies() {\r\n\r\n\t\tif cookie.Name == \"PHPSESSID\" {\r\n\r\n\t\t\tsess, _ := session.Get(\"Session\", c)\r\n\t\t\tsess.Options = &sessions.Options{\r\n\t\t\t\tPath: \"*\",\r\n\t\t\t\tMaxAge: 60 * 3,\r\n\t\t\t\tHttpOnly: true,\r\n\t\t\t}\r\n\t\t\tsess.Values[\"PHPSESSID\"] = cookie.Value\r\n\r\n\t\t\tsess.Save(c.Request(), c.Response())\r\n\t\t}\r\n\t}\r\n}", "func SetSessionCookie(w http.ResponseWriter, r *http.Request, s *Session) string {\n\tif s == nil {\n\t\thttp.SetCookie(w, &http.Cookie{\n\t\t\tName: \"session\",\n\t\t\tValue: \"nouser:\" + GenerateBase64(124),\n\t\t\tSameSite: http.SameSiteStrictMode,\n\t\t\tPath: \"/\",\n\t\t\tExpires: time.Now().AddDate(0, 0, 1),\n\t\t})\n\t} else {\n\t\tsessionCookie := &http.Cookie{\n\t\t\tName: \"session\",\n\t\t\tValue: s.Key,\n\t\t\tSameSite: http.SameSiteStrictMode,\n\t\t\tPath: \"/\",\n\t\t}\n\t\tif s.ExpiresOn != nil {\n\t\t\tsessionCookie.Expires = *s.ExpiresOn\n\t\t}\n\t\thttp.SetCookie(w, sessionCookie)\n\n\t\tjwt := createJWT(r, s)\n\t\tjwtCookie := &http.Cookie{\n\t\t\tName: \"access-jwt\",\n\t\t\tValue: jwt,\n\t\t\tSameSite: http.SameSiteStrictMode,\n\t\t\tPath: \"/\",\n\t\t}\n\t\tif s.ExpiresOn != nil {\n\t\t\tjwtCookie.Expires = *s.ExpiresOn\n\t\t}\n\t\thttp.SetCookie(w, jwtCookie)\n\n\t\treturn jwt\n\t}\n\treturn \"\"\n}", "func writeSessionOnCookie(c *fiber.Ctx, session string, config *authConfig.Configuration) {\n\tappConfig := coreConfig.AppConfig\n\tparts := strings.Split(session, \".\")\n\theaderCookie := &fiber.Cookie{\n\t\tHTTPOnly: true,\n\t\tName: *appConfig.HeaderCookieName,\n\t\tValue: parts[0],\n\t\tPath: \"/\",\n\t\t// Expires: time.Now().Add(config.CookieExpiresIn),\n\t\tDomain: config.CookieRootDomain,\n\t}\n\n\tpayloadCookie := &fiber.Cookie{\n\t\t// HttpOnly: true,\n\t\tName: *appConfig.PayloadCookieName,\n\t\tValue: parts[1],\n\t\tPath: \"/\",\n\t\t// Expires: time.Now().Add(config.CookieExpiresIn),\n\t\tDomain: config.CookieRootDomain,\n\t}\n\n\tsignCookie := &fiber.Cookie{\n\t\tHTTPOnly: true,\n\t\tName: *appConfig.SignatureCookieName,\n\t\tValue: parts[2],\n\t\tPath: \"/\",\n\t\t// Expires: time.Now().Add(config.CookieExpiresIn),\n\t\tDomain: config.CookieRootDomain,\n\t}\n\t// Set cookie\n\tc.Cookie(headerCookie)\n\tc.Cookie(payloadCookie)\n\tc.Cookie(signCookie)\n}", "func setSession(email string, writer http.ResponseWriter) {\n\tvalue := map[string]string{\n\t\t\"email\": email,\n\t}\n\tif encoded, err := cookieHandler.Encode(\"session\", value); err == nil {\n\t\tcookie := &http.Cookie{\n\t\t\tName: \"session\",\n\t\t\tValue: encoded,\n\t\t\tPath: \"/\",\n\t\t\tExpires: time.Now().Add(4 * time.Hour),\n\t\t}\n\t\thttp.SetCookie(writer, cookie)\n\t}\n}", "func (c 
*userManagementClient) SessionCookie(\n\tctx context.Context,\n\tidToken string,\n\texpiresIn time.Duration,\n) (string, error) {\n\n\tif idToken == \"\" {\n\t\treturn \"\", errors.New(\"id token must not be empty\")\n\t}\n\n\tif expiresIn < 5*time.Minute || expiresIn > 14*24*time.Hour {\n\t\treturn \"\", errors.New(\"expiry duration must be between 5 minutes and 14 days\")\n\t}\n\n\tpayload := map[string]interface{}{\n\t\t\"idToken\": idToken,\n\t\t\"validDuration\": int64(expiresIn.Seconds()),\n\t}\n\tresp, err := c.post(ctx, \":createSessionCookie\", payload)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif resp.Status != http.StatusOK {\n\t\treturn \"\", handleHTTPError(resp)\n\t}\n\n\tvar result struct {\n\t\tSessionCookie string `json:\"sessionCookie\"`\n\t}\n\terr = json.Unmarshal(resp.Body, &result)\n\treturn result.SessionCookie, err\n}", "func (cookie *Cookie) SetCookieOnResponse(w http.ResponseWriter, setSiteCookie bool, cfg *config.HostCookie, ttl time.Duration) {\n\thttpCookie := cookie.ToHTTPCookie(ttl)\n\tvar domain string = cfg.Domain\n\thttpCookie.Secure = true\n\n\tif domain != \"\" {\n\t\thttpCookie.Domain = domain\n\t}\n\n\tvar currSize int = len([]byte(httpCookie.String()))\n\tfor cfg.MaxCookieSizeBytes > 0 && currSize > cfg.MaxCookieSizeBytes && len(cookie.uids) > 0 {\n\t\tvar oldestElem string = \"\"\n\t\tvar oldestDate int64 = math.MaxInt64\n\t\tfor key, value := range cookie.uids {\n\t\t\ttimeUntilExpiration := time.Until(value.Expires)\n\t\t\tif timeUntilExpiration < time.Duration(oldestDate) {\n\t\t\t\toldestElem = key\n\t\t\t\toldestDate = int64(timeUntilExpiration)\n\t\t\t}\n\t\t}\n\t\tdelete(cookie.uids, oldestElem)\n\t\thttpCookie = cookie.ToHTTPCookie(ttl)\n\t\tif domain != \"\" {\n\t\t\thttpCookie.Domain = domain\n\t\t}\n\t\tcurrSize = len([]byte(httpCookie.String()))\n\t}\n\n\tif setSiteCookie {\n\t\t// httpCookie.Secure = true\n\t\thttpCookie.SameSite = http.SameSiteNoneMode\n\t}\n\tw.Header().Add(\"Set-Cookie\", httpCookie.String())\n}", "func SetSession(w http.ResponseWriter, sid string) {\n\thttp.SetCookie(w, &http.Cookie{Name: SidName, Value: sid})\n}", "func createSessionCookie(token string, maxAge int) *http.Cookie {\n cookie := http.Cookie{}\n cookie.Name = SessionCookieName\n cookie.Value = token\n cookie.Path = \"/\"\n cookie.MaxAge = maxAge\n cookie.Secure = true\n cookie.HttpOnly = true\n return &cookie\n}", "func sessionHandler(w http.ResponseWriter, r *http.Request) {\n\tctx := context.Background()\n\tif b.authenticator == nil {\n\t\tvar err error\n\t\tb.authenticator, err = initAuth(ctx)\n\t\tif err != nil {\n\t\t\tlog.Print(\"sessionHandler authenticator could not be initialized\")\n\t\t\thttp.Error(w, \"Server error\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\tsessionInfo := identity.InvalidSession()\n\tcookie, err := r.Cookie(\"session\")\n\tif err == nil {\n\t\tsessionInfo = b.authenticator.CheckSession(ctx, cookie.Value)\n\t}\n\tif (err != nil) || (!sessionInfo.Valid) {\n\t\t// OK, just don't show the contents that don't require a login\n\t\tlog.Println(\"sessionHandler: creating a new cookie\")\n\t\tsessionid := identity.NewSessionId()\n\t\tcookie := &http.Cookie{\n\t\t\tName: \"session\",\n\t\t\tValue: sessionid,\n\t\t\tDomain: config.GetSiteDomain(),\n\t\t\tPath: \"/\",\n\t\t\tMaxAge: 86400, // One day\n\t\t}\n\t\thttp.SetCookie(w, cookie)\n\t\tuserInfo := identity.UserInfo{\n\t\t\tUserID: 1,\n\t\t\tUserName: \"\",\n\t\t\tEmail: \"\",\n\t\t\tFullName: \"\",\n\t\t\tRole: 
\"\",\n\t\t}\n\t\tb.authenticator.SaveSession(ctx, sessionid, userInfo, 0)\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tresultsJson, err := json.Marshal(sessionInfo)\n\tif err != nil {\n\t\tlog.Printf(\"sessionHandler: error marshalling JSON, %v\", err)\n\t}\n\tfmt.Fprint(w, string(resultsJson))\n}", "func (t *CookieAuthTransport) setSessionObject() error {\n\treq, err := t.buildAuthRequest()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar authClient = &http.Client{\n\t\tTimeout: time.Second * 60,\n\t}\n\tresp, err := authClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tt.SessionObject = resp.Cookies()\n\treturn nil\n}", "func createSessionCookie(w http.ResponseWriter, r *http.Request, value string) {\n\tcookie, err := r.Cookie(\"session\")\n\tif err != nil {\n\t\tcookie = &http.Cookie{\n\t\t\tName: \"session\",\n\t\t\tMaxAge: SessionTime,\n\t\t\tValue: value,\n\t\t\tHttpOnly: true,\n\t\t}\n\t\thttp.SetCookie(w, cookie)\n\t}\n}", "func SetCookie(w http.ResponseWriter, r *http.Request) {\r\n\tcookieName := envvar.CookieName()\r\n\tcookie, err := r.Cookie(cookieName)\r\n\tif err != nil {\r\n\t\tcookie := &http.Cookie{\r\n\t\t\tName: cookieName,\r\n\t\t\tValue: (uuid.NewV4()).String(),\r\n\t\t\tHttpOnly: true,\r\n\t\t\tPath: \"/\",\r\n\t\t\tDomain: envvar.HostAddress(),\r\n\t\t\tSecure: true,\r\n\t\t}\r\n\t\thttp.SetCookie(w, cookie)\r\n\t\tlogger.Info.Println(\"set cookie : \" + cookie.Value + \"-\" + cookieName)\r\n\t\treturn\r\n\t}\r\n\t_, found := Get(r)\r\n\tif found {\r\n\t\tRefresh(r)\r\n\t\tlogger.Info.Println(\"session refresh: \" + cookie.Value)\r\n\t\treturn\r\n\t}\r\n\tlogger.Info.Println(cookie.Value + \" already set\")\r\n\r\n\treturn\r\n}", "func (resp *Response) SetCookie(cookie *http.Cookie) {\n\tresp.Resp.Header.Add(\"Set-Cookie\", cookie.String())\n}", "func (s *CookieStore) makeSessionCookie(req *http.Request, value string, expiration time.Duration, now time.Time) *http.Cookie {\n\treturn s.makeCookie(req, s.Name, value, expiration, now)\n}", "func (ch *CFHosting) SessionEchoMiddleware(h echo.HandlerFunc) echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\t\t// Make sure there is a JSESSIONID cookie set to the session ID\n\t\tsession, err := ch.portalProxy.GetSession(c)\n\t\tif err == nil {\n\t\t\t// We have a session\n\t\t\tguid, err := ch.portalProxy.GetSessionValue(c, cfSessionCookieName)\n\t\t\tif err != nil || guid == nil {\n\t\t\t\tguid = uuid.NewV4().String()\n\t\t\t\tsession.Values[cfSessionCookieName] = guid\n\t\t\t\tch.portalProxy.SaveSession(c, session)\n\t\t\t}\n\t\t\tsessionGUID := fmt.Sprintf(\"%s\", guid)\n\t\t\t// Set the JSESSIONID coolie for Cloud Foundry session affinity\n\t\t\tw := c.Response().Writer\n\t\t\tcookie := sessions.NewCookie(cfSessionCookieName, sessionGUID, session.Options)\n\t\t\thttp.SetCookie(w, cookie)\n\t\t}\n\t\treturn h(c)\n\t}\n}", "func TestSessionExtension(t *testing.T) {\n\trt := NewRestTester(t, &RestTesterConfig{GuestEnabled: true})\n\tdefer rt.Close()\n\n\tid, err := base.GenerateRandomSecret()\n\trequire.NoError(t, err)\n\n\tconst username = \"Alice\"\n\n\tauthenticator := rt.GetDatabase().Authenticator(base.TestCtx(t))\n\tuser, err := authenticator.NewUser(username, \"Password\", channels.BaseSetOf(t, \"*\"))\n\trequire.NoError(t, err)\n\trequire.NoError(t, authenticator.Save(user))\n\n\t// Fake session with more than 10% of the 24 hours TTL has elapsed. 
It should cause a new\n\t// cookie to be sent by the server with the same session ID and an extended expiration date.\n\tfakeSession := auth.LoginSession{\n\t\tID: id,\n\t\tUsername: username,\n\t\tExpiration: time.Now().Add(4 * time.Hour),\n\t\tTtl: 24 * time.Hour,\n\t\tSessionUUID: user.GetSessionUUID(),\n\t}\n\n\tassert.NoError(t, rt.MetadataStore().Set(authenticator.DocIDForSession(fakeSession.ID), 0, nil, fakeSession))\n\treqHeaders := map[string]string{\n\t\t\"Cookie\": auth.DefaultCookieName + \"=\" + fakeSession.ID,\n\t}\n\n\tresponse := rt.SendRequestWithHeaders(\"PUT\", \"/{{.keyspace}}/doc1\", `{\"hi\": \"there\"}`, reqHeaders)\n\tlog.Printf(\"PUT Request: Set-Cookie: %v\", response.Header().Get(\"Set-Cookie\"))\n\tRequireStatus(t, response, http.StatusCreated)\n\tassert.Contains(t, response.Header().Get(\"Set-Cookie\"), auth.DefaultCookieName+\"=\"+fakeSession.ID)\n\n\tresponse = rt.SendRequestWithHeaders(\"GET\", \"/{{.keyspace}}/doc1\", \"\", reqHeaders)\n\tlog.Printf(\"GET Request: Set-Cookie: %v\", response.Header().Get(\"Set-Cookie\"))\n\tRequireStatus(t, response, http.StatusOK)\n\tassert.Equal(t, \"\", response.Header().Get(\"Set-Cookie\"))\n\n\t// Explicitly delete the fake session doc from the bucket to simulate the test\n\t// scenario for expired session. In reality, Sync Gateway rely on Couchbase\n\t// Server to nuke the expired document based on TTL. Couchbase Server periodically\n\t// removes all items with expiration times that have passed.\n\tassert.NoError(t, rt.MetadataStore().Delete(authenticator.DocIDForSession(fakeSession.ID)))\n\n\tresponse = rt.SendRequestWithHeaders(\"GET\", \"/{{.keyspace}}/doc1\", \"\", reqHeaders)\n\tlog.Printf(\"GET Request: Set-Cookie: %v\", response.Header().Get(\"Set-Cookie\"))\n\tRequireStatus(t, response, http.StatusUnauthorized)\n\trequire.Contains(t, response.Body.String(), \"Session Invalid\")\n}", "func getCookie(w http.ResponseWriter, req *http.Request) *http.Cookie {\n\tc, err := req.Cookie(\"session\")\n\tif err != nil {\n\t\tsID, _ := uuid.NewV4()\n\t\tc = &http.Cookie{\n\t\t\tName: \"session\",\n\t\t\tValue: sID.String(),\n\t\t}\n\t\thttp.SetCookie(w, c)\n\t}\n\treturn c\n}", "func (r *StandardResponse) SetCookie(cookie *http.Cookie) {\n\tr.cookie = cookie\n}", "func (c *Action) SetCookie(cookie *http.Cookie) {\n\tc.AddHeader(\"Set-Cookie\", cookie.String())\n}", "func Test_Session_Cookie(t *testing.T) {\n\tt.Parallel()\n\t// session store\n\tstore := New()\n\t// fiber instance\n\tapp := fiber.New()\n\t// fiber context\n\tctx := app.AcquireCtx(&fasthttp.RequestCtx{})\n\tdefer app.ReleaseCtx(ctx)\n\n\t// get session\n\tsess, _ := store.Get(ctx)\n\tsess.Save()\n\n\t// cookie should not be set if empty data\n\tutils.AssertEqual(t, 0, len(ctx.Response().Header.PeekCookie(store.CookieName)))\n}", "func Session(w http.ResponseWriter, r *http.Request) (models.Session, error) {\n\tvar s models.Session\n\tcookie, err := r.Cookie(\"_cookie\")\n\tif err != nil {\n\t\tfmt.Printf(\"%s\", err.Error())\n\t\treturn s, err\n\t}\n\ts = models.Session{UUID: cookie.Value}\n\tif ok, _ := s.Check(); !ok {\n\t\treturn s, errors.New(\"Invalid session. 
\")\n\t}\n\treturn s, nil\n}", "func SetCookie(w http.ResponseWriter, sessionID string) {\n\texpiration := time.Now().Add(365 * 24 * time.Hour)\n\tcookie := http.Cookie{Name: \"SecretSessID\", Value: sessionID, Expires: expiration}\n\thttp.SetCookie(w, &cookie)\n}", "func (hc *httpContext) setSession(userInfo *Authentication) {\n\n\tsession := hc.getSession()\n\tif session == nil {\n\t\treturn\n\t}\n\n\tsession.Values[sv[\"provider\"]] = userInfo.Provider\n\tsession.Values[sv[\"name\"]] = userInfo.Name\n\tsession.Values[sv[\"email\"]] = userInfo.Email\n\tsession.Values[sv[\"user\"]] = userInfo.UserName\n\tsession.Values[sv[\"token\"]] = userInfo.Token\n\thc.clearFlashes()\n\tsession.AddFlash(\"Logged in via \" + userInfo.Provider)\n\n\thc.saveSession(session)\n}", "func (s *TrafficOpsSessionThreadsafe) setSession(url, username, password string, insecure bool, userAgent string, useCache bool, timeout time.Duration) error {\n\toptions := cookiejar.Options{\n\t\tPublicSuffixList: publicsuffix.List,\n\t}\n\tjar, err := cookiejar.New(&options)\n\tif err != nil {\n\t\treturn err\n\t}\n\tto := client.NewSession(username, password, url, userAgent, &http.Client{\n\t\tTimeout: timeout,\n\t\tTransport: &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: insecure},\n\t\t},\n\t\tJar: jar,\n\t}, useCache)\n\t*s.session = to\n\treturn nil\n}", "func session(rp core.Roundtrip) (sess model.Session, err error) {\n\tcookie := rp.GetCookie(\"_cookie\")\n\tif err == nil {\n\t\tsess = model.Session{Uuid: cookie}\n\t\tif ok, _ := sess.Check(); !ok {\n\t\t\terr = errors.New(\"Invalid session\")\n\t\t}\n\t}\n\treturn\n}", "func LogoutRoute(res http.ResponseWriter, req *http.Request) {\n session := http.Cookie{\n Name: \"session\",\n Value: strconv.Itoa(-1),\n\n //MaxAge: 10 * 60,\n Secure: false,\n HttpOnly: true,\n SameSite: 1,\n\n Path: \"/\",\n }\n http.SetCookie(res, &session)\n Redirect(\"/login\", res)\n}", "func session(request *http.Request, cookName string) (sess data.Session, err error) {\n\tcookie, err := request.Cookie(cookName)\n\tif err == nil {\n\t\tsess = data.Session{Uuid: cookie.Value}\n\t\tif ok, _ := sess.Check(); !ok {\n\t\t\terr = errors.New(\"Invalid session\")\n\t\t}\n\t}\n\treturn\n}", "func setCookie(res http.ResponseWriter, req *http.Request, id string) error {\r\n\t//name of cookies = \"cookie\" for 1hr & \"CRA\" for 2yrs\r\n\tco, _ := req.Cookie(\"CRA\")\r\n\tco = &http.Cookie{\r\n\t\tName: \"CRA\",\r\n\t\tValue: id,\r\n\t\tHttpOnly: false,\r\n\t\tExpires: time.Now().AddDate(2, 0, 0),\r\n\t}\r\n\thttp.SetCookie(res, co)\r\n\t// fmt.Println(\"Htmlmain.setCookie - done with set id = \", id)\r\n\treturn nil\r\n}", "func (c *Client) doWithResponse(req *http.Request) (*http.Response, error) {\n\tif c.cookie.session == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing cookie.session for request\")\n\t}\n\treq.AddCookie(&http.Cookie{Name: \"SESSIONID\", Value: c.cookie.session})\n\treq.Header.Set(\"Accept\", \"application/json, text/javascript, */*; q=0.01\")\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Set(\"NK\", \"NT\")\n\treq.Header.Set(\"User-Agent\", \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36\")\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"request failed: %s\", err)\n\t}\n\t// error\n\tif resp.StatusCode != http.StatusOK {\n\t\trawBody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn nil, 
fmt.Errorf(\"failed to read %s response payload: %s\", resp.Status, err)\n\t\t}\n\t\treturn nil, fmt.Errorf(\"request failed with %s: %s\", resp.Status, string(rawBody))\n\t}\n\treturn resp, nil\n}", "func GetSession(h httprouter.Handle) httprouter.Handle {\n\treturn func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\n\t\tcookie, err := r.Cookie(\"kRtrima\") //Grab the cookie from the header\n\t\tif err != nil {\n\t\t\tswitch err {\n\t\t\tcase http.ErrNoCookie:\n\t\t\t\tLogger.Println(\"No Cookie was Found with Name kRtrima\")\n\t\t\t\t//remove the user ID from the session\n\t\t\t\tr.Header.Del(\"User\")\n\t\t\t\thttp.Redirect(w, r, \"/login\", 302)\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tLogger.Println(\"No Cookie was Found with Name kRtrima\")\n\t\t\t\t//remove the user ID from the session\n\t\t\t\tr.Header.Del(\"User\")\n\t\t\t\thttp.Redirect(w, r, \"/login\", 302)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tLogger.Println(\"Cookie was Found with Name kRtrima\")\n\n\t\t// Create a BSON ObjectID by passing string to ObjectIDFromHex() method\n\t\tdocID, err := primitive.ObjectIDFromHex(cookie.Value)\n\t\tif err != nil {\n\t\t\tLogger.Printf(\"Cannot Convert %T type to object id\", cookie.Value)\n\t\t\tLogger.Println(err)\n\t\t}\n\n\t\tvar SP m.Session\n\t\tif err = m.Sessions.Find(\"_id\", docID, &SP); err != nil {\n\t\t\tLogger.Println(\"Cannot found a valid User Session!!\")\n\t\t\t//session is missing, returns with error code 403 Unauthorized\n\t\t\t//remove the user ID from the session\n\t\t\tr.Header.Del(\"User\")\n\t\t\thttp.Redirect(w, r, \"/login\", 302)\n\t\t\treturn\n\t\t}\n\n\t\tLogger.Println(\"Valid User Session was Found!!\")\n\n\t\tvar UP m.LogInUser\n\n\t\terr = m.Users.Find(\"salt\", SP.Salt, &UP)\n\t\tif err != nil {\n\t\t\tLogger.Println(\"Cannot Find user with salt\")\n\t\t\t//Delete the old session\n\t\t\tif _, err := m.Sessions.DeleteItem(SP.ID); err != nil {\n\t\t\t\tLogger.Printf(\"Not able to Delete the session with ID: %v\", SP.ID)\n\t\t\t\t//remove the user ID from the session\n\t\t\t\tr.Header.Del(\"User\")\n\t\t\t\thttp.Redirect(w, r, \"/login\", 302)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// reset session and login user\n\t\t\t//remove the user ID from the session\n\t\t\tr.Header.Del(\"User\")\n\t\t\thttp.Redirect(w, r, \"/register\", 302)\n\t\t\treturn\n\t\t}\n\n\t\tvar LIP m.LogInUser\n\n\t\terr = m.GetLogInUser(\"User\", &LIP, r)\n\t\tif err != nil {\n\t\t\tm.AddToHeader(\"User\", UP, r)\n\t\t} else if UP.Email != LIP.Email {\n\t\t\t//remove the user ID from the session\n\t\t\tr.Header.Del(\"User\")\n\t\t\tm.AddToHeader(\"User\", UP, r)\n\t\t}\n\n\t\th(w, r, ps)\n\t}\n}", "func (b *BaseHandler) SetSession(key interface{}, value interface{}) {\n\tb.sessionStore.Set(b, key, value)\n}", "func clearSession(response http.ResponseWriter) {\n cookie := &http.Cookie{\n Name: \"session\",\n Value: \"\",\n Path: \"/\",\n MaxAge: -1,\n }\n http.SetCookie(response, cookie)\n }", "func (this *Context) SetCookie(cookie *http.Cookie) {\n\tthis.SetHeader(\"Set-Cookie\", cookie.String(), false)\n}", "func unmarshalSessionResponseBodyToUserSession(v *SessionResponseBody) *user.Session {\n\tif v == nil {\n\t\treturn nil\n\t}\n\tres := &user.Session{\n\t\tCookie: *v.Cookie,\n\t}\n\tres.User = unmarshalUserResponseBodyToUserUser(v.User)\n\tres.Credentials = unmarshalCredentialsResponseBodyToUserCredentials(v.Credentials)\n\n\treturn res\n}", "func EnsureSessionOrFallback(ch,fallback ContextHandler) ContextHandler {\n\treturn func(ctx context.Context, w 
http.ResponseWriter, r *http.Request) {\n\t\tcrumbs := CrumbTrail{}\n\t\tcrumbCookieName := CookieName + \"crumbs\"\n\n\t\t// First, extract prev breadcrumbs and log them\n\t\tcookies := map[string]string{}\n\t\tfor _,c := range r.Cookies() {\n\t\t\tcrumbs.Add(\"C:\"+c.Name)\n\t\t\tcookies[c.Name] = c.Value\n\t\t}\n\t\tif val,exists := cookies[crumbCookieName]; exists {\n\t\t\tlogPrintf(r, \"%s in : %s\", crumbCookieName, val)\n\t\t} \n\n\t\thandler := fallback\n\n\t\tif _,exists := cookies[CookieName]; exists {\n\t\t\tsesh,err := req2Session(r, &crumbs)\n\t\t\tif err == nil && !sesh.IsEmpty() {\n\t\t\t\t// Stash the session in the context, and move on to the proper handler\n\t\t\t\tctx = setUserSession(ctx, sesh)\n\t\t\t\thandler = ch\n\n\t\t\t} else {\n\t\t\t\tif err != nil { logPrintf(r, \"req2session err: \" + err.Error()) }\n\t\t\t\tlogPrintf(r, \"crumbs: \" + crumbs.String())\n\t\t\t}\n\n\t\t} else {\n\t\t\tcrumbs.Add(\"NoMainCookie\")\n\t\t}\n\t\t\n\t\t// Before invoking final handler, log breadcrumb trail, and stash in cookie\n\t\tlogPrintf(r, \"%s out: %s\", crumbCookieName, crumbs)\n\t\tcookie := http.Cookie{\n\t\t\tName: crumbCookieName,\n\t\t\tValue: crumbs.String(),\n\t\t\tExpires:time.Now().AddDate(1,0,0),\n\t\t}\n\t\thttp.SetCookie(w, &cookie)\n\n\t\treqLog,_ := httputil.DumpRequest(r,true)\n\t\tlogPrintf(r, \"HTTP req>>>>\\n%s====\\n\", reqLog)\n\t\t\n\t\tif handler == nil {\n\t\t\tlogPrintf(r, \"WithSession had no session, no NoSessionHandler\")\n\t\t\thttp.Error(w, fmt.Sprintf(\"no session, no NoSessionHandler (%s)\", r.URL), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\thandler(ctx, w, r)\n\t}\n}", "func (env *Env) session(r *http.Request) (*models.Session, error) {\n\tcValue := \"\"\n\tfor _, c := range r.Cookies() {\n\t\tif c.Name == \"cryptotax\" && c.Value != \"\" {\n\t\t\tcValue = c.Value\n\t\t}\n\t}\n\tif cValue == \"\" {\n\t\treturn nil, errors.New(\"unable to read cryptotax cookie\")\n\t}\n\n\treturn env.db.Session(cValue)\n}", "func (this *SessionStorage) CreateSession(w http.ResponseWriter) (*Session) {\n session := &Session{ lastAccessed: time.Now(), storage: make(map[string]string) }\n this.sessions[this.sessionCounter] = session\n http.SetCookie(w, &http.Cookie{Name: \"session\", Value: fmt.Sprint(this.sessionCounter)})\n this.sessionCounter += 1\n session.lastAccessed = time.Now()\n return session\n}", "func SetCookie(session string, method string) *http.Client {\n\tjar, _ := cookiejar.New(nil)\n\n\tvar cookies []*http.Cookie\n\n\tfirstCookie := &http.Cookie{\n\t\tName: \"session\",\n\t\tValue: session,\n\t\tPath: \"/\",\n\t\tDomain: \".irccloud.com\",\n\t}\n\n\tcookies = append(cookies, firstCookie)\n\n\t// URL for cookies to remember. 
i.e reply when encounter this URL\n\tcookieURL, _ := url.Parse(\"https://www.irccloud.com/chat/\" + method)\n\n\tjar.SetCookies(cookieURL, cookies)\n\tclient := &http.Client{\n\t\tJar: jar,\n\t}\n\n\treturn client\n\n}", "func (F *Frisby) SetCookie(key, value string) *Frisby {\n\tif F.Req.Cookies == nil {\n\t\tF.Req.Cookies = make(map[string]string)\n\t}\n\tF.Req.Cookies[key] = value\n\treturn F\n}", "func MockProjectSessionCookie(projectID, secret string) *http.Cookie {\n\tstore := mockCookieStore()\n\n\tr := &http.Request{}\n\tw := httptest.NewRecorder()\n\n\tsession, _ := store.Get(r, getProjectSessionNameFromString(projectID))\n\n\tsession.Values[projectSecretKeyName] = secret\n\n\terr := session.Save(r, w)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn w.Result().Cookies()[0]\n}", "func (c *CookieOverseer) Set(w http.ResponseWriter, r *http.Request, value string) error {\n\tev, err := c.encode(value)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to encode session value into cookie\")\n\t}\n\n\tw.(cookieWriter).SetCookie(c.options.makeCookie(ev))\n\n\treturn nil\n}", "func (oc *Client) authWithSession(loginDetails *creds.LoginDetails) (string, error) {\n\tlogger.Debug(\"auth with session func called\")\n\tsessionCookie := loginDetails.OktaSessionCookie\n\terr := oc.validateSession(loginDetails)\n\tif err != nil {\n\t\tmodifiedLoginDetails := loginDetails\n\t\tmodifiedLoginDetails.OktaSessionCookie = \"\"\n\t\treturn oc.Authenticate(modifiedLoginDetails)\n\t}\n\n\treq, err := http.NewRequest(\"GET\", loginDetails.URL, nil)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"error building authWithSession request\")\n\t}\n\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"Accept\", \"application/json\")\n\treq.Header.Add(\"Cookie\", fmt.Sprintf(\"sid=%s\", sessionCookie))\n\n\tctx := context.WithValue(context.Background(), ctxKey(\"authWithSession\"), loginDetails)\n\n\tres, err := oc.client.Do(req)\n\tif err != nil {\n\t\tlogger.Debugf(\"error authing with session: %v\", err)\n\t}\n\n\tbody, err := io.ReadAll(res.Body)\n\tif err != nil {\n\t\tlogger.Debugf(\"error reading body for auth with session: %v\", err)\n\t}\n\n\t// This usually happens if using an active session (> 5 mins) but MFA was NOT remembered\n\tif strings.Contains(string(body), \"/login/step-up/\") { // https://developer.okta.com/docs/reference/api/authn/#step-up-authentication-with-okta-session\n\t\tlogger.Debug(\"okta step-up prompted, need mfa...\")\n\t\tstateToken, err := getStateTokenFromOktaPageBody(string(body))\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"error retrieving saml response\")\n\t\t}\n\t\tloginDetails.StateToken = stateToken\n\t\treturn oc.Authenticate(loginDetails)\n\t}\n\n\treturn oc.follow(ctx, req, loginDetails)\n}", "func SessionHandler(next http.Handler) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Logger.Println(\"Session Handler\")\n\n\t\tctx := newContextWithSessionID(r.Context(), r)\n\t\tsessionID := SessionIDFromContext(ctx)\n\n\t\texpiration := time.Now().Add(365 * 24 * time.Hour)\n\t\tcookie := http.Cookie{Name: \"session\", Value: sessionID, Expires: expiration}\n\t\thttp.SetCookie(w, &cookie)\n\n\t\tlog.Logger.Println(\"SessionID:\" + sessionID)\n\t\tlog.Logger.Println(\"Session NEXT\")\n\t\tif next != nil {\n\t\t\tnext.ServeHTTP(w, r.WithContext(ctx))\n\t\t}\n\t\tlog.Logger.Println(\"Session NEXT END\")\n\t}\n}", "func (c *CookieOverseer) SessionID(w http.ResponseWriter, r 
*http.Request) (string, error) {\n\tpanic(\"cookie sessions do not use session ids\")\n}", "func SetCookie(h map[string][]string, c *http.Cookie) {\n\th[\"Set-Cookie\"] = append(h[\"Set-Cookie\"], c.String())\n}", "func validateSession(f func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) { \n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t// get cookie from request\n\t \tc, err := r.Cookie(\"JSESSION_ID\")\n\t\tif err != nil {\n\t\t\tif err == http.ErrNoCookie {\t\t\n\t\t\t\thttp.Redirect(w, r, \"/login?errorM=No session present in request\", http.StatusSeeOther)\n\t\t\t\treturn\t\n\t\t\t}\n\t\t\thttp.Redirect(w, r, \"/login?errorM=Not authorised\", http.StatusSeeOther)\n\t\t\treturn\n\t\t}\n\t\t// if no errors, validate the cookie\n\t\tsessToken := c.Value\n\t\tsv := strings.Split(sessToken, \"_\")\t\t\n\t\tif len(sv) != 2 {\t\t\n\t\t\thttp.Redirect(w, r, \"/login?errorM=Invalid cookie format\", http.StatusSeeOther)\n\t\t\treturn\t\n\t\t}\n\t\tuserName := sv[0]\n\t\texpSessToken := getStringFromDB(\"Sessions\", userName)\n\t\tif sv[1] != expSessToken {\n\t\t\thttp.Redirect(w, r, \"/login?errorM=Invalid session\", http.StatusSeeOther)\t\t\n\t\t\treturn\n\t\t}\n\n\t\t// after sucess, refresh the cookie\tto extend the session\n\t\t// Create a new random session token\n\t\tsessionToken, err := uuid.NewUUID()\n\t\tif err != nil {\n\t\t\thttp.Redirect(w, r, \"/login?errorM=Unable to create token\", http.StatusSeeOther)\n\t\t}\n\t\t// Set the token in the db, along with the userName\n\t\tupdateDBString(\"Sessions\", userName, sessionToken.String())\n\n\t\t// set the expiration time\n\t\texpires := time.Now().Add(600 * time.Second)\n\t\tck := http.Cookie{\n\t\t\tName: \"JSESSION_ID\",\n\t\t\tPath: \"/\",\n\t\t\tExpires: expires,\n\t\t\tValue: userName+\"_\"+sessionToken.String(),\n\t\t}\n\n\t\t// write the cookie to response\n\t\thttp.SetCookie(w, &ck)\n\n\t\t// if sucess process the handler\t\t\n\t\tf(w, r)\n\t\t\n\t}\n}", "func makeCookie(session string) *http.Cookie {\n\tcookie := new(http.Cookie)\n\tcookie.Name = \"JSESSIONID\"\n\tcookie.Value = session\n\treturn cookie\n}", "func (ctx *RequestContext) PutSession(key string, value interface{}) {\n\tvars := ctx.token.Claims[\"vars\"].(map[string]interface{})\n\tvars[key] = value\n}", "func (resp *Response) LoadSession(req *Request) *Response {\n\tresp.Session.SessionID = req.Session.SessionID\n\tresp.Session.MessageID = req.Session.MessageID\n\tresp.Session.UserID = req.Session.UserID\n\tresp.Version = req.Version\n\treturn resp\n}", "func (ctx *Context) SetCookie(cookie *http.Cookie) {\n\tctx.SetHeader(\"Set-Cookie\", cookie.String(), false)\n}", "func (zr *ZRequest) SetCookie(ck *http.Cookie) *ZRequest {\n\tif zr.ended {\n\t\treturn zr\n\t}\n\tzr.cookies = append(zr.cookies, ck)\n\treturn zr\n}", "func (r *Response) Cookie(name, value string) *Response {\n\tr.cookies = append(r.cookies, NewCookie(name).Value(value))\n\treturn r\n}", "func (h *ResponseHeader) SetCookie(cookie *Cookie) {\n\th.cookies = setArgBytes(h.cookies, cookie.Key(), cookie.Cookie(), argsHasValue)\n}", "func GenerateSession(tb testing.TB, k []byte, sc rb.SessionCore, pairs ...interface{}) (c *http.Cookie, tok string) {\n\tw, r := httptest.NewRecorder(), httptest.NewRequest(\"GET\", \"/\", nil)\n\tr = r.WithContext(rb.WithRequestLogger(r.Context(), zap.NewNop()))\n\n\trb.NewCSRFMiddlware(sc, rb.BasicErrorHandler)(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprintf(w, \"%s\", 
rb.CSRFToken(r.Context()))\n\t\tfor i := 0; i < len(pairs); i += 2 {\n\t\t\tsc.Session(w, r).Set(pairs[i], pairs[i+1])\n\t\t}\n\n\t\tif err := sc.SaveSession(w, r, sc.Session(w, r)); err != nil {\n\t\t\ttb.Fatalf(\"failed to save cookie during generation: %v\", err)\n\t\t}\n\t})).ServeHTTP(w, r)\n\n\tc, err := parseCookie(w.Header().Get(\"Set-Cookie\"), rb.DefaultSessionName)\n\tif err != nil {\n\t\ttb.Fatalf(\"failed to parse cookie: %v\", err)\n\t}\n\n\treturn c, w.Body.String()\n}", "func session(w http.ResponseWriter, r *http.Request) (sess data.Session, err error) {\n\t\n\t// リクエストからクッキーを取得\n\tcookie, err := r.Cookie(\"_cookie\")\n\t// ユーザーがログインしているならクッキーがあるはず\n\tif err == nil {\n\t\t// データベースを検索\n\t\t// ユニークIDが存在してるか?\n\t\tsess = data.Session{ Uuid: cookie.Value }\n\t\t\n\t\tif ok, _ := sess.Check(); !ok {\n\t\t\terr = errors.New(\"Invalid session\")\n\t\t}\n\t}\n\treturn\n}", "func (h *RequestHeader) SetCookie(key, value string) {\n\th.collectCookies()\n\th.cookies = setArg(h.cookies, key, value, argsHasValue)\n}", "func deleteSession(w http.ResponseWriter) {\n\thttp.SetCookie(w, &http.Cookie{\n\t\tName: cookieName,\n\t\tPath: \"/\",\n\t\t// expires must be non-zero to get output\n\t\tExpires: time.Unix(1, 0),\n\t\tHttpOnly: true,\n\t})\n}", "func (ctx *Context) SetCookie(name string, value string, others ...interface{}) {\n\tctx.Output.Cookie(name, value, others...)\n}", "func SetSession(ctx context.Context, session bool, logoutURL string) context.Context {\n\tctx = context.WithValue(ctx, sessionKey, session)\n\tctx = context.WithValue(ctx, logoutKey, logoutURL)\n\treturn ctx\n}", "func setUserCookies(w http.ResponseWriter, id int, sessId string) {\n\tuserIdCookie := http.Cookie{\n\t\tName: \"user_id\",\n\t\tValue: strconv.Itoa(id),\n\t\tExpires: time.Now().Add(30 * 24 * time.Hour),\n\t\tHttpOnly: false,\n\t\tPath: \"/\",\n\t}\n\tuserSessionCookie := http.Cookie{\n\t\tName: \"userSession_id\",\n\t\tValue: sessId,\n\t\tExpires: time.Now().Add(30 * 24 * time.Hour),\n\t\tHttpOnly: false,\n\t\tPath: \"/\",\n\t}\n\thttp.SetCookie(w, &userIdCookie)\n\thttp.SetCookie(w, &userSessionCookie)\n}", "func LoginRoute(res http.ResponseWriter, req *http.Request) {\n if req.Method == \"GET\" {\n res.Write([]byte(`\n <html>\n <head>\n <title> Login </title>\n </head>\n <body>\n <h1> Login </h1>\n <form action = \"/login\" method = \"post\">\n Username:<br>\n <input type=\"text\" name=\"Username\"><br>\n Password:<br>\n <input type = \"password\" name = \"Password\">\n <input type = \"submit\" value = \"Login\">\n </form>\n </body>\n </html>\n `))\n } else {\n req.ParseForm()\n username := req.FormValue(\"Username\")\n password := req.FormValue(\"Password\")\n\n uid, err := CheckUser(username, password)\n if err == nil {\n\n session := http.Cookie{\n Name: \"session\",\n Value: strconv.Itoa(uid),\n\n //MaxAge: 10 * 60,\n Secure: false,\n HttpOnly: true,\n SameSite: 1,\n\n Path: \"/\",\n }\n http.SetCookie(res, &session)\n Redirect(\"/library\", res)\n } else {\n res.Write([]byte(err.Error()))\n }\n }\n}", "func SessionCheck(writer http.ResponseWriter, request *http.Request) (sess Session, err error) {\n\tcookie, err := request.Cookie(\"_cookie\")\n\tif err == nil {\n\t\tsess = Session{Uuid: cookie.Value}\n\t\tif ok, _ := sess.Valid(); ok {\n\t\t\terr = errors.New(\"invalid session\")\n\t\t}\n\t}\n\treturn\n}", "func Session(c *gin.Context) {\n\tuser, err := shared.GetUserAuth(c)\n\n\tif err {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"success\": false})\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, 
gin.H{\"success\": true, \"user\": user})\n}", "func Test_Session(t *testing.T) {\n\tt.Parallel()\n\n\t// session store\n\tstore := New()\n\n\t// fiber instance\n\tapp := fiber.New()\n\n\t// fiber context\n\tctx := app.AcquireCtx(&fasthttp.RequestCtx{})\n\tdefer app.ReleaseCtx(ctx)\n\n\t// set cookie\n\tctx.Request().Header.SetCookie(store.CookieName, \"123\")\n\n\t// get session\n\tsess, err := store.Get(ctx)\n\tutils.AssertEqual(t, nil, err)\n\tutils.AssertEqual(t, true, sess.Fresh())\n\n\t// get value\n\tname := sess.Get(\"name\")\n\tutils.AssertEqual(t, nil, name)\n\n\t// set value\n\tsess.Set(\"name\", \"john\")\n\n\t// get value\n\tname = sess.Get(\"name\")\n\tutils.AssertEqual(t, \"john\", name)\n\n\t// delete key\n\tsess.Delete(\"name\")\n\n\t// get value\n\tname = sess.Get(\"name\")\n\tutils.AssertEqual(t, nil, name)\n\n\t// get id\n\tid := sess.ID()\n\tutils.AssertEqual(t, \"123\", id)\n\n\t// delete cookie\n\tctx.Request().Header.Del(fiber.HeaderCookie)\n\n\t// get session\n\tsess, err = store.Get(ctx)\n\tutils.AssertEqual(t, nil, err)\n\tutils.AssertEqual(t, true, sess.Fresh())\n\n\t// get id\n\tid = sess.ID()\n\tutils.AssertEqual(t, 36, len(id))\n}", "func (this *Session) Before(txn *Txn) bool {\n\tif txn.Type() != \"html\" {\n\t\t//skip\n\t\treturn true\n\t}\n\t// log.Println(\"SESSION!\")\n\thttpWriter, err := ToHttpWriter(txn)\n\tif err != nil {\n\t\tlog.Printf(\"ERROR in session.before %s\", err)\n\t\treturn true //should we continue with the request?\n\t}\n\n\tcookie, err := httpWriter.HttpRequest.Cookie(\"session_id\")\n\n\tif err != nil {\n\t\t//create new session id\n\t\ttxn.Session.Put(\"session_id\", SessionId())\n\t\t// log.Println(\"Created session!\")\n\t} else {\n\t\t//load the session. \n\t\tsessionId := cookie.Value\n\t\t// log.Printf(\"Found session cookie! 
%s\", sessionId)\n\t\tbytes, ok := this.cache.Get(sessionId)\n\t\tif !ok {\n\t\t\t//create a new session, since the old one is gone\n\t\t\tsessionId = SessionId()\n\t\t\t// log.Printf(\"Old session expired, setting new one (%s)\", sessionId)\n\t\t} else {\n\t\t\terr = txn.Session.UnmarshalJSON(bytes)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error unmarshaling json (%s) -> (%s)\", bytes, err)\n\t\t\t}\n\t\t}\n\t\ttxn.Session.Put(\"session_id\", sessionId)\n\t}\n\treturn true\n}", "func (cj *CookieJar) SaveSession() {\n\terr := sessions.Save(cj.r, cj.w)\n\tif err != nil {\n\t\tfmt.Println(\"Error saving cookies:\", err)\n\t}\n}", "func (s *SSO) StartSession(w http.ResponseWriter, cs crowd.Session) {\n\tck := http.Cookie{\n\t\tName: s.CookieConfig.Name,\n\t\tDomain: s.CookieConfig.Domain,\n\t\tSecure: s.CookieConfig.Secure,\n\t\tValue: cs.Token,\n\t\tExpires: cs.Expires,\n\t}\n\thttp.SetCookie(w, &ck)\n}", "func NewSessionCookie(token *oauth2.Token) *http.Cookie {\n\tvar buf bytes.Buffer\n\tjson.NewEncoder(&buf).Encode(token) // FIXME(bzz): handle JSON encoding failures\n\tsessionVal := base64.StdEncoding.EncodeToString(buf.Bytes())\n\n\treturn &http.Cookie{\n\t\tName: string(sessionKey),\n\t\tValue: sessionVal,\n\t\tPath: \"/\",\n\t\tExpires: token.Expiry,\n\t\tHttpOnly: true,\n\t}\n}", "func (ctx *Context) SetCookie(cookie *http.Cookie) {\n\tif v := cookie.String(); v != \"\" {\n\t\tctx.AddHeader(\"Set-Cookie\", v)\n\t}\n}", "func ValidateSession(w http.ResponseWriter, r *http.Request) (session Session, err error) {\n\tcookie, err := r.Cookie(\"goblog_session_cookie\")\n\tif err == nil {\n\t\tsession = Session{UUID: cookie.Value}\n\t\tif valid, _ := session.Validate(); !valid {\n\t\t\tcookie := http.Cookie{\n\t\t\t\tName: \"goblog_session_cookie\",\n\t\t\t\tValue: session.UUID,\n\t\t\t\tHttpOnly: true,\n\t\t\t\tExpires: time.Now(),\n\t\t\t\tMaxAge: -1,\n\t\t\t}\n\t\t\thttp.SetCookie(w, &cookie)\n\t\t\tlog.Println(\"Invalid session\", err)\n\t\t\thttp.Redirect(w, r, \"/login\", 302)\n\t\t}\n\t} else {\n\t\tlog.Println(\"Session doesn't exists\", err)\n\t\thttp.Redirect(w, r, \"/login\", 302)\n\t}\n\treturn\n}", "func NewSessionResponse(s *Session, details string) *Response {\n\treturn &Response{Session: s, Details: details}\n}", "func (s *RestStore) SaveSession(w http.ResponseWriter, r *http.Request, sessionState *SessionState) error {\n\tencToken, err := MarshalSession(sessionState, s.Cipher)\n\tif err != nil {\n\t\treturn err\n\t}\n\tjsonBytes, err := json.Marshal(\n\t\t&RestStoreResponse{\n\t\t\tToken: encToken,\n\t\t\tExpiry: sessionState.RefreshDeadline,\n\t\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"internal/sessions: couldn't marshal token struct: %v\", err)\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(jsonBytes)\n\treturn nil\n}", "func (s *Session) Cookie() *http.Cookie {\n\tma := 0\n\tif s.Valid() {\n\t\tma = int(DefaultDuration.Seconds())\n\t}\n\treturn &http.Cookie{\n\t\tName: SessionKey,\n\t\tValue: s.Id,\n\t\tExpires: s.Expires,\n\t\tMaxAge: ma,\n\t}\n}", "func SetCookieToken(w http.ResponseWriter, token string) http.ResponseWriter {\n expiration := time.Now().Add(time.Minute * time.Duration(tokenValidity))\n cookie := http.Cookie{Name: \"Token\", Value: token, Expires: expiration}\n http.SetCookie(w, &cookie)\n return w\n}", "func WithSession(ctx context.Context, session *Session) context.Context {\n\treturn context.WithValue(ctx, sessionKey, session)\n}", "func (a *Auth) Session(w http.ResponseWriter, r *http.Request) {\n\tuser := 
a.userstate.Username(r)\n\temail, err := a.userstate.Email(user)\n\tif err != nil {\n\t\tutil.Error(\"session: %s\", err.Error())\n\t}\n\tutil.OKWith(w, r, map[string]string{\n\t\t\"serialNumber\": user,\n\t\t\"emailAddress\": email,\n\t})\n}", "func setLoggedIn(w http.ResponseWriter, u users.User) {\n\tc := http.Cookie{\n\t\tName: userCookieName,\n\t\tValue: u.Email,\n\t\tPath: \"/\",\n\t\tSecure: false,\n\t}\n\thttp.SetCookie(w, &c)\n}", "func (c *Client) Session(session *SessionID) {\n\tc.session = session\n}", "func SessionSetUser(user *models.User, session *session.Session, r *http.Request) {\n\t(*session).Set(\"id\", user.Id)\n\t(*session).Set(\"name\", user.Name)\n\t(*session).Set(\"email\", user.Email)\n}", "func (avisess *AviSession) collectCookiesFromResp(resp *http.Response) {\n\t// collect cookies from the resp\n\tavisess.cookiesCollectLock.Lock()\n\tdefer avisess.cookiesCollectLock.Unlock()\n\n\tvar csrfToken string\n\tvar sessionID string\n\tfor _, cookie := range resp.Cookies() {\n\t\tif cookie.Name == \"csrftoken\" {\n\t\t\tcsrfToken = cookie.Value\n\t\t}\n\t\tif cookie.Name == \"sessionid\" || cookie.Name == \"avi-sessionid\" {\n\t\t\tsessionID = cookie.Value\n\t\t}\n\t}\n\tif csrfToken != \"\" && sessionID != \"\" {\n\t\tavisess.csrfToken = csrfToken\n\t\tavisess.sessionid = sessionID\n\t}\n}", "func Session(m *session.Manager) jsonapi.Middleware {\n\treturn func(h jsonapi.Handler) jsonapi.Handler {\n\t\treturn func(req jsonapi.Request) (i interface{}, e error) {\n\t\t\tr := req.R()\n\t\t\tsess, err := m.Start(req.W(), r)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, jsonapi.E500.SetOrigin(err)\n\t\t\t}\n\n\t\t\tr = r.WithContext(context.WithValue(\n\t\t\t\tr.Context(),\n\t\t\t\tsession.SessionObjectKey,\n\t\t\t\tsess,\n\t\t\t))\n\t\t\ti, e = h(jsonapi.WrapRequest(req, r))\n\n\t\t\t_ = sess.Save(req.W())\n\t\t\treturn\n\t\t}\n\t}\n}", "func (s *CookieStore) SaveSession(rw http.ResponseWriter, req *http.Request, sessionState *SessionState) error {\n\tvalue, err := MarshalSession(sessionState, s.CookieCipher)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.setSessionCookie(rw, req, value)\n\treturn nil\n}", "func setCookie(w http.ResponseWriter, nonce string, secureMode bool, maxAge int) error {\n\n\tencoded, err := secureCookie.Encode(csrfCookieName, nonce)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"the encode cookie failed, err = %v\", err)\n\t}\n\tcookie := &http.Cookie{\n\t\tName: csrfCookieName,\n\t\tValue: encoded,\n\t\tMaxAge: maxAge,\n\t\tHttpOnly: true,\n\t\tSecure: secureMode,\n\t\tSameSite: http.SameSiteLaxMode,\n\t}\n\n\thttp.SetCookie(w, cookie)\n\treturn nil\n}", "func (this *SessionStorage) GetSession(w http.ResponseWriter, req *http.Request) (*Session,error) {\n //look for session cookie\n sCookie,err := req.Cookie(\"session\")\n var sessionId uint64\n this.mutex.Lock()\n defer this.mutex.Unlock()\n if(err != nil) {\n //if there's no session associated\n return this.CreateSession(w),nil\n } else {\n //if a session already exists\n sessionId,err = strconv.ParseUint(sCookie.Value,10,64)\n if(err != nil) {\n //if conversion failed\n return nil,err;\n }\n }\n session,ok := this.sessions[sessionId]\n if !ok {\n return this.CreateSession(w),nil\n }\n session.lastAccessed = time.Now()\n return session,nil\n}", "func (c *Controller) SetSession(name interface{}, value interface{}) error {\n\tif c.CruSession == nil {\n\t\tc.StartSession()\n\t}\n\treturn c.CruSession.Set(context2.Background(), name, value)\n}", "func Test_Ctx_Cookie(t *testing.T) {\n\tt.Parallel()\n\tapp 
:= New()\n\tctx := app.AcquireCtx(&fasthttp.RequestCtx{})\n\tdefer app.ReleaseCtx(ctx)\n\texpire := time.Now().Add(24 * time.Hour)\n\tvar dst []byte\n\tdst = expire.In(time.UTC).AppendFormat(dst, time.RFC1123)\n\thttpdate := strings.Replace(string(dst), \"UTC\", \"GMT\", -1)\n\tctx.Cookie(&Cookie{\n\t\tName: \"username\",\n\t\tValue: \"john\",\n\t\tExpires: expire,\n\t})\n\texpect := \"username=john; expires=\" + httpdate + \"; path=/; SameSite=Lax\"\n\tutils.AssertEqual(t, expect, string(ctx.Fasthttp.Response.Header.Peek(HeaderSetCookie)))\n\n\tctx.Cookie(&Cookie{SameSite: \"strict\"})\n\tctx.Cookie(&Cookie{SameSite: \"none\"})\n}", "func (r Response) EndSession(flag bool) Response {\n\tr.Body.ShouldEndSession = &flag\n\treturn r\n}", "func (authentication *Authentication) CreateSession(id int, firstName, lastName, email string, res http.ResponseWriter) {\n\tsessionID, _ := uuid.NewV4()\n\tcookie := &http.Cookie{\n\t\tName: \"session\",\n\t\tValue: sessionID.String(),\n\t\tPath: \"/\",\n\t}\n\tcookie.MaxAge = sessionExistTime\n\thttp.SetCookie(res, cookie)\n\tauthentication.userSession[cookie.Value] = session{email, time.Now()}\n\tauthentication.loginUser[email] = User{id, firstName, lastName}\n}", "func SetSession(id interface{}, user interface{}) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tsession := sessions.Default(c)\n\t\tsession.Set(\"id\", id)\n\t\tsession.Set(\"username\", user)\n\t\tsession.Save()\n\t}\n}", "func (ctx *Context) SetCookie(c *http.Cookie) {\n\thttp.SetCookie(ctx.writer, c)\n}", "func (s *AtlasMapServer) sessionMiddleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tsession, err := s.store.Get(r, \"session\")\n\t\tif err != nil {\n\t\t\tlog.Error().Err(err).Msg(\"bad session\")\n\t\t\thttp.SetCookie(w, &http.Cookie{Name: \"session\", MaxAge: -1, Path: \"/\"})\n\t\t\treturn\n\t\t}\n\n\t\t_, ok := session.Values[\"steamID\"].(string)\n\t\tif !ok {\n\t\t\thttp.Error(w, \"Not authenticated\", http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\t_, ok = session.Values[\"playerID\"].(int64)\n\t\tif !ok {\n\t\t\thttp.Error(w, \"Not authenticated\", http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\n\t\tr = r.WithContext(context.WithValue(r.Context(), SessionKey, session))\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func (ctx *Context) SetCookie(cookie *http.Cookie) {\n\thttp.SetCookie(ctx.ResponseWriter, cookie)\n}", "func (rc *Ctx) WriteCookie(cookie *http.Cookie) {\n\thttp.SetCookie(rc.response, cookie)\n}", "func addCookie(context echo.Context, authToken string) {\n\texpire := time.Now().AddDate(0, 1, 0) // 1 month\n\tcookie := &http.Cookie{\n\t\tName: \"token\",\n\t\tExpires: expire,\n\t\tValue: auth.Bearer + \" \" + authToken,\n\t\tPath: \"/\",\n\t\t// Domain must not be set for auth to work with chrome without domain name\n\t\t// http://stackoverflow.com/questions/5849013/setcookie-does-not-set-cookie-in-google-chrome\n\t}\n\tcontext.Response().Header().Set(\"Set-Cookie\", cookie.String())\n}", "func (c *Cookie) Set() error {\n\topt := map[string]interface{}{\"cookie\": c}\n\t_, _, err := c.ws.wd.post(\"/session/%s/cookie\", opt, c.ws.ID)\n\treturn err\n}", "func (s *TrafficOpsSessionThreadsafe) setLegacySession(url, username, password string, insecure bool, userAgent string, useCache bool, timeout time.Duration) error {\n\toptions := cookiejar.Options{\n\t\tPublicSuffixList: publicsuffix.List,\n\t}\n\tjar, err := cookiejar.New(&options)\n\tif err != nil {\n\t\treturn err\n\t}\n\tto := 
legacyClient.NewSession(username, password, url, userAgent, &http.Client{\n\t\tTimeout: timeout,\n\t\tTransport: &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: insecure},\n\t\t},\n\t\tJar: jar,\n\t}, useCache)\n\t*s.legacySession = to\n\treturn nil\n}", "func (s SimpleResponse) GetCookie() *http.Cookie {\n\treturn nil\n}", "func (req *Request) SetCookie(k, v string) {\n\tc := &http.Cookie{\n\t\tName: k,\n\t\tValue: v,\n\t}\n\treq.Req.AddCookie(c)\n}" ]
[ "0.6794905", "0.6627064", "0.6594562", "0.6545771", "0.65062404", "0.6390275", "0.62856454", "0.6280704", "0.6261973", "0.6261307", "0.6229171", "0.6201126", "0.6116262", "0.6077456", "0.5991073", "0.5933297", "0.5923344", "0.589549", "0.5846591", "0.5843309", "0.5832249", "0.5828373", "0.58233815", "0.5793263", "0.57692295", "0.57623506", "0.57251215", "0.5697856", "0.56731373", "0.56573755", "0.56533337", "0.5633048", "0.5623805", "0.56181586", "0.5611283", "0.55982095", "0.55766255", "0.5566187", "0.55321544", "0.5531421", "0.55280393", "0.55210674", "0.5513123", "0.5499848", "0.54941505", "0.54932857", "0.5493258", "0.5479094", "0.54574263", "0.5454269", "0.5434075", "0.54241765", "0.5406974", "0.54058295", "0.5404478", "0.5394008", "0.5381531", "0.536877", "0.5366248", "0.5361455", "0.53605694", "0.53500783", "0.5349345", "0.53371614", "0.53336626", "0.53335893", "0.5330638", "0.5329763", "0.53265184", "0.53248745", "0.5321367", "0.5315021", "0.52987576", "0.52958417", "0.52906096", "0.5282327", "0.5281328", "0.5277973", "0.5277387", "0.52718174", "0.5257937", "0.525391", "0.5233416", "0.5229881", "0.5224825", "0.5215498", "0.520903", "0.5197475", "0.5189572", "0.5187931", "0.5169637", "0.5160115", "0.5136304", "0.51326627", "0.5131703", "0.51311225", "0.5122002", "0.512044", "0.5119122", "0.5115461" ]
0.75441295
0
ToString returns the JSON API response
func (r *Response) ToString() string {
	return r.recorder.Body.String()
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *Forecastservicegoaltemplateimpactoverrideresponse) String() string {\n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Updatescheduleuploadresponse) String() string {\n \n \n o.Headers = map[string]string{\"\": \"\"} \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (r Response) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func (o *Buagentschedulehistoryresponse) String() string {\n o.PriorPublishedSchedules = []Buschedulereference{{}} \n \n o.DroppedChanges = []Buagentschedulehistorydroppedchange{{}} \n o.Changes = []Buagentschedulehistorychange{{}} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (j JSONResponse) String() string {\n\tstr, err := json.MarshalIndent(j, \"\", \" \")\n\tif err != nil {\n\t\treturn fmt.Sprintf(`{\n \"error\": \"%v\"\n}`, err)\n\t}\n\n\treturn string(str)\n}", "func (o *Learningassignmentbulkaddresponse) String() string {\n o.Entities = []Learningassignment{{}} \n o.DisallowedEntities = []Disallowedentitylearningassignmentitem{{}} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Limitchangerequestdetails) String() string {\n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (this *Response) ToString() (string, error) {\n\tbytes, err := this.ReadAll()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(bytes), nil\n}", "func (o *Timeoffbalancerequest) String() string {\n o.ActivityCodeIds = []string{\"\"} \n o.DateRanges = []Localdaterange{{}} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Evaluationresponse) String() string {\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n o.MediaType = []string{\"\"} \n \n \n \n \n \n \n \n \n \n \n o.AuthorizedActions = []string{\"\"} \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Buasyncagentschedulesqueryresponse) String() string {\n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Createtimeofflimitrequest) String() string {\n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Analyticsuserdetailsasyncqueryresponse) String() string {\n o.UserDetails = []Analyticsuserdetail{{}} \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Openintegration) String() string {\n \n \n \n \n \n o.WebhookHeaders = map[string]string{\"\": \"\"} \n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Buforecastmodificationresponse) String() string {\n \n \n \n \n \n \n o.Values = 
[]Wfmforecastmodificationintervaloffsetvalue{{}} \n \n \n \n o.PlanningGroupIds = []string{\"\"} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Integrationstatusinfo) String() string {\n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Reschedulingoptionsrunresponse) String() string {\n \n \n \n o.ManagementUnits = []Reschedulingmanagementunitresponse{{}} \n \n o.ActivityCodeIds = []string{\"\"} \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Apiusageclientquery) String() string {\n \n \n o.Metrics = []string{\"\"} \n o.GroupBy = []string{\"\"} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o QtreeCreateResponse) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (o *Messagetypingeventrequest) String() string {\n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Addshifttraderequest) String() string {\n \n \n \n \n o.AcceptableIntervals = []string{\"\"} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Createshareresponse) String() string {\n\tj, _ := json.Marshal(o)\n\tstr, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n\treturn str\n}", "func (o LunOnlineResponse) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (o *Updatebusinessunitrequest) String() string {\n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (f *Format) ToString() (string, error) {\n\tbytes, error := json.Marshal(f)\n\treturn string(bytes), error\n}", "func (t *Tarif) ToString() string {\n\tret, _ := t.MarshalJSON()\n\treturn string(ret)\n}", "func (z Zamowienia) String() string {\n\tjz, _ := json.Marshal(z)\n\treturn string(jz)\n}", "func (o *Posttextresponse) String() string {\n\tj, _ := json.Marshal(o)\n\tstr, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n\treturn str\n}", "func (o *Screenrecordingmetadatarequest) String() string {\n \n \n o.MetaData = []Screenrecordingmetadata{{}} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o SnapmirrorCreateResponse) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (o *Interactionstatsalert) String() string {\n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (r *Response) String() string {\n\treturn string(r.Data)\n}", "func (i *Info) String() string {\n\tb, _ := json.Marshal(i)\n\treturn string(b)\n}", "func (s GetApiOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r SendAll) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func (o *Predictor) String() string {\n o.Queues = []Addressableentityref{{}} \n \n \n \n \n\n j, _ := json.Marshal(o)\n 
str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o LunGetSerialNumberResponse) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (o QtreeCreateResponseResult) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (statsResponse *StatsResponse) String() string {\n\tstatsResponseBytes, err := json.Marshal(statsResponse)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\treturn string(statsResponseBytes)\n}", "func ToString(a interface{}) string {\n\tout, err := json.Marshal(a)\n\tif err != nil {\n\t\treturn \"ERROR CONVERTING\"\n\t}\n\n\treturn string(out)\n}", "func (s RestApi) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Actioncontract) String() string {\n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (v *DCHttpResponse) ToString() (body string, err error) {\n\tv.cacheBodyToMemory()\n\tif v.bodyErr != nil {\n\t\treturn \"\", v.bodyErr\n\t}\n\treturn string(v.body), nil\n}", "func jsonToString(jsonData []byte, err error) string {\n\tif err != nil {\n\t\tlogErr(\"Error on serialization %s\", err.Error())\n\t\treturn \"{}\"\n\t}\n\n\tbuf := bytes.NewBuffer(jsonData)\n\n\treturn buf.String()\n}", "func (o *Createperformanceprofile) String() string {\n \n \n \n o.ReportingIntervals = []Reportinginterval{{}} \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o ExportPolicyCreateResponse) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (hit Hit) String() string {\n\tout, _ := json.Marshal(hit)\n\treturn string(out)\n}", "func (t TeamResource) String() string {\n\tjt, _ := json.Marshal(t)\n\treturn string(jt)\n}", "func (s CreateApiOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func JSONToString(v interface{}) string {\n\tdata, _ := json.MarshalIndent(v, \"\", \" \")\n\tSTR := string(data)\n\tSTR = strings.ReplaceAll(STR, string(10), ``)\n\treturn STR\n}", "func (o *Botversionsummary) String() string {\n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Outbounddomain) String() string {\n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Sentimentfeedback) String() string {\n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (z Zamowienium) String() string {\n\tjz, _ := json.Marshal(z)\n\treturn string(jz)\n}", "func (self *monitoringData) String() string {\n\tstr, _ := self.JSON()\n\treturn str\n}", "func (o *Patchintegrationactionfields) String() string {\n \n o.RequestMappings = []Requestmapping{{}} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o SnapmirrorResyncResponse) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (j Json) String() string {\n\treturn string(j)\n}", "func (t TeamResources) String() string {\n\tjt, _ := json.Marshal(t)\n\treturn string(jt)\n}", "func (o ExportPolicyCreateResponseResult) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (o *Createemailrequest) String() 
string {\n \n \n \n o.SkillIds = []string{\"\"} \n \n \n o.Attributes = map[string]string{\"\": \"\"} \n \n \n \n \n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o IgroupAddResponse) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (r Info) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func (o LunOnlineResponseResult) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (o *Oauthclientrequest) String() string {\n\tj, _ := json.Marshal(o)\n\tstr, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n\treturn str\n}", "func (o *Knowledgegroupstatistics) String() string {\n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o IgroupCreateResponse) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (o *Trusteeauditqueryrequest) String() string {\n o.TrusteeOrganizationIds = []string{\"\"} \n o.TrusteeUserIds = []string{\"\"} \n \n \n \n o.Facets = []Facet{{}} \n o.Filters = []Filter{{}} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (r ReceiveAll) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func (o *Adjustablelivespeakerdetection) String() string {\n \n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (b BudgetLine) String() string {\n\tjb, _ := json.Marshal(b)\n\treturn string(jb)\n}", "func (o *Conversationeventcobrowse) String() string {\n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Outboundroute) String() string {\n \n \n \n \n o.ClassificationTypes = []string{\"\"} \n \n \n o.ExternalTrunkBases = []Domainentityref{{}} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Routingstatus) String() string {\n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o SnapmirrorCreateResponseResult) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (t Taxes) String() string {\n\tjt, _ := json.Marshal(t)\n\treturn string(jt)\n}", "func (o *Metrics) String() string {\n \n \n \n \n \n \n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func JSONString(response string) {\n\t// pretty-print the json\n\tutils.PrintJSON([]byte(response))\n}", "func (b BudgetLines) String() string {\n\tjb, _ := json.Marshal(b)\n\treturn string(jb)\n}", "func (o *Emailcampaignschedule) String() string {\n \n \n o.Intervals = []Scheduleinterval{{}} \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func ( fq *Fq_req ) To_json( ) ( *string, error ) {\n\tjbytes, err := json.Marshal( fq )\t\t\t// bundle into a json string\n\n\ts := string( jbytes 
)\n\n\treturn &s, err\n}", "func (s IntegrationResponse) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s IntegrationResponse) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s IntegrationResponse) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Domainedgesoftwareversiondto) String() string {\n \n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o LunGetSerialNumberResponseResult) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (e Data) String() string {\n\tj, _ := e.MarshalJSON()\n\treturn string(j)\n}", "func (o *Knowledgedocumentbulkrequest) String() string {\n \n \n \n o.Categories = []Documentcategoryinput{{}} \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Object) JSON() string {\n\tif o.URL != nil {\n\t\to.VersionID = o.URL.VersionID\n\t}\n\treturn strutil.JSON(o)\n}", "func (o *Historyheaderstranslation) String() string {\n \n \n \n \n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s ImportAssetFromApiGatewayApiResponseDetails) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Learningshareablecontentobject) String() string {\n \n \n \n \n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Campaign) String() string {\n \n \n \n \n \n \n \n \n \n o.PhoneColumns = []Phonecolumn{{}} \n \n o.DncLists = []Domainentityref{{}} \n \n \n \n \n \n o.RuleSets = []Domainentityref{{}} \n \n \n \n \n o.ContactSorts = []Contactsort{{}} \n \n \n \n o.ContactListFilters = []Domainentityref{{}} \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Observationvalue) String() string {\n \n \n \n o.RequestedRoutingSkillIds = []string{\"\"} \n \n \n \n \n \n \n \n \n \n \n \n \n o.RequestedRoutings = []string{\"\"} \n \n o.ScoredAgents = []Analyticsscoredagent{{}} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Screenrecordingmetadata) String() string {\n \n \n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Timezonemappingpreview) String() string {\n \n o.ContactsPerTimeZone = map[string]int{\"\": 0} \n o.ContactsMappedUsingZipCode = map[string]int{\"\": 0} \n \n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Recordingjobsquery) String() string {\n \n \n \n \n \n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Tokeninfo) String() string {\n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func JsonToString() (string, error) {\n\treturn DefaultWorker.JsonToString()\n}", "func (o *Recording) String() string {\n \n \n \n \n \n \n o.Annotations = []Annotation{{}} \n 
o.Transcript = []Chatmessage{{}} \n o.EmailTranscript = []Recordingemailmessage{{}} \n o.MessagingTranscript = []Recordingmessagingmessage{{}} \n \n \n o.MediaUris = map[string]Mediaresult{\"\": {}} \n \n \n \n \n \n \n \n \n \n \n \n \n o.Users = []User{{}} \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}" ]
[ "0.7062878", "0.6918404", "0.69082457", "0.69070494", "0.6883956", "0.6848638", "0.68151504", "0.6746523", "0.6736035", "0.67330015", "0.66542196", "0.66512746", "0.6626927", "0.6593913", "0.65868497", "0.65867937", "0.65791994", "0.6572556", "0.6546844", "0.64679724", "0.64587", "0.6443834", "0.6442571", "0.64378303", "0.6430297", "0.6418615", "0.6394161", "0.63918805", "0.6389729", "0.6379897", "0.63653064", "0.63506794", "0.6318037", "0.63142335", "0.6296114", "0.62550896", "0.6254484", "0.6249889", "0.6245324", "0.62353826", "0.6229199", "0.62251115", "0.62228286", "0.6222423", "0.62220496", "0.62190104", "0.6188447", "0.6184641", "0.61834097", "0.6181278", "0.61806273", "0.61784166", "0.6166488", "0.61661", "0.6161007", "0.6149101", "0.6147037", "0.6147011", "0.61461455", "0.61454016", "0.6143725", "0.61368954", "0.6135156", "0.611997", "0.61129785", "0.61114526", "0.6109442", "0.60956705", "0.60925835", "0.60921127", "0.6090873", "0.60906833", "0.60897195", "0.6089711", "0.60834813", "0.6073552", "0.60713494", "0.6066453", "0.60603637", "0.60598135", "0.60576916", "0.6047086", "0.6047086", "0.6047086", "0.60401094", "0.6030266", "0.6022908", "0.60131264", "0.6012999", "0.6009888", "0.6007692", "0.6007344", "0.59999305", "0.59948957", "0.59899914", "0.5984482", "0.5984189", "0.5980497", "0.5979045", "0.5977282" ]
0.6884482
4
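The negative samples in the record above share one generated-client String() idiom: marshal the struct to JSON, then round-trip the result through strconv.Quote and strconv.Unquote while rewriting `\\u` to `\u`, which decodes the \uXXXX escapes that json.Marshal emits for characters such as <, > and & back into literal runes. A minimal, self-contained sketch of that idiom follows; the Example type and its field are hypothetical stand-ins for the generated models:

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
)

// Example is a hypothetical stand-in for the generated API models above.
type Example struct {
	Name string `json:"name"`
}

// String marshals the struct to JSON, then round-trips the result through
// strconv.Quote/Unquote while rewriting `\\u` to `\u`, which decodes the
// \uXXXX escapes emitted by json.Marshal back into literal runes.
func (o *Example) String() string {
	j, _ := json.Marshal(o)
	str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\u`, `\u`, -1))
	return str
}

func main() {
	// prints {"name":"<tag>"} rather than {"name":"\u003ctag\u003e"}
	fmt.Println((&Example{Name: "<tag>"}).String())
}

Without the targeted replacement, Quote followed by Unquote would be an identity round-trip; the rewrite of `\\u` to `\u` is what turns each JSON escape back into its rune.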
Unmarshall takes a JSON API response and unmarshals the result data into the target interface
func (r *Response) Unmarshall(target interface{}) {
	// decode the recorded body into the generic API response envelope
	var res api.Response
	err := json.Unmarshal(r.recorder.Body.Bytes(), &res)
	gomega.Expect(err).To(gomega.BeNil(), "error unmarshalling JSON response")

	// re-marshal the Data field and decode it into the caller's target
	byteData, _ := json.Marshal(res.Data)
	err = json.Unmarshal(byteData, target)
	gomega.Expect(err).To(gomega.BeNil(), "error unmarshalling JSON data to target datastructure")
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func UnmarshalResponse(response *http.Response, jsonResult interface{}) error {\n\n\tdefer func() {\n\t\terr := response.Body.Close()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\treturn json.NewDecoder(response.Body).Decode(&jsonResult)\n}", "func unmarshalApiResponse(apiResponse *RawResponse, dest interface{}) error {\n if apiResponse.StatusCode == fasthttp.StatusOK || apiResponse.StatusCode == fasthttp.StatusAccepted {\n if err := json.Unmarshal(apiResponse.Body, dest); err != nil {\n return err\n }\n return nil\n } else {\n var apiError errors.ApiError\n if err := json.Unmarshal(apiResponse.Body, &apiError); err != nil {\n return err\n }\n return apiError\n }\n}", "func HttpUnmarshall(resp *http.Response, result interface{}) error {\n\tdefer resp.Body.Close()\n\tdecoder := json.NewDecoder(resp.Body)\n\treturn decoder.Decode(result)\n}", "func (r *singleResult) Unmarshal(data []byte) error {\n\treturn json.Unmarshal(data, r)\n}", "func (response Response) Unmarshal(value interface{}) error {\n\tvalues := map[string]interface{}{}\n\terr := json.Unmarshal(response.Raw.Json, &values)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdecoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{\n\t\tDecodeHook: func(from reflect.Value, to reflect.Value) (interface{}, error) {\n\t\t\tif _, ok := to.Interface().(time.Time); ok {\n\t\t\t\treturn time.Parse(time.RFC3339, from.String())\n\t\t\t}\n\t\t\treturn from.Interface(), nil\n\t\t},\n\t\tResult: value,\n\t\tTagName: \"json\",\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif response.dataKeyPath != \"\" {\n\t\treturn decoder.Decode(values[response.dataKeyPath])\n\t}\n\n\treturn decoder.Decode(values)\n}", "func (m *Response) Unmarshal(v *Loader) error {\n\treturn m.Output.Unmarshal(v)\n}", "func (r BatchGetAllResponse) Unmarshal(v interface{}) error {\n\treturn r.UnmarshalWithTagName(v, defaultResultTag)\n}", "func UnmarshalResponse(response *http.Response, v interface{}) error {\n\n\t// get the body as []byte\n\tbody, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not read the body: %s\", err.Error())\n\t}\n\n\t// try to Unmarshal to struct\n\terr = json.Unmarshal(body, v)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not unmarshal: %s\", err.Error())\n\t}\n\n\treturn nil\n}", "func (v *BlitzedItemResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson6a975c40DecodeJsonBenchmark4(&r, v)\n\treturn r.Error()\n}", "func UnmarshalSourceResponse(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(SourceResponse)\n\terr = core.UnmarshalPrimitive(m, \"id\", &obj.ID)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"name\", &obj.Name)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"description\", &obj.Description)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"enabled\", &obj.Enabled)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"created_at\", &obj.CreatedAt)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func Unmarshal(data []byte) (interface{}, error) {\n\tvar value marble\n\terr := json.Unmarshal(data, &value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &value, nil\n}", "func (v *WSResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer2(&r, v)\n\treturn 
r.Error()\n}", "func (v *GetUserResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson84c0690eDecodeMainHandlers1(&r, v)\n\treturn r.Error()\n}", "func unmarshallJobResponse(resp []byte) JobApiResponse {\n\n\tvar apiResp JobApiResponse\n\n\tif err := json.Unmarshal(resp, &apiResp); err != nil {\n\t\tlogger.Fatalf(\"Problem reaading api response: %s\", err)\n\t}\n\n\tif apiResp.Status != apiError && apiResp.Status != apiSuccess {\n\t\tlogger.Fatalf(\"Unknown status response: %s\", apiResp.Status)\n\t}\n\n\treturn apiResp\n\n}", "func Unmarshall(input []byte, output interface{}) (err error) {\n\tlex := &lexer{scan: scanner.Scanner{Mode: scanner.GoTokens}}\n\tlex.scan.Init(bytes.NewReader(input))\n\n\tdefer func() {\n\t\tif x := recover(); x != nil {\n\t\t\terr = fmt.Errorf(\"error in %s: %v\", lex.scan.Position, x)\n\t\t}\n\t}()\n\n\treadYamlHeader(lex)\n\tread(lex, reflect.ValueOf(output).Elem(), 0)\n\treturn nil\n}", "func (sr *WifiConnectResponse) Unmarshal(b []byte) error {\n\treturn json.Unmarshal(b, sr)\n}", "func (r *NormalResult) Unmarshal(data []byte) error {\n\treturn json.Unmarshal(data, r)\n}", "func (m *Marshaler) Unmarshal(b []byte, x interface{}) error {\n\tvar err error\n\tswitch x := x.(type) {\n\tcase *space.Resource:\n\t\tvar r marshal.Resource\n\t\tif err := json.Unmarshal(b, &r); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*x, err = marshal.ResourceToSpace(r)\n\tcase *space.Object:\n\t\tvar o marshal.Object\n\t\tif err := json.Unmarshal(b, &o); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*x, err = marshal.ObjectToSpace(o)\n\tcase *space.Entity:\n\t\tvar e marshal.Entity\n\t\tif err := json.Unmarshal(b, &e); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*x, err = marshal.EntityToSpace(e)\n\tcase *space.Link:\n\t\tvar l marshal.Link\n\t\tif err := json.Unmarshal(b, &l); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*x, err = marshal.LinkToSpace(l)\n\tdefault:\n\t\treturn marshal.ErrUnsuportedType\n\t}\n\treturn err\n}", "func UnmarshalIntegrationGetResponse(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(IntegrationGetResponse)\n\terr = core.UnmarshalPrimitive(m, \"id\", &obj.ID)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"type\", &obj.Type)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"metadata\", &obj.Metadata, UnmarshalIntegrationMetadata)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"created_at\", &obj.CreatedAt)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"updated_at\", &obj.UpdatedAt)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func unmarshallQueueResponse(resp []byte) QueueApiResponse {\n\n\tvar apiResp QueueApiResponse\n\n\tif err := json.Unmarshal(resp, &apiResp); err != nil {\n\t\tlogger.Fatalf(\"Problem reaading api response: %s\", err)\n\t}\n\n\tif apiResp.Status != apiError && apiResp.Status != apiSuccess {\n\t\tlogger.Fatalf(\"Unknown status response: %s\", apiResp.Status)\n\t}\n\n\treturn apiResp\n\n}", "func (j *JSON) Unmarshal(input, target interface{}) error {\n\t// take the input and convert it to target\n\treturn jsonEncoding.Unmarshal(input.([]byte), target)\n}", "func JSONToUnstructured(stub, namespace string, mapping *meta.RESTMapping, dynamicClient dynamic.Interface) (dynamic.ResourceInterface, *unstructured.Unstructured, error) {\n\ttypeMetaAdder := map[string]interface{}{}\n\tif err := json.Unmarshal([]byte(stub), 
&typeMetaAdder); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// we don't require GVK on the data we provide, so we fill it in here. We could, but that seems extraneous.\n\ttypeMetaAdder[\"apiVersion\"] = mapping.GroupVersionKind.GroupVersion().String()\n\ttypeMetaAdder[\"kind\"] = mapping.GroupVersionKind.Kind\n\n\tif mapping.Scope == meta.RESTScopeRoot {\n\t\tnamespace = \"\"\n\t}\n\n\treturn dynamicClient.Resource(mapping.Resource).Namespace(namespace), &unstructured.Unstructured{Object: typeMetaAdder}, nil\n}", "func (resp *Response) JSONUnmarshal(v interface{}) error {\n\tif resp == nil {\n\t\treturn errors.New(\"empty response\")\n\t}\n\n\tif resp.Err != nil {\n\t\treturn resp.Err\n\t}\n\n\t// get data from resp\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer resp.Body.Close()\n\terr = json.Unmarshal(data, &v)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func UnmarshalResponse(t *testing.T, body []byte, res *http.Response, value *JsonApiResponse, responseAssertions func(res *JsonApiResponse)) *JsonApiResponse {\n\terr := json.Unmarshal(body, value)\n\tassert.Nil(t, err, \"Error unmarshaling JSONAPI response body: %s\", err)\n\tif responseAssertions != nil {\n\t\tresponseAssertions(value)\n\t}\n\treturn value\n}", "func Unmarshal(b []byte, v interface{}) error {\n\treturn json.Unmarshal(b, v)\n}", "func unmarshal(data []byte, v interface{}) {\n\terr := json.Unmarshal(data, v)\n\tassert(err == nil, \"unmarshal error: %s\", err)\n}", "func Unmarshal(data []byte, typ DataFormat, target interface{}) {\n\tswitch typ {\n\tcase GOB:\n\t\tbuf := bytes.NewReader(data)\n\t\tgob.NewDecoder(buf).Decode(target)\n\n\tdefault:\n\t\tif err := json.Unmarshal(data, target); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}", "func (pl PLUtil) Unmarshal(data []byte, v interface{}) error {\n\tcmd := pl.execCommand(\n\t\t\"plutil\",\n\t\t\"-convert\", \"json\",\n\t\t// Read from stdin.\n\t\t\"-\",\n\t\t// Output to stdout.\n\t\t\"-o\", \"-\")\n\tcmd.Stdin = bytes.NewReader(data)\n\tstdout, err := cmd.Output()\n\tif exitErr, ok := err.(*exec.ExitError); ok {\n\t\treturn fmt.Errorf(\"`%s` failed (%w) with stderr: %s\", cmd, err, exitErr.Stderr)\n\t}\n\tif err != nil {\n\t\treturn fmt.Errorf(\"`%s` failed (%w)\", cmd, err)\n\t}\n\tif err := json.Unmarshal(stdout, v); err != nil {\n\t\treturn fmt.Errorf(\"failed to parse json: %w\", err)\n\t}\n\treturn nil\n}", "func (r *MaticSupplyResult) Unmarshal(data []byte) error {\n\treturn json.Unmarshal(data, r)\n}", "func (lump *Generic) Unmarshall(data []byte) (err error) {\n\tlump.length = len(data)\n\tlump.data = data\n\n\treturn err\n}", "func (j *Response) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func UnmarshalResponse(data []byte) (Response, error) {\n\tvar r Response\n\terr := json.Unmarshal(data, &r)\n\treturn r, err\n}", "func (d *decoder) unmarshal(t reflect.Type, v reflect.Value, n nestedTypeData) error {\n\tswitch t.Kind() {\n\tcase reflect.Ptr:\n\t\treturn d.unmarshalPointer(t, v, n)\n\tcase reflect.Slice:\n\t\treturn d.unmarshalVector(t, v, n)\n\tcase reflect.String:\n\t\treturn d.unmarshalString(t, v, n)\n\t}\n\tif isHandleType(t) {\n\t\treturn d.unmarshalHandle(v, n)\n\t}\n\tif isInterfaceType(t) || isInterfaceRequestType(t) {\n\t\t// An interface is represented by a Proxy, whose first field is\n\t\t// a zx.Channel, and we can just marshal that. 
Same goes for an\n\t\t// interface request, which is just an InterfaceRequest whose\n\t\t// first field is a zx.Channel.\n\t\treturn d.unmarshalHandle(v.Field(0), n)\n\t}\n\treturn d.unmarshalInline(t, v, n)\n}", "func (c *Client) UnmarshalResponse(response *http.Response, resType interface{}) error {\n\t// Read all the response body\n\tdefer response.Body.Close()\n\tbody, err := io.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// < 200 && >= 300 : API error\n\tif response.StatusCode < http.StatusOK || response.StatusCode >= http.StatusMultipleChoices {\n\t\tapiError := &APIError{\n\t\t\tCode: fmt.Sprintf(\"HTTPStatus: %d\", response.StatusCode),\n\t\t}\n\n\t\tif err = json.Unmarshal(body, apiError); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn apiError\n\t}\n\n\t// Nothing to unmarshal\n\tif len(body) == 0 || resType == nil {\n\t\treturn nil\n\t}\n\n\treturn json.Unmarshal(body, &resType)\n}", "func unmarshal(info []byte) []Res {\n\t// to struct\n\tvar data R1\n\terr := json.Unmarshal(info, &data)\n\tcheck(err)\n\tfCast := data.D1.Location.Forecast\n\treturn fCast\n}", "func (v *FormulaAndFunctionResponseFormat) UnmarshalJSON(src []byte) error {\n\tvar value string\n\terr := json.Unmarshal(src, &value)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*v = FormulaAndFunctionResponseFormat(value)\n\treturn nil\n}", "func (s *Serializer) Unmarshal(data []byte, v interface{}) error {\n\treturn jsoniter.Unmarshal(data,v)\n}", "func (c *client) unmarshalDataIntoStruct(responseBody []byte, i interface{}) error {\n\tisResponseDataEmpty, err := checkForEmptyResponseData(responseBody)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !isResponseDataEmpty {\n\t\tvar basicResponse BasicResponse\n\t\tbasicResponse.Data = i\n\t\terr := json.Unmarshal(responseBody, &basicResponse)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func Unmarshal(b []byte) (Payload, error) {\n\tvar p Payload\n\terr := json.Unmarshal(b, &p)\n\treturn p, err\n}", "func ResponseToStruct(res *http.Response, v interface{}) error {\n\tvar reader io.ReadCloser\n\tvar err error\n\tswitch res.Header.Get(\"Content-Encoding\") {\n\tcase gzipHeader:\n\t\treader, err = gzip.NewReader(res.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer reader.Close()\n\tcase deflateHeader:\n\t\treader = flate.NewReader(res.Body)\n\t\tdefer reader.Close()\n\tdefault:\n\t\treader = res.Body\n\t}\n\n\tdecoder := gojay.BorrowDecoder(reader)\n\tdefer decoder.Release()\n\n\terr = decoder.Decode(&v)\n\tif err != nil {\n\t\treturn json.NewDecoder(reader).Decode(v)\n\t}\n\n\treturn nil\n}", "func (v *UnloadCheckResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson6a975c40DecodeJsonBenchmark(&r, v)\n\treturn r.Error()\n}", "func (c *Call) Unmarshal(v interface{}) error {\n\terr := json.NewDecoder(c.req.Body).Decode(v)\n\tif err != nil {\n\t\tc.code = http.StatusBadRequest\n\t\tc.reply = errors.E(\"unmarshal\", fmt.Sprint(v), err)\n\t}\n\treturn err\n}", "func (v *ItemCheckResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson6a975c40DecodeJsonBenchmark2(&r, v)\n\treturn r.Error()\n}", "func (v *RespStruct) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonD2b7633eDecodeDrhyuComIndexerModels1(&r, v)\n\treturn r.Error()\n}", "func (v *BidResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson326edDecodeGithubComMxmCherryOpenrtb(&r, v)\n\treturn r.Error()\n}", "func (r 
*ReceiptResult) Unmarshal(data []byte) error {\n\treturn json.Unmarshal(data, r)\n}", "func (t *Output) Unmarshal(v *Loader) error {\n\tif len(*v) == 0 {\n\t\treturn nil\n\t} else if len(*v) == 1 {\n\t\treturn msgpack.Unmarshal(t.Data, (*v)[0])\n\t}\n\t// in case is more we assume we have a longer list of objects\n\treturn msgpack.Unmarshal(t.Data, v)\n}", "func Unmarshal(data []byte) (interface{}, error) {\n\tt := new(TextureTranform)\n\terr := json.Unmarshal(data, t)\n\treturn t, err\n}", "func UnmarshalFilterResp(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(FilterResp)\n\terr = core.UnmarshalPrimitive(m, \"success\", &obj.Success)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"errors\", &obj.Errors)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"messages\", &obj.Messages)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"result\", &obj.Result, UnmarshalFilterObject)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func (pi *proxyItem) Unmarshal(data []byte) error {\n\tif pi == nil || pi.obj == nil {\n\t\treturn nil\n\t}\n\n\tswitch m := pi.obj.(type) {\n\tcase encoding.BinaryUnmarshaler:\n\t\treturn m.UnmarshalBinary(data)\n\tcase storage.Unmarshaler:\n\t\treturn m.Unmarshal(data)\n\t}\n\treturn json.Unmarshal(data, &pi.obj)\n}", "func (response *S3Response) UnmarshalBody(obj interface{}) error {\n defer response.Close()\n unmarshaller := xml.NewDecoder(response.httpResponse.Body)\n return unmarshaller.Decode(obj)\n}", "func (r *Response) UnmarshalJSON(data []byte) error {\n\ttype Alias Response\n\taux := &struct {\n\t\tHeaders interface{} `json:\"headers\"`\n\t\t*Alias\n\t}{\n\t\tAlias: (*Alias)(r),\n\t}\n\tif err := json.Unmarshal(data, &aux); err != nil {\n\t\treturn err\n\t}\n\n\tif r.Request != nil {\n\t\tr.Request.Headers = castHeaders(r.Request.Headers)\n\t}\n\tif r.Response != nil {\n\t\tr.Response.Headers = castHeaders(r.Response.Headers)\n\t}\n\treturn nil\n}", "func unmarshalJSON(j extv1.JSON, output *any) error {\n\tif len(j.Raw) == 0 {\n\t\treturn nil\n\t}\n\treturn json.Unmarshal(j.Raw, output)\n}", "func (v *Responce) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson6a975c40DecodeGithubComSerhio83DruidPkgStructs(&r, v)\n\treturn r.Error()\n}", "func (v *SearchResult) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer7(&r, v)\n\treturn r.Error()\n}", "func (j *JsonlMarshaler) Unmarshal(data []byte, v interface{}) error {\n\treturn json.Unmarshal(data, v)\n}", "func (j *HealthcheckResponse) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (res *Result) Into(obj interface{}) error {\n\tif nil != res.err {\n\t\treturn res.err\n\t}\n\tif len(res.body) > 0 {\n\t\tif err := json.Unmarshal(res.body, obj); nil != err {\n\t\t\treturn internalError(\"decode response error: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}", "func UnmarshalResponse(resp *http.Response, data interface{}, lowerCaseHeaderMaps bool) error {\n\tv := reflect.Indirect(reflect.ValueOf(data))\n\treturn unmarshalLocationElements(resp, v, lowerCaseHeaderMaps)\n}", "func (sr *SearchResponse) UnmarshalJSON(body []byte) error {\n\tvar m map[string]*json.RawMessage\n\terr := json.Unmarshal(body, &m)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor k, v := range m 
{\n\t\tswitch k {\n\t\tcase \"queryContext\":\n\t\t\tif v != nil {\n\t\t\t\tvar queryContext QueryContext\n\t\t\t\terr = json.Unmarshal(*v, &queryContext)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsr.QueryContext = &queryContext\n\t\t\t}\n\t\tcase \"entities\":\n\t\t\tif v != nil {\n\t\t\t\tvar entities Entities\n\t\t\t\terr = json.Unmarshal(*v, &entities)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsr.Entities = &entities\n\t\t\t}\n\t\tcase \"places\":\n\t\t\tif v != nil {\n\t\t\t\tvar places Places\n\t\t\t\terr = json.Unmarshal(*v, &places)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsr.Places = &places\n\t\t\t}\n\t\tcase \"contractualRules\":\n\t\t\tif v != nil {\n\t\t\t\tcontractualRules, err := unmarshalBasicContractualRulesContractualRuleArray(*v)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsr.ContractualRules = &contractualRules\n\t\t\t}\n\t\tcase \"webSearchUrl\":\n\t\t\tif v != nil {\n\t\t\t\tvar webSearchURL string\n\t\t\t\terr = json.Unmarshal(*v, &webSearchURL)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsr.WebSearchURL = &webSearchURL\n\t\t\t}\n\t\tcase \"id\":\n\t\t\tif v != nil {\n\t\t\t\tvar ID string\n\t\t\t\terr = json.Unmarshal(*v, &ID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsr.ID = &ID\n\t\t\t}\n\t\tcase \"_type\":\n\t\t\tif v != nil {\n\t\t\t\tvar typeVar TypeBasicResponseBase\n\t\t\t\terr = json.Unmarshal(*v, &typeVar)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsr.Type = typeVar\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func Unmarshal(data []byte, v Unmarshaler) error {\n\tl := jlexer.Lexer{Data: data}\n\tv.UnmarshalEasyJSON(&l)\n\treturn l.Error()\n}", "func (r *InfoResponse) UnmarshalJSON(data []byte) error {\n\ttype Alias InfoResponse\n\taux := &struct {\n\t\tVersion interface{} `json:\"version\"`\n\t\tAuthorProfile interface{} `json:\"author_profile\"`\n\t\tRequires interface{} `json:\"requires\"`\n\t\tRequiresPHP interface{} `json:\"requires_php\"`\n\t\tTested interface{} `json:\"tested\"`\n\t\tContributors interface{} `json:\"contributors\"`\n\t\tRatings interface{} `json:\"ratings\"`\n\t\tNumRatings interface{} `json:\"num_ratings\"`\n\t\tScreenshots interface{} `json:\"screenshots\"`\n\t\tTags interface{} `json:\"tags\"`\n\t\tVersions interface{} `json:\"versions\"`\n\t\t*Alias\n\t}{\n\t\tAlias: (*Alias)(r),\n\t}\n\tif err := json.Unmarshal(data, &aux); err != nil {\n\t\treturn err\n\t}\n\n\t// Set Version as string\n\tswitch v := aux.Version.(type) {\n\tcase string:\n\t\tr.Version = v\n\tcase int:\n\t\tr.Version = strconv.Itoa(v)\n\tdefault:\n\t\tr.Version = \"\"\n\t}\n\n\t// AuthorProfile can occasionally be a boolean (false)\n\tswitch v := aux.AuthorProfile.(type) {\n\tcase string:\n\t\tr.AuthorProfile = v\n\tdefault:\n\t\tr.AuthorProfile = \"\"\n\t}\n\n\t// Requires can occasionally be a boolean (false)\n\tswitch v := aux.Requires.(type) {\n\tcase string:\n\t\tr.Requires = v\n\tdefault:\n\t\tr.Requires = \"\"\n\t}\n\n\t// Tested can occasionally be a boolean (false)\n\tswitch v := aux.Tested.(type) {\n\tcase string:\n\t\tr.Requires = v\n\tdefault:\n\t\tr.Requires = \"\"\n\t}\n\n\t// RequiresPHP can occasionally be a boolean (false)\n\tswitch v := aux.RequiresPHP.(type) {\n\tcase string:\n\t\tr.RequiresPHP = v\n\tdefault:\n\t\tr.RequiresPHP = \"\"\n\t}\n\n\t// RequiresPHP can occasionally be a boolean (false)\n\tswitch v := aux.RequiresPHP.(type) {\n\tcase string:\n\t\tr.RequiresPHP = 
v\n\tdefault:\n\t\tr.RequiresPHP = \"\"\n\t}\n\n\t// Parse Contributors\n\tif aux.Contributors != nil && reflect.TypeOf(aux.Contributors).Kind() == reflect.Map {\n\t\tfor k, v := range aux.Contributors.(map[string]interface{}) {\n\t\t\tcontrib := []string{\n\t\t\t\tk, v.(string),\n\t\t\t}\n\t\t\tr.Contributors = append(r.Contributors, contrib)\n\t\t}\n\t}\n\n\t// Parse Ratings\n\tif reflect.TypeOf(aux.Ratings).Kind() == reflect.Map {\n\t\tfor k, v := range aux.Ratings.(map[string]interface{}) {\n\t\t\tvar num int\n\t\t\tvar err error\n\t\t\tswitch t := v.(type) {\n\t\t\tcase float64:\n\t\t\t\tnum = int(t)\n\t\t\tcase string:\n\t\t\t\tnum, err = strconv.Atoi(t)\n\t\t\t\tif err != nil {\n\t\t\t\t\tnum = 0\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tnum = 0\n\t\t\t}\n\t\t\trating := Rating{\n\t\t\t\tStars: k,\n\t\t\t\tNumber: num,\n\t\t\t}\n\t\t\tr.Ratings = append(r.Ratings, rating)\n\t\t}\n\t}\n\n\t// NumRatings can be a string \"0\" when zero\n\tswitch v := aux.NumRatings.(type) {\n\tcase int:\n\t\tr.NumRatings = v\n\tcase string:\n\t\tnum, err := strconv.Atoi(v)\n\t\tif err != nil {\n\t\t\tr.NumRatings = 0\n\t\t} else {\n\t\t\tr.NumRatings = num\n\t\t}\n\tdefault:\n\t\tr.NumRatings = 0\n\t}\n\n\t// Parse Screenshots\n\tif reflect.TypeOf(aux.Screenshots).Kind() == reflect.Map {\n\t\tfor _, v := range aux.Screenshots.(map[string]interface{}) {\n\t\t\ts := v.(map[string]interface{})\n\t\t\tscreenshot := Screenshot{\n\t\t\t\tSrc: s[\"src\"].(string),\n\t\t\t}\n\t\t\t// Handle different types for caption\n\t\t\t// Can sometimes be boolean instead of string\n\t\t\tswitch v := s[\"caption\"].(type) {\n\t\t\tcase bool:\n\t\t\t\tscreenshot.Caption = \"\"\n\t\t\tcase string:\n\t\t\t\tscreenshot.Caption = v\n\t\t\tdefault:\n\t\t\t\tscreenshot.Caption = \"\"\n\t\t\t}\n\t\t\tr.Screenshots = append(r.Screenshots, screenshot)\n\t\t}\n\t}\n\n\t// Parse Tags\n\tif reflect.TypeOf(aux.Tags).Kind() == reflect.Map {\n\t\tfor k, v := range aux.Tags.(map[string]interface{}) {\n\t\t\ttag := []string{\n\t\t\t\tk, v.(string),\n\t\t\t}\n\t\t\tr.Tags = append(r.Tags, tag)\n\t\t}\n\t}\n\n\t// Parse Versions\n\tif reflect.TypeOf(aux.Versions).Kind() == reflect.Map {\n\t\tfor k, v := range aux.Versions.(map[string]interface{}) {\n\t\t\tversion := []string{\n\t\t\t\tk, v.(string),\n\t\t\t}\n\t\t\tr.Versions = append(r.Versions, version)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (v *DocumentResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson6a975c40DecodeJsonBenchmark3(&r, v)\n\treturn r.Error()\n}", "func Unmarshal(data []byte, v interface{}) error {\n\treturn ReturnIfError(\n\t\tjson.Unmarshal(data, v),\n\t\tcheckValues(v),\n\t)\n}", "func (j *JsonMarshaler) Unmarshal(data []byte, v interface{}) error {\n\treturn json.Unmarshal(data, v)\n}", "func Unmarshal(weatherJson []byte) WeatherData {\n\tvar data WeatherData //[]map[string]interface{}\n\n\terr := json.Unmarshal(weatherJson, &data)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn data\n}", "func (t *TableQueryResponse) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", t, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"odata.metadata\":\n\t\t\terr = unpopulate(val, \"ODataMetadata\", &t.ODataMetadata)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &t.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil 
{\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", t, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (r *Response) UnmarshalJSON(b []byte) error {\n\tvar o struct {\n\t\tResults []interface{} `json:\"results,omitempty\"`\n\t\tErr string `json:\"error,omitempty\"`\n\t}\n\n\terr := json.Unmarshal(b, &o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.Results = o.Results\n\tif o.Err != \"\" {\n\t\tr.Err = errors.New(o.Err)\n\t}\n\treturn nil\n}", "func (v *TransactionsPagesResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonE82c8e88DecodeGithubComKamaiuOandaGoModel3(&r, v)\n\treturn r.Error()\n}", "func Unmarshal(data []byte, v interface{}) error {\n\terr := json.Unmarshal(data, v)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif ImplementsPostJSONUnmarshaler(v) {\n\t\terr := v.(PostJSONUnmarshaler).PostUnmarshalJSON()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func Unmarshal(data []byte, v Unmarshaler) error {\n\tl := jlexer.Lexer{Data: data}\n\tv.UnmarshalTinyJSON(&l)\n\treturn l.Error()\n}", "func (s *SeriesServiceOp) Unmarshal(ctx context.Context, opt *SeriesOptions, out interface{}) (*Response, error) {\n\tpath := seriesBasePath\n\tpath, err := addOptions(path, opt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar res seriesResult\n\tvar r *Response\n\tres, r, err = s.list(ctx, path)\n\tif err != nil {\n\t\treturn r, err\n\t}\n\tif res.Series == nil {\n\t\treturn r, nil\n\t}\n\tif err = json.Unmarshal(res.Series, out); err != nil {\n\t\treturn r, err\n\t}\n\treturn r, err\n}", "func UnwrapRegister(JSON string) (Register, error) {\n\tvar register Register\n\terr := json.Unmarshal([]byte(JSON), &register)\n\tif err != nil {\n\t\treturn Register{}, err\n\t}\n\treturn register, nil\n}", "func (v *TransactionResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonE82c8e88DecodeGithubComKamaiuOandaGoModel5(&r, v)\n\treturn r.Error()\n}", "func (FinagleFmt) Unmarshal(data []byte) (ZKRecord, error) {\n\tf := &FinagleRecord{}\n\terr := json.Unmarshal(data, f)\n\treturn f, err\n}", "func (response *Response) UnmarshalJSON(data []byte) error {\n\ttype ResponseBis Response\n\tvar x ResponseBis\n\tif err := json.Unmarshal(data, &x); err != nil {\n\t\treturn err\n\t}\n\t_ = json.Unmarshal(data, &x.Extensions)\n\tdelete(x.Extensions, \"description\")\n\tdelete(x.Extensions, \"headers\")\n\tdelete(x.Extensions, \"content\")\n\tdelete(x.Extensions, \"links\")\n\t*response = Response(x)\n\treturn nil\n}", "func (e *ErrorMsg) UnMarshall(notFound bool, d []byte, code int) {\n\tif notFound {\n\t\te.Symbol = string(d)\n\t\te.Code = 404\n\t\te.Message = \"Symbol not found\"\n\t\treturn\n\t}\n\te.Code = 500\n\te.Message = string(d)\n}", "func (v *BidResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson326edDecodeGithubComApplifierGoOpenrtbOpenrtb2(&r, v)\n\treturn r.Error()\n}", "func unmarshal(resp *http.Response, obj interface{}) (err error) {\n\tvar body []byte\n\tbody, err = ioutil.ReadAll(resp.Body)\n\tresp.Body.Close()\n\tif err == nil {\n\n\t\tif debug() {\n\t\t\t// TODO: Printf-ing the output of json.Indent through the bytes.Buffer.String\n\t\t\t// produces cruft. 
However writting directly to it, works o.k.\n\t\t\t// prettyJSON := bytes.Buffer{}\n\t\t\tvar prettyJSON bytes.Buffer\n\t\t\tfmt.Fprintf(&prettyJSON, t.Title(\"Pretty print response body:\\n\"))\n\t\t\tindentErr := json.Indent(&prettyJSON, body, \"\", \" \")\n\t\t\tif indentErr == nil {\n\t\t\t\t// fmt.Printf(\"%s %s\\n\", t.Title(\"Response body is:\"), t.Text(\"%s\\n\", prettyJSON))\n\t\t\t\tprettyJSON.WriteTo(os.Stdout)\n\t\t\t\tfmt.Println()\n\t\t\t\tfmt.Println()\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"%s\\n\", t.Fail(\"Error indenting JSON - %s\", indentErr.Error()))\n\t\t\t\tfmt.Printf(\"%s %s\\n\", t.Title(\"Body:\"), t.Text(string(body)))\n\t\t\t}\n\t\t}\n\n\t\tjson.Unmarshal(body, &obj)\n\t\tif debug() {\n\t\t\tfmt.Printf(\"%s %s\\n\", t.Title(\"Unmarshaled object: \"), t.Text(\"%#v\", obj))\n\t\t\tfmt.Println()\n\t\t}\n\t}\n\treturn err\n}", "func decodeListResponse(_ context.Context, reply interface{}) (interface{}, error) {\n\treturn nil, errors.New(\"'Users' Decoder is not impelemented\")\n}", "func (v *WSTableResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson25363b2dDecodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi3(&r, v)\n\treturn r.Error()\n}", "func (c Client) decodeResponse(endpoint, verb string, params *url.Values, target interface{}) (err error) {\n\tfullURL, err := c.api(endpoint, params)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tc.l.WithFields(log.Fields{\n\t\t\"url\": fullURL.String(), // TODO: remove sensitive data\n\t\t\"HTTPverb\": verb,\n\t}).Debug(\"hitting API\")\n\n\tvar resp = &http.Response{}\n\tswitch verb {\n\tcase \"GET\":\n\t\tresp, err = c.httpclient.Get(fullURL.String())\n\tcase \"POST\":\n\t\tresp, err = c.httpclient.Post(fullURL.String(), \"application/x-www-form-urlencoded\", nil)\n\tcase \"DELETE\":\n\t\treq, _ := http.NewRequest(\"DELETE\", fullURL.String(), nil)\n\t\tresp, err = c.httpclient.Do(req)\n\t}\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\treturn json.NewDecoder(resp.Body).Decode(target)\n}", "func (jar *JsonApiResponse) UnmarshalJSON(b []byte) error {\n\tfullRes := make(map[string]interface{})\n\n\tif err := json.Unmarshal(b, &fullRes); err != nil {\n\t\treturn err\n\t}\n\n\tif e, ok := fullRes[\"data\"]; !ok {\n\t\treturn fmt.Errorf(\"missing 'data' key when unmarshaling JSONAPI response: %v\", e)\n\t} else {\n\t\tswitch e.(type) {\n\t\tcase []interface{}:\n\t\t\tjar.Data = make([]map[string]interface{}, len(e.([]interface{})))\n\t\t\tfor i, v := range e.([]interface{}) {\n\t\t\t\tjar.Data[i] = v.(map[string]interface{})\n\t\t\t}\n\t\tcase map[string]interface{}:\n\t\t\tjar.Data = make([]map[string]interface{}, 1)\n\t\t\tjar.Data[0] = e.(map[string]interface{})\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unable to determine type of JSONAPI key 'data': %v\", e)\n\t\t}\n\t}\n\treturn nil\n}", "func decodeGetResponse(_ context.Context, reply interface{}) (interface{}, error) {\n\treturn nil, errors.New(\"'Users' Decoder is not impelemented\")\n}", "func Unmarshal(req *request.Request) {\n\tdefer req.HTTPResponse.Body.Close()\n\tif req.DataFilled() {\n\t\terr := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body)\n\t\tif err != nil {\n\t\t\treq.Error = awserr.NewRequestFailure(\n\t\t\t\tawserr.New(request.ErrCodeSerialization, \"failed decoding JSON RPC response\", err),\n\t\t\t\treq.HTTPResponse.StatusCode,\n\t\t\t\treq.RequestID,\n\t\t\t)\n\t\t}\n\t}\n\treturn\n}", "func unmarshallCLIData(data []byte) CLIResponse {\n\tvar unmarshalledData 
CLIResponse\n\tjson.Unmarshal([]byte(data), &unmarshalledData)\n\treturn unmarshalledData\n}", "func UnmarshalTemplateSourceDataResponse(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(TemplateSourceDataResponse)\n\terr = core.UnmarshalModel(m, \"env_values\", &obj.EnvValues, UnmarshalEnvVariableResponse)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"folder\", &obj.Folder)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"has_githubtoken\", &obj.HasGithubtoken)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"id\", &obj.ID)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"type\", &obj.Type)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"uninstall_script_name\", &obj.UninstallScriptName)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"values\", &obj.Values)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"values_metadata\", &obj.ValuesMetadata)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"values_url\", &obj.ValuesURL)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"variablestore\", &obj.Variablestore, UnmarshalWorkspaceVariableResponse)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func (g *GenerateExpressRoutePortsLOAResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", g, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"encodedContent\":\n\t\t\terr = unpopulate(val, \"EncodedContent\", &g.EncodedContent)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", g, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (self *ResourceOutput) UnmarshalJSON(b []byte) error {\n\tvar m rawResourceOutput\n\terr := json.Unmarshal(b, &m)\n\tif err == nil {\n\t\to := ResourceOutput(m)\n\t\t*self = o\n\t\terr = self.Validate()\n\t}\n\treturn err\n}", "func (x *CMsgClientToGCCavernCrawlRequestMapStateResponse_Result) UnmarshalJSON(b []byte) error {\n\tnum, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = CMsgClientToGCCavernCrawlRequestMapStateResponse_Result(num)\n\treturn nil\n}", "func unmarshal(resp *http.Response, obj interface{}) (err error) {\n\tvar body []byte\n\n\t// lifted from source to http.DumpResponse\n\t// Save body\n\tsave := resp.Body\n\tsavecl := resp.ContentLength\n\tif resp.Body == nil {\n\t\tresp.Body = emptyBody\n\t} else {\n\t\tsave, resp.Body, err = drainBody(resp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tbody, err = ioutil.ReadAll(resp.Body)\n\tdefer resp.Body.Close()\n\tif err == nil {\n\n\t\tif vconfig.Debug() {\n\t\t\t// TODO: Printf-ing the output of json.Indent through the bytes.Buffer.String\n\t\t\t// produces cruft. 
However writting directly to it, works o.k.\n\t\t\t// prettyJSON := bytes.Buffer{}\n\t\t\tvar prettyJSON bytes.Buffer\n\t\t\tfmt.Fprintf(&prettyJSON, t.Title(\"Pretty print response body:\\n\"))\n\t\t\tindentErr := json.Indent(&prettyJSON, body, \"\", \" \")\n\t\t\tif indentErr == nil {\n\t\t\t\t// fmt.Printf(\"%s %s\\n\", t.Title(\"Response body is:\"), t.Text(\"%s\\n\", prettyJSON))\n\t\t\t\tprettyJSON.WriteTo(os.Stdout)\n\t\t\t\tfmt.Println()\n\t\t\t\tfmt.Println()\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"%s\\n\", t.Fail(\"Error indenting JSON - %s\", indentErr.Error()))\n\t\t\t\tfmt.Printf(\"%s %s\\n\", t.Title(\"Body:\"), t.Text(string(body)))\n\t\t\t}\n\t\t}\n\n\t\tjson.Unmarshal(body, &obj)\n\t\tif vconfig.Debug() {\n\t\t\tfmt.Printf(\"%s %s\\n\", t.Title(\"Unmarshaled object: \"), t.Text(\"%#v\", obj))\n\t\t\tfmt.Println()\n\t\t}\n\t}\n\n\t// Restore body.\n\tresp.Body = save\n\tresp.ContentLength = savecl\n\treturn err\n}", "func (v *SecondService_EchoStructMap_Result) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson899419cDecodeGithubComUberZanzibarExamplesExampleGatewayBuildGenCodeClientsBazBazSecondServiceEchoStructMap(&r, v)\n\treturn r.Error()\n}", "func parseResponse(res Response) events.APIGatewayProxyResponse {\n\tif res.Headers != nil {\n\t\tres.APIGatewayProxyResponse.Headers = res.Headers\n\t}\n\n\tif res.StatusCode > 0 {\n\t\tres.APIGatewayProxyResponse.StatusCode = res.StatusCode\n\t} else {\n\t\tres.APIGatewayProxyResponse.StatusCode = 200\n\t}\n\n\tif body, err := json.Marshal(res.ResponseData); err != nil {\n\t\t// if the marshalling fails, just stringify whatever's in `res.ResponseData`\n\t\tres.APIGatewayProxyResponse.Body = string(res.ResponseData.(string))\n\t} else {\n\t\tres.APIGatewayProxyResponse.Body = string(body)\n\t}\n\n\treturn res.APIGatewayProxyResponse\n}", "func Unmarshal(data string, out interface{}) error {\n\tdata = strings.Trim(data, \"\\n\")\n\n\tl := Lexer{s: newScanner(data)}\n\tif yyParse(&l) != 0 {\n\t\treturn fmt.Errorf(\"Parse error\")\n\t}\n\n\tv := reflect.ValueOf(out)\n\tif v.Kind() == reflect.Ptr && !v.IsNil() {\n\t\tv = v.Elem()\n\t}\n\n\tunmarshal(l.result, v)\n\n\treturn nil\n}", "func (v *SimpleService_Call_Result) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonA954e906DecodeGithubComUberZanzibarExamplesExampleGatewayBuildGenCodeEndpointsBazBazSimpleServiceCall(&r, v)\n\treturn r.Error()\n}", "func (t *TableEntityQueryResponse) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", t, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"odata.metadata\":\n\t\t\terr = unpopulate(val, \"ODataMetadata\", &t.ODataMetadata)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &t.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", t, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (dst *WorkflowSolutionInstanceResponse) UnmarshalJSON(data []byte) error {\n\tvar err error\n\t// use discriminator value to speed up the lookup\n\tvar jsonDict map[string]interface{}\n\terr = json.Unmarshal(data, &jsonDict)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to unmarshal JSON into map for the discriminator lookup.\")\n\t}\n\n\t// check if the discriminator value is 'mo.AggregateTransform'\n\tif jsonDict[\"ObjectType\"] == 
\"mo.AggregateTransform\" {\n\t\t// try to unmarshal JSON data into MoAggregateTransform\n\t\terr = json.Unmarshal(data, &dst.MoAggregateTransform)\n\t\tif err == nil {\n\t\t\treturn nil // data stored in dst.MoAggregateTransform, return on the first match\n\t\t} else {\n\t\t\tdst.MoAggregateTransform = nil\n\t\t\treturn fmt.Errorf(\"Failed to unmarshal WorkflowSolutionInstanceResponse as MoAggregateTransform: %s\", err.Error())\n\t\t}\n\t}\n\n\t// check if the discriminator value is 'mo.DocumentCount'\n\tif jsonDict[\"ObjectType\"] == \"mo.DocumentCount\" {\n\t\t// try to unmarshal JSON data into MoDocumentCount\n\t\terr = json.Unmarshal(data, &dst.MoDocumentCount)\n\t\tif err == nil {\n\t\t\treturn nil // data stored in dst.MoDocumentCount, return on the first match\n\t\t} else {\n\t\t\tdst.MoDocumentCount = nil\n\t\t\treturn fmt.Errorf(\"Failed to unmarshal WorkflowSolutionInstanceResponse as MoDocumentCount: %s\", err.Error())\n\t\t}\n\t}\n\n\t// check if the discriminator value is 'mo.TagSummary'\n\tif jsonDict[\"ObjectType\"] == \"mo.TagSummary\" {\n\t\t// try to unmarshal JSON data into MoTagSummary\n\t\terr = json.Unmarshal(data, &dst.MoTagSummary)\n\t\tif err == nil {\n\t\t\treturn nil // data stored in dst.MoTagSummary, return on the first match\n\t\t} else {\n\t\t\tdst.MoTagSummary = nil\n\t\t\treturn fmt.Errorf(\"Failed to unmarshal WorkflowSolutionInstanceResponse as MoTagSummary: %s\", err.Error())\n\t\t}\n\t}\n\n\t// check if the discriminator value is 'workflow.SolutionInstance.List'\n\tif jsonDict[\"ObjectType\"] == \"workflow.SolutionInstance.List\" {\n\t\t// try to unmarshal JSON data into WorkflowSolutionInstanceList\n\t\terr = json.Unmarshal(data, &dst.WorkflowSolutionInstanceList)\n\t\tif err == nil {\n\t\t\treturn nil // data stored in dst.WorkflowSolutionInstanceList, return on the first match\n\t\t} else {\n\t\t\tdst.WorkflowSolutionInstanceList = nil\n\t\t\treturn fmt.Errorf(\"Failed to unmarshal WorkflowSolutionInstanceResponse as WorkflowSolutionInstanceList: %s\", err.Error())\n\t\t}\n\t}\n\n\treturn nil\n}", "func (m *SearchResponse) UnmarshalJSON(b []byte) error {\n\treturn SearchResponseJSONUnmarshaler.Unmarshal(bytes.NewReader(b), m)\n}", "func (v *Raw) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer10(&r, v)\n\treturn r.Error()\n}", "func (LinkAccessed) Unmarshal(v []byte) (interface{}, error) {\n\te := LinkAccessed{}\n\terr := json.Unmarshal(v, &e)\n\treturn e, err\n}" ]
[ "0.63455194", "0.6331751", "0.61941075", "0.60945374", "0.6029206", "0.5953289", "0.5942413", "0.5926016", "0.5879392", "0.58535326", "0.57928324", "0.5761479", "0.57413685", "0.57221705", "0.57169855", "0.56886274", "0.56837124", "0.5682268", "0.5682157", "0.5667626", "0.5657062", "0.56311494", "0.56261885", "0.5600782", "0.5585206", "0.5574144", "0.55738115", "0.55726296", "0.55654997", "0.55581367", "0.55453753", "0.5542441", "0.55414766", "0.55377275", "0.55374044", "0.55325335", "0.55302197", "0.55223495", "0.55178416", "0.5505747", "0.5505378", "0.54571337", "0.54562354", "0.54541165", "0.5430155", "0.54170746", "0.5415664", "0.5414158", "0.5409234", "0.5400272", "0.53888094", "0.5387522", "0.53857404", "0.53823924", "0.53589803", "0.53516155", "0.53430164", "0.53414476", "0.5338545", "0.5335548", "0.533242", "0.5331425", "0.53276587", "0.5320331", "0.53174454", "0.5314068", "0.53024656", "0.5294123", "0.52929795", "0.52854747", "0.52820635", "0.52795297", "0.527703", "0.5276954", "0.52741784", "0.5267972", "0.526512", "0.5261299", "0.5260086", "0.52576756", "0.52571356", "0.52550954", "0.5245513", "0.5244482", "0.5241317", "0.52365494", "0.52361006", "0.52261806", "0.5216662", "0.5209224", "0.5209195", "0.52083784", "0.52051556", "0.5204047", "0.5200215", "0.51991194", "0.51942736", "0.51917505", "0.51917267", "0.5189802" ]
0.77195877
0
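The Unmarshall helper in the record above decodes a wrapped API envelope in two steps: first into a generic response whose Data field holds interface{} data, then by re-marshalling Data so it can be decoded into the caller's concrete type. A standalone sketch of the same two-step decode, without the gomega assertions and response-recorder plumbing; the envelope shape and its "data" JSON tag are assumptions inferred from res.Data:

package main

import (
	"encoding/json"
	"fmt"
)

// envelope mirrors the api.Response wrapper assumed by the helper above:
// the payload of interest lives under a generic Data field.
type envelope struct {
	Data interface{} `json:"data"`
}

// unmarshalData performs the two-step decode: body -> envelope, then
// envelope.Data -> the caller's concrete target via a re-marshal.
func unmarshalData(body []byte, target interface{}) error {
	var env envelope
	if err := json.Unmarshal(body, &env); err != nil {
		return fmt.Errorf("unmarshalling response envelope: %w", err)
	}
	raw, err := json.Marshal(env.Data)
	if err != nil {
		return fmt.Errorf("re-marshalling data field: %w", err)
	}
	return json.Unmarshal(raw, target)
}

func main() {
	body := []byte(`{"data":{"id":7,"name":"widget"}}`)
	var out struct {
		ID   int    `json:"id"`
		Name string `json:"name"`
	}
	if err := unmarshalData(body, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out) // {ID:7 Name:widget}
}

Declaring Data as json.RawMessage would skip the intermediate re-marshal, but the two-pass form shown matches the helper above, where Data has already been decoded into interface{}.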
pretty, a method on Response, formats the API response as indented JSON for logging and debugging purposes
func (r *Response) pretty() string {
	// decode the recorded body so it can be re-serialized with indentation
	var body api.Response
	_ = json.Unmarshal(r.recorder.Body.Bytes(), &body)

	lr := LogResponse{Res{
		Code:    r.recorder.Code,
		Headers: r.recorder.Header(),
		Body:    body,
	}}

	// pretty-print the wrapped response; a marshalling failure fails the test
	res, err := json.MarshalIndent(lr, "", " ")
	gomega.Expect(err).To(gomega.BeNil())
	return string(res)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func printResponse(resp interface{}, err error) {\n\tif err == nil {\n\t\tjtext, err := json.MarshalIndent(resp, \"\", \" \")\n\t\tif err == nil {\n\t\t\tfmt.Println(string(jtext))\n\t\t}\n\t}\n\tif err != nil {\n\t\tfmt.Printf(\"err: %s\\n\", err)\n\t}\n}", "func (r Response) String() string {\n\treturn fmt.Sprintf(\"%s : %s\", r.Regex.String(), r.Extra)\n}", "func FormatResponse(o interface{}) string {\n\tout, err := json.MarshalIndent(o, \"\", \"\\t\")\n\tMust(err, `Command failed because an error occurred while prettifying output: %s`, err)\n\treturn string(out)\n}", "func (c *Client) dumpResponse(resp *http.Response) {\n\t// ignore errors dumping response - no recovery from this\n\tresponseDump, _ := httputil.DumpResponse(resp, true)\n\tfmt.Fprintln(c.Debug, string(responseDump))\n\tfmt.Fprintln(c.Debug)\n}", "func dumpResponse(res *http.Response) ([]byte, error) {\n\treturn httputil.DumpResponse(res, true)\n}", "func (c *TogglHttpClient) dumpResponse(resp *http.Response) {\n\tif c.traceLog != nil {\n\t\tout, err := httputil.DumpResponse(resp, true)\n\t\tif err == nil {\n\t\t\tc.tracef(\"\\n\\n%s\\n\", string(out))\n\t\t}\n\t}\n}", "func FormatResponseLog(resp *restful.Response, req *restful.Request) string {\n\treturn fmt.Sprintf(ResponseLogString, req.Request.RemoteAddr, resp.StatusCode())\n}", "func printResponse(resp *http.Response) error {\n\tbodyBytes, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tbodyString := string(bodyBytes)\n\tfmt.Printf(\"response status:%s, body: %s\\n\", resp.Status, bodyString)\n}", "func (c *client) dumpResponse(resp *http.Response) {\n\tif c.tracelogger != nil {\n\t\tout, err := httputil.DumpResponse(resp, true)\n\t\tif err == nil {\n\t\t\tc.tracelogger.Log(string(out))\n\t\t}\n\t}\n}", "func (c *Client) dumpResponse(resp *http.Response) {\n\tif c.tracelog != nil {\n\t\tout, err := httputil.DumpResponse(resp, true)\n\t\tif err == nil {\n\t\t\tc.tracef(\"%s\\n\", string(out))\n\t\t}\n\t}\n}", "func PrettyPrintActuatorInfoResponse(actuatorResponse string) {\n\n\treader := MakeDynamicStructReader(ActuatorInfoProperties{}, actuatorResponse)\n\n\trowConfigAutoMerge := table.RowConfig{AutoMerge: true}\n\n\tt := MakeTable()\n\n\t// Parse Service Info\n\tif reader.HasField(\"Title\") {\n\n\t\ttitle := reader.GetField(\"Title\").String()\n\n\t\tif title == \"\" {\n\n\t\t\tVLog(\"[pp] Title was empty... Skipping parsing of Title\")\n\n\t\t} else {\n\t\t\tt.AppendHeader(table.Row{\n\t\t\t\ttext.Bold.Sprint(\"Service Info\"), text.Bold.Sprint(\"Service Info\"),\n\t\t\t}, rowConfigAutoMerge)\n\n\t\t\tt.AppendSeparator()\n\n\t\t\tt.AppendRow(table.Row{\n\t\t\t\t\"title\", title,\n\t\t\t}, rowConfigAutoMerge)\n\n\t\t\trenderAndResetTable(t)\n\n\t\t}\n\t}\n\n\t// Parse Git info\n\tif reader.HasField(\"Git\") {\n\n\t\tgitInfo := reader.GetField(\"Git\").Interface().(ActuatorInfoGitProperties)\n\n\t\tif gitInfo.Branch == \"\" {\n\n\t\t\tVLog(\"[pp] gitInfo.Branch was empty... 
Skipping parsing of GitInfo\")\n\n\t\t} else {\n\t\t\tt.AppendHeader(table.Row{\n\t\t\t\ttext.Bold.Sprint(\"Git Info\"), text.Bold.Sprint(\"Git Info\"),\n\t\t\t}, rowConfigAutoMerge)\n\n\t\t\tt.AppendSeparator()\n\n\t\t\tt.AppendRow(table.Row{\n\t\t\t\t\"branch\", gitInfo.Branch,\n\t\t\t}, rowConfigAutoMerge)\n\n\t\t\t// Parse commit related info in response.git.commit\n\t\t\tfor k, v := range gitInfo.Commit {\n\t\t\t\tif k == \"id\" {\n\t\t\t\t\tswitch v.(type) {\n\t\t\t\t\tcase string:\n\t\t\t\t\t\t// Target's info.git.mode config was set to DEFAULT\n\t\t\t\t\t\t// Sample -\n\t\t\t\t\t\t// \"commit\": {\n\t\t\t\t\t\t// \t\"id\": \"01dbf9f\",\n\t\t\t\t\t\t// \t\"time\": \"2021-03-14 23:30:28+0000\"\n\t\t\t\t\t\t// }\n\t\t\t\t\t\tt.AppendRow(table.Row{\n\t\t\t\t\t\t\t\"commit.ID\", fmt.Sprintf(\"%s\", v),\n\t\t\t\t\t\t}, rowConfigAutoMerge)\n\n\t\t\t\t\tdefault:\n\t\t\t\t\t\t// Target's info.git.mode config was set to FULL\n\t\t\t\t\t\t// Sample -\n\t\t\t\t\t\t// \"commit\": {\n\t\t\t\t\t\t// \t\"time\": \"2021-03-14 23:30:28+0000\",\n\t\t\t\t\t\t// \t\"message\": {\n\t\t\t\t\t\t// \t\t\"full\": \"dev: wip pretty-printing git info\\n\",\n\t\t\t\t\t\t// \t\t\"short\": \"dev: wip pretty-printing git info\"\n\t\t\t\t\t\t// \t},\n\t\t\t\t\t\t// \t\"id\": {\n\t\t\t\t\t\t// \t\t\"describe\": \"0.0.2-5-g01dbf9f-dirty\",\n\t\t\t\t\t\t// \t\t\"abbrev\": \"01dbf9f\",\n\t\t\t\t\t\t// \t\t\"full\": \"01dbf9f76c23701dbccf44cd4b8e44abd6ec8640\"\n\t\t\t\t\t\t// \t},\n\t\t\t\t\t\t// \t\"user\": {\n\t\t\t\t\t\t// \t\t\"email\": \"[email protected]\",\n\t\t\t\t\t\t// \t\t\"name\": \"Archit Khode\"\n\t\t\t\t\t\t// \t}\n\t\t\t\t\t\t// },\n\t\t\t\t\t\tfor v_k, v_v := range v.(map[string]interface{}) {\n\t\t\t\t\t\t\tt.AppendRow(table.Row{\n\t\t\t\t\t\t\t\tfmt.Sprintf(\"commit.%v\", v_k), fmt.Sprintf(\"%v\", v_v),\n\t\t\t\t\t\t\t}, rowConfigAutoMerge)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif k == \"time\" {\n\t\t\t\t\tswitch v.(type) {\n\t\t\t\t\tcase string:\n\t\t\t\t\t\tt.AppendRow(table.Row{\n\t\t\t\t\t\t\t\"commit.time\", fmt.Sprintf(\"%s\", v),\n\t\t\t\t\t\t}, rowConfigAutoMerge)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\trenderAndResetTable(t)\n\n\t\t}\n\t}\n\n\t// Parse Build info\n\tif reader.HasField(\"Build\") {\n\n\t\tbuildInfo := reader.GetField(\"Build\").Interface().(map[string]interface{})\n\n\t\tif len(buildInfo) == 0 {\n\t\t\tVLog(\"[pp] buildInfo was empty... 
Skipping parsing of buildInfo\")\n\t\t} else {\n\n\t\t\tt.AppendHeader(table.Row{\n\t\t\t\ttext.Bold.Sprint(\"Build Info\"), text.Bold.Sprint(\"Build Info\"),\n\t\t\t}, rowConfigAutoMerge)\n\n\t\t\tt.AppendSeparator()\n\n\t\t\tfor k, v := range buildInfo {\n\t\t\t\tswitch v.(type) {\n\t\t\t\tcase string:\n\t\t\t\t\t// Sample -\n\t\t\t\t\t// \"build\": {\n\t\t\t\t\t// \t\"artifact\": \"demo-service\",\n\t\t\t\t\t// \t\"name\": \"demo-service\",\n\t\t\t\t\t// \t\"time\": \"2021-03-29T21:47:03.802Z\",\n\t\t\t\t\t// \t\"version\": \"0.0.1-SNAPSHOT\",\n\t\t\t\t\t// \t\"group\": \"xyz.archit\"\n\t\t\t\t\t// }\n\t\t\t\t\tt.AppendRow(table.Row{\n\t\t\t\t\t\tfmt.Sprintf(\"%s\", k), fmt.Sprintf(\"%s\", v),\n\t\t\t\t\t}, rowConfigAutoMerge)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\trenderAndResetTable(t)\n\n\t\t}\n\n\t}\n\n\tif CLIConfig.Verbose {\n\n\t\tt.AppendHeader(table.Row{\n\t\t\ttext.Bold.Sprint(\"Raw /actuator/info Response\"),\n\t\t}, rowConfigAutoMerge)\n\n\t\tt.AppendRow(table.Row{\n\t\t\tPrettyJSON(actuatorResponse),\n\t\t}, rowConfigAutoMerge)\n\n\t}\n\n\trenderAndResetTable(t)\n\n}", "func Log(v interface{}) *CustomResponse {\n\tutil.Println(v)\n\treturn customResponse\n}", "func (j JSONResponse) String() string {\n\tstr, err := json.MarshalIndent(j, \"\", \" \")\n\tif err != nil {\n\t\treturn fmt.Sprintf(`{\n \"error\": \"%v\"\n}`, err)\n\t}\n\n\treturn string(str)\n}", "func (r Response) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func (resp *response) Info() string {\n\tif resp.resp == nil {\n\t\treturn \"\"\n\t}\n\tvar (\n\t\tout bytes.Buffer\n\t\tstr string\n\t\tresponse = resp.resp\n\t)\n\tif str = resp.String(); str == \"\" {\n\t\treturn str\n\t}\n\tout.WriteString(fmt.Sprint(response.Proto, \" \", resp.Status))\n\tif len(response.Header) > 0 {\n\t\tfor name, values := range response.Header {\n\t\t\tfor _, value := range values {\n\t\t\t\tout.WriteString(fmt.Sprintf(\"\\n%s:%s\", name, value))\n\t\t\t}\n\t\t}\n\t}\n\t//body\n\tout.WriteString(fmt.Sprint(\"\\n\\n\", str))\n\treturn out.String()\n}", "func (r *Response) String() string {\n\n\tbasicCode := r.BasicCode\n\tcomment := r.Comment\n\tif len(comment) == 0 && r.BasicCode == 0 {\n\t\tvar ok bool\n\t\tif comment, ok = defaultTexts.m[EnhancedStatusCode{r.Class, r.EnhancedCode}]; !ok {\n\t\t\tswitch r.Class {\n\t\t\tcase 2:\n\t\t\t\tcomment = \"OK\"\n\t\t\tcase 4:\n\t\t\t\tcomment = \"Temporary failure.\"\n\t\t\tcase 5:\n\t\t\t\tcomment = \"Permanent failure.\"\n\t\t\t}\n\t\t}\n\t}\n\te := EnhancedStatusCode{r.Class, r.EnhancedCode}\n\tif r.BasicCode == 0 {\n\t\tbasicCode = getBasicStatusCode(e)\n\t}\n\n\treturn fmt.Sprintf(\"%d %s %s\", basicCode, e.String(), comment)\n}", "func (r Response) String() string {\n\t// format:\n\t// VALUE <key> <flags> <bytes> [<cas unique>]\\r\\n\n\t//<data block>\\r\\n\n\n\tvar b bytes.Buffer\n\n\tfor i := range r.Values {\n\t\t//b.WriteString(fmt.Sprintf(\"VALUE %s %s %d\\r\\n\", r.Values[i].Key, r.Values[i].Flags, len(r.Values[i].Data)))\n\t\tb.WriteString(\"VALUE \")\n\t\tb.WriteString(r.Values[i].Key)\n\t\tb.WriteString(\" \")\n\t\tb.WriteString(r.Values[i].Flags)\n\t\tb.WriteString(\" \")\n\t\tb.WriteString(strconv.Itoa(len(r.Values[i].Data)))\n\n\t\tif r.Values[i].Cas != \"\" {\n\t\t\tb.WriteString(\" \")\n\t\t\tb.WriteString(r.Values[i].Cas)\n\t\t}\n\n\t\tb.WriteString(\"\\r\\n\")\n\n\t\tb.Write(r.Values[i].Data)\n\t\tb.WriteString(\"\\r\\n\")\n\t}\n\n\tb.WriteString(r.Response)\n\tb.WriteString(\"\\r\\n\")\n\n\treturn 
b.String()\n}", "func (self *BatchResponse) ResponseAsString() string {\n\treturn string(self.Debug.RawResponse)\n}", "func (a Authorization) PrettyPrint() string {\n\tbs, err := json.MarshalIndent(a, \"\", \" \")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn string(bs)\n}", "func logEndpointResponse(response *http.Response, postback Pbo) {\n\tv_info.Println(\"Received response from : < \" + postback.Url+\" >\" )\n\tv_info.Println(\"Response Code:\", response.StatusCode)\n\tbody, _ := ioutil.ReadAll(response.Body)\n\tv_info.Println(\"Response Body:\", string(body))\n}", "func (s MethodResponse) String() string {\n\treturn awsutil.Prettify(s)\n}", "func logRequestResponse(r *ssh.Request, ok bool, data []byte, info string) {\n\tlog.Printf(\n\t\t\"%v Response Type:%q Payload:%q OK:%v ResData:%q\",\n\t\tinfo,\n\t\tr.Type,\n\t\tr.Payload,\n\t\tok,\n\t\tdata,\n\t)\n}", "func LogResponse(logger log.Logger) autorest.RespondDecorator {\n\treturn func(r autorest.Responder) autorest.Responder {\n\t\treturn autorest.ResponderFunc(func(resp *http.Response) error {\n\t\t\tif resp != nil {\n\t\t\t\tprovider, resource := parseServiceURL(resp.Request.URL.Path)\n\t\t\t\tapiRequestCounter.WithLabelValues(provider, resource, strconv.Itoa(resp.StatusCode)).Inc()\n\n\t\t\t\tif logger.GetLogLevel() == log.DebugLevel {\n\t\t\t\t\tif start, ok := resp.Request.Context().Value(timeKey).(time.Time); ok {\n\t\t\t\t\t\tlogger.\n\t\t\t\t\t\t\tWith(\"path\", resp.Request.URL.Path).\n\t\t\t\t\t\t\tWith(\"status\", resp.StatusCode).\n\t\t\t\t\t\t\tWith(\"time\", time.Since(start)).\n\t\t\t\t\t\t\tDebug(\"request\")\n\t\t\t\t\t}\n\n\t\t\t\t\tif dump, e := httputil.DumpResponse(resp, false); e == nil {\n\t\t\t\t\t\tlogger.Debug(string(dump))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn r.Respond(resp)\n\t\t})\n\t}\n}", "func printResponseBody(res *http.Response) {\n\tdefer res.Body.Close()\n\tresponseBodyBytes, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tresponseBody := string(responseBodyBytes)\n\tfmt.Println(responseBody)\n}", "func (s FunctionResponse) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ResponseDetails) String() string {\n\treturn awsutil.Prettify(s)\n}", "func Dump(t *testing.T, resp *http.Response) {\n\t// dump request\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"\\n\")\n\tbuffer.WriteString(fmt.Sprintf(\"%v %v\\n\", resp.Request.Method, resp.Request.URL))\n\tfor k, v := range resp.Request.Header {\n\t\tif len(k) > 0 {\n\t\t\tbuffer.WriteString(fmt.Sprintf(\"%s : %v\\n\", k, strings.Join(v, \",\")))\n\t\t}\n\t}\n\tif resp == nil {\n\t\tbuffer.WriteString(\"-- no response --\")\n\t\tLogf(t, buffer.String())\n\t\treturn\n\t}\n\t// dump response\n\tbuffer.WriteString(fmt.Sprintf(\"\\n%s\\n\", resp.Status))\n\tfor k, v := range resp.Header {\n\t\tif len(k) > 0 {\n\t\t\tbuffer.WriteString(fmt.Sprintf(\"%s : %v\\n\", k, strings.Join(v, \",\")))\n\t\t}\n\t}\n\tif resp.Body != nil {\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tif resp.StatusCode/100 == 3 {\n\t\t\t\t// redirect closes body ; nothing to read\n\t\t\t\tbuffer.WriteString(\"\\n\")\n\t\t\t} else {\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"unable to read body:%v\", err))\n\t\t\t}\n\t\t} else {\n\t\t\tif len(body) > 0 {\n\t\t\t\tbuffer.WriteString(\"\\n\")\n\t\t\t}\n\t\t\tbuffer.WriteString(string(body))\n\t\t}\n\t\tresp.Body.Close()\n\t\t// put the body back for re-reads\n\t\tresp.Body = 
ioutil.NopCloser(bytes.NewReader(body))\n\t}\n\tbuffer.WriteString(\"\\n\")\n\tLogf(t, buffer.String())\n}", "func (r *Response) Dump() {\n\tlog.Println(\"-\", r)\n\n\tvar ext string\n\texts, _ := mime.ExtensionsByType(r.Header.Get(\"Content-Type\"))\n\tif len(exts) > 0 {\n\t\text = exts[0]\n\t}\n\tname := fmt.Sprintf(\"response-dump-%d%s\", time.Now().Unix(), ext)\n\tf, err := os.Create(name)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer f.Close()\n\n\tif strings.HasPrefix(r.Header.Get(\"Content-Type\"), \"text/html\") {\n\t\tbuf := bytes.NewBufferString(\"<pre style=\\\"background:#000;color:#0f0;font:13px/1.2 monospace;padding:20px\\\">\")\n\t\tlog.New(buf, \"\", log.LstdFlags).Print(\"- \", r)\n\t\tbuf.WriteString(\"</pre>\")\n\t\tif _, err = io.Copy(f, buf); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tif _, err = io.Copy(f, r.Body); err != nil {\n\t\tpanic(err)\n\t}\n\n\topenFile(name)\n}", "func (s RouteResponse) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s RouteResponse) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s IntegrationResponse) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s IntegrationResponse) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s IntegrationResponse) String() string {\n\treturn awsutil.Prettify(s)\n}", "func PrintResponse(response *http.Response) {\n\tvar colorCode string\n\tif response.StatusCode == 200 {\n\t\tcolorCode = \"g\"\n\t} else {\n\t\tcolorCode = \"r\"\n\t}\n\tcolor.Printf(colorCode, fmt.Sprintf(\"%s\\n\", response.Status))\n}", "func (resp *Response) Dump(body bool) ([]byte, error) {\n\treturn httputil.DumpResponse(resp.Response, body)\n}", "func (t traceV4) Response(resp *http.Response) (err error) {\n\tvar respTrace []byte\n\t// For errors we make sure to dump response body as well.\n\tif resp.StatusCode != http.StatusOK &&\n\t\tresp.StatusCode != http.StatusPartialContent &&\n\t\tresp.StatusCode != http.StatusNoContent {\n\t\trespTrace, err = httputil.DumpResponse(resp, true)\n\t} else {\n\t\trespTrace, err = httputil.DumpResponse(resp, false)\n\t}\n\tif err == nil {\n\t\tconsole.Debug(string(respTrace))\n\t}\n\n\tif resp.TLS != nil {\n\t\tprintTLSCertInfo(resp.TLS)\n\t}\n\n\treturn err\n}", "func (adm AdminClient) dumpHTTP(req *http.Request, resp *http.Response) error {\n\t// Starts http dump.\n\t_, err := fmt.Fprintln(adm.traceOutput, \"---------START-HTTP---------\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Filter out Signature field from Authorization header.\n\tadm.filterSignature(req)\n\n\t// Only display request header.\n\treqTrace, err := httputil.DumpRequestOut(req, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Write request to trace output.\n\t_, err = fmt.Fprint(adm.traceOutput, string(reqTrace))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Only display response header.\n\tvar respTrace []byte\n\n\t// For errors we make sure to dump response body as well.\n\tif resp.StatusCode != http.StatusOK &&\n\t\tresp.StatusCode != http.StatusPartialContent &&\n\t\tresp.StatusCode != http.StatusNoContent {\n\t\trespTrace, err = httputil.DumpResponse(resp, true)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\t// WORKAROUND for https://github.com/golang/go/issues/13942.\n\t\t// httputil.DumpResponse does not print response headers for\n\t\t// all successful calls which have response ContentLength set\n\t\t// to zero. 
Keep this workaround until the above bug is fixed.\n\t\tif resp.ContentLength == 0 {\n\t\t\tvar buffer bytes.Buffer\n\t\t\tif err = resp.Header.Write(&buffer); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\trespTrace = buffer.Bytes()\n\t\t\trespTrace = append(respTrace, []byte(\"\\r\\n\")...)\n\t\t} else {\n\t\t\trespTrace, err = httputil.DumpResponse(resp, false)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\t// Write response to trace output.\n\t_, err = fmt.Fprint(adm.traceOutput, strings.TrimSuffix(string(respTrace), \"\\r\\n\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Ends the http dump.\n\t_, err = fmt.Fprintln(adm.traceOutput, \"---------END-HTTP---------\")\n\treturn err\n}", "func (r Result) PrettyPrintJSON() string {\n\tpretty, err := json.MarshalIndent(r.Body, \"\", \" \")\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn string(pretty)\n}", "func logResponse(ctx context.Context, start time.Time, err error, msg string) {\n\tvar fields map[string]interface{}\n\tvar ok bool\n\tif fields, ok = ctx.Value(ctxKey{}).(map[string]interface{}); !ok {\n\t\tfields = map[string]interface{}{}\n\t}\n\n\t// Calculate the elapsed time\n\tfields[\"elapsed\"] = time.Since(start).Nanoseconds()\n\tfields[\"start\"] = start.Format(time.RFC3339Nano)\n\n\t// Response code\n\tcode := status.Code(err)\n\tfields[\"code\"] = code\n\n\t// Log the response finished\n\tlevelLog(log.WithFields(log.Fields(fields)), DefaultCodeToLevel(code), msg)\n}", "func (s UtteranceBotResponse) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (l loggedRoundTripper) logResponse(req *http.Request, res *http.Response, err error, duration time.Duration) {\n\tduration /= time.Millisecond\n\tif err != nil {\n\t\tl.log.Debugf(\"HTTP Request (%s) %s [time (ms): %d, error=%q]\", req.Method, req.URL, duration, err.Error())\n\t} else {\n\t\tl.log.Debugf(\"HTTP Request (%s) %s [time (ms): %d, status: %d]\", req.Method, req.URL, duration, res.StatusCode)\n\t}\n}", "func (s ImportAssetFromApiGatewayApiResponseDetails) String() string {\n\treturn awsutil.Prettify(s)\n}", "func prettyPrint(v interface{}) {\n\tencoder := json.NewEncoder(os.Stdout)\n\tencoder.SetIndent(\"\", \" \")\n\n\tif err := encoder.Encode(v); err != nil {\n\t\tlog.Warning(\"Unable to pretty-print tunnel information, will dump raw data instead...\")\n\t\tfmt.Printf(\"%+v\\n\", v)\n\t}\n}", "func (r *Response) String() string {\n\treturn string(r.Data)\n}", "func (s ComponentResponse) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (l *Logger) PrintResponse(resp *http.Response) {\n\tvar p = printer{logger: l}\n\tp.printResponse(resp)\n}", "func (s ExportRevisionsToS3ResponseDetails) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (t *testResult) PrettyPrintLines() []string {\n\tattrs := t.Attributes()\n\tout := []string{}\n\tout = append(out, fmt.Sprintf(\"request: %s %s\", attrs.Method, attrs.Path))\n\tif len(attrs.Headers) > 0 {\n\t\tpairs := make([]string, 0, len(attrs.Headers))\n\t\tfor key, value := range attrs.Headers {\n\t\t\tpairs = append(pairs, fmt.Sprintf(`\"%s\"=\"%s\"`, key, value))\n\t\t}\n\t\tout = append(out, fmt.Sprintf(\"request headers: %s\", strings.Join(pairs, \", \")))\n\t}\n\n\troute := t.Route()\n\tif route != nil {\n\t\tout = append(out, fmt.Sprintf(\"matching route id: %s\", route.Id))\n\t\tout = append(out, fmt.Sprintf(\"matching route:\\n```%s```\", t.prettyPrintRoute()))\n\t}\n\treturn out\n}", "func (out *JsonOutput) Print() 
{\n\tout.ResponseWriter.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tout.ResponseWriter.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tjson.NewEncoder(out.ResponseWriter).Encode(out.APIOutput)\n}", "func (logger *StdLogger) PrintResponse(sessionID string, code int, message string) {\n\tlog.Printf(\"%s < %d %s\", sessionID, code, message)\n}", "func (t traceV2) Response(resp *http.Response) (err error) {\n\tvar respTrace []byte\n\t// For errors we make sure to dump response body as well.\n\tif resp.StatusCode != http.StatusOK &&\n\t\tresp.StatusCode != http.StatusPartialContent &&\n\t\tresp.StatusCode != http.StatusNoContent {\n\t\trespTrace, err = httputil.DumpResponse(resp, true)\n\t} else {\n\t\trespTrace, err = httputil.DumpResponse(resp, false)\n\t}\n\tif err == nil {\n\t\tconsole.Debug(string(respTrace))\n\t}\n\n\tif globalInsecure && resp.TLS != nil {\n\t\tdumpTLSCertificates(resp.TLS)\n\t}\n\n\treturn err\n}", "func (s GetApiOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o ExportPolicyCreateResponse) String() string {\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(fmt.Sprintf(\"%s: %s\\n\", \"version\", o.ResponseVersion))\n\tbuffer.WriteString(fmt.Sprintf(\"%s: %s\\n\", \"xmlns\", o.ResponseXmlns))\n\tbuffer.WriteString(fmt.Sprintf(\"%s: %s\\n\", \"results\", o.Result))\n\treturn buffer.String()\n}", "func (m *Response) DebugString() string {\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(fmt.Sprintf(\"%d: \", m.RequestID))\n\tbuffer.WriteString(m.CMDMethod.String())\n\tbuffer.WriteString(\"/\")\n\treturn buffer.String()\n}", "func responseLogger(c *gin.Context) {\n\tdetails := obtainBodyLogWriter(c)\n\n\tc.Next()\n\n\tdumpPayload := repository.DumpResponsePayload{\n\t\tHeaders: details.Blw.Header(),\n\t\tBody: details.Blw.Body,\n\t\tStatus: c.Writer.Status(),\n\t}\n\n\tif utils.CheckExcludedPaths(c.FullPath()) {\n\t\tgo repository.DumpRequestResponse(c, Config.ApplicationID, DB, dumpPayload, readBody(details.Rdr))\n\t}\n}", "func encodeGetTagResponse(ctx context.Context, w http.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\tif err != nil {\n\t\t\tlogrus.Warn(err.Error())\n\t\t}\n\treturn\n}", "func (s ComputeResponse) String() string {\n\treturn awsutil.Prettify(s)\n}", "func prettyPrint(data interface{}) {\n\tvar p []byte\n\t// var err := error\n\tp, err := json.MarshalIndent(data, \"\", \"\\t\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tfmt.Printf(\"%s \\n\", p)\n}", "func (s IpAddressResponse) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s IpAddressResponse) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s IpAddressResponse) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Updatescheduleuploadresponse) String() string {\n \n \n o.Headers = map[string]string{\"\": \"\"} \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Forecastservicegoaltemplateimpactoverrideresponse) String() string {\n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o JsonSerializationResponseOutput) Format() pulumi.StringPtrOutput 
{\n\treturn o.ApplyT(func(v JsonSerializationResponse) *string { return v.Format }).(pulumi.StringPtrOutput)\n}", "func (t AuthChallengeResponseResponse) String() string {\n\treturn string(t)\n}", "func PrintLog(t time.Time, reqSize int, resSize int, r *http.Request, compact bool) {\n\tlogStr := \"\"\n\n\tif compact {\n\t\tlogStr = `id: %s | p: %s | pr: %s | ms: %d | rq_ln: %d | rs_ln: %d`\n\t} else {\n\t\tlogStr = `{\"rq_id\": \"%s\", \"rq_path\": \"%s\", \"rq_proto\": \"%s\", \"elapsed_time_ms\": %d, \"rq_length\": %d, \"rs_length\": %d}`\n\t}\n\n\tlogStr = fmt.Sprintf(logStr,\n\t\tr.Header.Get(\"X-Request-Id\"),\n\t\tr.URL.Path,\n\t\tr.Proto,\n\t\ttime.Since(t).Milliseconds(),\n\t\treqSize,\n\t\tresSize,\n\t)\n\n\tlog.Println(logStr)\n}", "func (rp *ResponsePrinter) Response(res InvokerResponse) {\n\tif res.Error != nil {\n\t\tlog.Printf(\"connector-sdk got error: %s\", res.Error.Error())\n\t} else {\n\t\tlog.Printf(\"connector-sdk got result: [%d] %s => %s (%d) bytes\", res.Status, res.Topic, res.Function, len(*res.Body))\n\t\tif rp.PrintResponseBody {\n\t\t\tfmt.Printf(\"[%d] %s => %s\\n%q\", res.Status, res.Topic, res.Function, string(*res.Body))\n\t\t}\n\t}\n}", "func (l *ActivityLogger) LogRequest(resp *http.Response, body *gjson.Result, responseTime int64) {\n\tif l.options.Disabled {\n\t\treturn\n\t}\n\n\tif resp == nil {\n\t\treturn\n\t}\n\n\tif !strings.Contains(l.options.Methods, resp.Request.Method) {\n\t\treturn\n\t}\n\n\tquery, err := url.QueryUnescape(resp.Request.URL.RawQuery)\n\tquery = strings.ReplaceAll(query, `\\`, `\\\\`)\n\tif err != nil {\n\t\tquery = resp.Request.URL.RawQuery\n\t}\n\n\tl.mu.Lock()\n\tdefer l.mu.Unlock()\n\tif resp.StatusCode >= 200 && resp.StatusCode <= 299 {\n\t\tcacheTag := resp.Header.Get(\"ETag\")\n\t\tisCachedResponse := cacheTag != \"\"\n\t\tfmt.Fprintf(l.w,\n\t\t\t`{\"time\":\"%s\",\"ctx\":\"%s\",\"type\":\"request\",\"method\":\"%s\",\"host\":\"%s\",\"path\":\"%s\",\"query\":\"%s\",\"accept\":\"%s\",\"processingMode\":\"%s\",\"statusCode\":%d,\"responseTimeMS\":%d,\"responseSelf\":\"%s\",\"etag\":\"%s\",\"cached\":%v}`+\"\\n\",\n\t\t\ttime.Now().Format(time.RFC3339Nano),\n\t\t\tl.contextID,\n\t\t\tresp.Request.Method,\n\t\t\tresp.Request.URL.Host,\n\t\t\tresp.Request.URL.Path,\n\t\t\tquery,\n\t\t\tresp.Request.Header.Get(\"Accept\"),\n\t\t\tresp.Request.Header.Get(\"X-Cumulocity-Processing-Mode\"),\n\t\t\tresp.StatusCode,\n\t\t\tresponseTime,\n\t\t\tbody.Get(\"self\").Str,\n\t\t\tcacheTag,\n\t\t\tisCachedResponse,\n\t\t)\n\t} else {\n\t\terrorResponse := body.Raw\n\t\tif !strings.HasPrefix(errorResponse, \"{\") {\n\t\t\terrorResponse = \"\\\"\" + errorResponse + \"\\\"\"\n\t\t}\n\t\tfmt.Fprintf(l.w,\n\t\t\t`{\"time\":\"%s\",\"ctx\":\"%s\",\"type\":\"request\",\"method\":\"%s\",\"host\":\"%s\",\"path\":\"%s\",\"query\":\"%s\",\"accept\":\"%s\",\"processingMode\":\"%s\",\"statusCode\":%d,\"responseTimeMS\":%d,\"responseSelf\":\"%s\",\"responseError\":%s}`+\"\\n\",\n\t\t\ttime.Now().Format(time.RFC3339Nano),\n\t\t\tl.contextID,\n\t\t\tresp.Request.Method,\n\t\t\tresp.Request.URL.Host,\n\t\t\tresp.Request.URL.Path,\n\t\t\tquery,\n\t\t\tresp.Request.Header.Get(\"Accept\"),\n\t\t\tresp.Request.Header.Get(\"X-Cumulocity-Processing-Mode\"),\n\t\t\tresp.StatusCode,\n\t\t\tresponseTime,\n\t\t\tbody.Get(\"self\").Str,\n\t\t\terrorResponse,\n\t\t)\n\t}\n}", "func JSON(response []byte) {\n\t// pretty-print the json\n\tutils.PrintJSON(response)\n}", "func (d *deliveryAgent) logResponse(res *response.Response) 
{\n\tlog.Println(\"-----------------------------\")\n\tlog.Println(\"RESPONSE RECEIVED:\")\n\tlog.Printf(\" Response Code: %v\\n\", res.Code)\n\tlog.Printf(\" Response Body: %v\\n\", res.Body)\n\tlog.Printf(\" Response Time: %v\\n\", res.Time)\n\tlog.Printf(\" Delivery Time: %v\\n\", res.DeliveryTime)\n\tlog.Println(\"-----------------------------\")\n}", "func (o *Buagentschedulehistoryresponse) String() string {\n o.PriorPublishedSchedules = []Buschedulereference{{}} \n \n o.DroppedChanges = []Buagentschedulehistorydroppedchange{{}} \n o.Changes = []Buagentschedulehistorychange{{}} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s RestApi) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (logger *FTPLogger) PrintResponse(sessionID string, code int, message string) {\n\tlogrus.WithFields(logrus.Fields{\"time\": time.Now(), \"session\": sessionID, \"code\": code, \"response\": message}).Debugf(\"Response with %q and code %d\", message, code)\n\n}", "func (r *Response) String() string {\n\tif r.Error != nil {\n\t\treturn \"\"\n\t}\n\n\tr.populateResponseByteBuffer()\n\n\treturn r.internalByteBuffer.String()\n}", "func PrettyPrint(val interface{}) {\n\to, e := json.MarshalIndent(val, \"\", \" \")\n\tif e != nil {\n\t\tlog.Panic(e.Error())\n\t}\n\tfmt.Printf(string(o))\n\tfmt.Println()\n}", "func PrettyPrintActuatorHealthResponse(actuatorResponse string) {\n\n\treader := MakeDynamicStructReader(ActuatorHealthProperties{}, actuatorResponse)\n\n\trowConfigAutoMerge := table.RowConfig{AutoMerge: true}\n\n\tt := MakeTable()\n\n\tt.AppendHeader(table.Row{\n\t\ttext.Bold.Sprint(\"Health\"), text.Bold.Sprint(\"Health\"),\n\t}, rowConfigAutoMerge)\n\n\tstatus := reader.GetField(\"Status\").String()\n\tif status == \"UP\" {\n\t\tt.AppendRow(table.Row{\n\t\t\t\"status\", text.FgGreen.Sprint(status),\n\t\t})\n\t} else {\n\t\tt.AppendRow(table.Row{\n\t\t\t\"status\", text.FgRed.Sprint(status),\n\t\t})\n\t}\n\n\tt.Render()\n\n\tt.ResetHeaders()\n\tt.ResetRows()\n\tt.ResetFooters()\n\n}", "func (logger *DiscardLogger) PrintResponse(sessionID string, code int, message string) {}", "func responseLogger(handler http.HandlerFunc) http.HandlerFunc {\n\treturn func (w http.ResponseWriter, r *http.Request){\n\t\tlog.Printf(\"\\n%s %s%s %s\",r.Method, r.Host, r.RequestURI, r.Proto )\n\t\thandler(w,r)\n\t}\n}", "func (s ResponseMetadata) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r *Response) ToString() string {\n\treturn r.recorder.Body.String()\n}", "func encodeUpdateTagResponse(ctx context.Context, w http.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\tif err != nil {\n\t\t\tlogrus.Warn(err.Error())\n\t\t}\n\treturn\n}", "func FormatResponse(res *httptest.ResponseRecorder) LambdaOutput {\n\tresult := LambdaOutput{\n\t\tStatusCode: res.Code,\n\t\tBody: res.Body.String(),\n\t\tHeaders: map[string]string{},\n\t}\n\tfor key := range res.HeaderMap {\n\t\tresult.Headers[key] = res.HeaderMap.Get(key)\n\t}\n\treturn result\n}", "func (r *Response) String() string {\n\tif r.body == nil {\n\t\treturn \"\"\n\t}\n\treturn strings.TrimSpace(string(r.body))\n}", "func JSONLogger(r *http.Request, status int, len int64, d time.Duration) 
{\n\tos.Stderr.WriteString(JSONLogMessage(time.Now, r.Method, r.URL, status, len, d, nil))\n}", "func (statsResponse *StatsResponse) String() string {\n\tstatsResponseBytes, err := json.Marshal(statsResponse)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\treturn string(statsResponseBytes)\n}", "func DumpResponse(resp *Response, body bool) (dump []byte, err os.Error) {\n\tvar b bytes.Buffer\n\tsave := resp.Body\n\tsavecl := resp.ContentLength\n\tif !body || resp.Body == nil {\n\t\tresp.Body = nil\n\t\tresp.ContentLength = 0\n\t} else {\n\t\tsave, resp.Body, err = drainBody(resp.Body)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\terr = resp.Write(&b)\n\tresp.Body = save\n\tresp.ContentLength = savecl\n\tif err != nil {\n\t\treturn\n\t}\n\tdump = b.Bytes()\n\treturn\n}", "func (s VPCConfigResponse) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o SnapmirrorResyncResponse) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (srv *WebServer) logResponse(next http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tlogWriter := statusLogger{w, 200}\n\t\tnext(&logWriter, r)\n\t\tlog.Printf(\"%s %s: %v %s\\n\", r.Method, r.RequestURI, logWriter.status, http.StatusText(logWriter.status))\n\t}\n}", "func LogResponse(res http.Response, body []byte) {\n\tbodyString := string(body)\n\tmessage := fmt.Sprintf(\"Response for [%s]: Status: %s. Body: %s\", res.Request.URL.String(), strconv.Itoa(res.StatusCode), bodyString)\n\tif res.StatusCode >= 200 && res.StatusCode < 300 {\n\t\tLog(LogLevelDefault, message)\n\t} else {\n\t\tLog(LogLevelError, message)\n\t}\n}", "func showPersonInfo(r result) {\n\tif r.Error != nil {\n\t\tlog.Printf(\"Response Error because err: %v\\n\", r.Error)\n\t\treturn\n\t}\n\n\tif len(r.Person.Results) == 0 {\n\t\tlog.Printf(\"Response Error: Person info not found\\n\")\n\t\treturn\n\t}\n\n\tinfo := r.Person.Results[0]\n\tfmt.Printf(\"[%v] Email: %v \\n\", r.Latency, info.Email)\n}", "func (o ExportPolicyCreateResponse) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (s ExportAssetToSignedUrlResponseDetails) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s PropertyResponse) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o SnapmirrorCreateResponse) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (resp *CommonRPCResponse) String() string {\n\treturn fmt.Sprintf(\"<Code: %d, Msg: %s>\", resp.Code, resp.Msg)\n}", "func (s PropertyDefinitionResponse) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateApiOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (HTTPOperation) GetVerboseResponseFieldNames() []string {\n\treturn []string{\"id\", \"date\", \"number available\", \"number total\", \"vaccine\", \"input type\", \"tags\", \"location\", \"created at\"}\n}", "func (dl DefaultLogger) LogResponse(req *http.Request, res *http.Response, err error, duration time.Duration) {\n\tduration /= time.Millisecond\n\tif err != nil {\n\t\tlog.Printf(\"HTTP Request method=%s host=%s path=%s status=error durationMs=%d error=%q\", req.Method, req.Host, req.URL.Path, duration, err.Error())\n\t} else {\n\t\tlog.Printf(\"HTTP Request method=%s host=%s path=%s status=%d durationMs=%d\", req.Method, req.Host, req.URL.Path, res.StatusCode, duration)\n\t}\n}" ]
[ "0.675035", "0.66082674", "0.6589289", "0.64971024", "0.6496821", "0.6450547", "0.64165956", "0.6409296", "0.6408843", "0.63750416", "0.63278466", "0.6296406", "0.6276663", "0.6247866", "0.6224701", "0.6193935", "0.6148054", "0.61447066", "0.6128089", "0.6105928", "0.60850304", "0.6026492", "0.601069", "0.59960616", "0.59222263", "0.5920385", "0.59002614", "0.58527917", "0.5820602", "0.5820602", "0.5791101", "0.5791101", "0.5791101", "0.5780416", "0.5767304", "0.57640123", "0.5744914", "0.57429373", "0.57367295", "0.57292783", "0.57132035", "0.5709411", "0.5691607", "0.5663817", "0.56636053", "0.5661372", "0.56604296", "0.5654659", "0.56477666", "0.5640243", "0.5638414", "0.56376183", "0.5633091", "0.5631499", "0.5622282", "0.5599951", "0.5594331", "0.55901134", "0.5589879", "0.5589879", "0.5589879", "0.55870414", "0.5569709", "0.5562587", "0.5556909", "0.5556797", "0.5534909", "0.5532948", "0.5527836", "0.5527055", "0.5516475", "0.55153406", "0.5513368", "0.550536", "0.55037737", "0.5494107", "0.548612", "0.5483971", "0.54793006", "0.54530644", "0.5442956", "0.54417706", "0.543864", "0.54347026", "0.5434499", "0.54336774", "0.5432847", "0.54312354", "0.54264724", "0.5424404", "0.5423677", "0.5422854", "0.5418978", "0.5418973", "0.5416339", "0.5403496", "0.5398666", "0.53955483", "0.538329", "0.5376318" ]
0.826891
0
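The negatives in the row above cluster around one standard-library idiom: serializing an *http.Response with httputil.DumpResponse and writing the result to a log or debug sink. The sketch below distills that shared pattern into a self-contained, runnable form; it is an illustration grounded only in the standard library, not one of the dataset records, and the canned JSON body is an assumption.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"net/http/httputil"
)

// dumpResponse prints the full wire form of a response, body included,
// mirroring the DumpResponse-based helpers in the snippets above.
func dumpResponse(resp *http.Response) {
	dump, err := httputil.DumpResponse(resp, true)
	if err != nil {
		fmt.Printf("dump error: %v\n", err)
		return
	}
	fmt.Println(string(dump))
}

func main() {
	// Record a canned response so the example is self-contained
	// (the Content-Type and body here are illustrative assumptions).
	rec := httptest.NewRecorder()
	rec.Header().Set("Content-Type", "application/json")
	fmt.Fprint(rec, `{"ok":true}`)

	dumpResponse(rec.Result())
}

Passing body=true drains the response body into the dump, which is why real clients typically restore it afterwards for re-reads, as the Dump helper among the negatives does with ioutil.NopCloser over a fresh bytes.Reader.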
Code returns the actual HTTP status Code
func (r *Response) Code() int { return r.recorder.Code }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func StatusCode(err error) int {\n\tif e := getResponseError(err); e != nil {\n\t\treturn e.StatusCode\n\t} else {\n\t\treturn 0\n\t}\n}", "func (i Internet) StatusCode() int {\n\tstatusCode, _ := strconv.Atoi(i.Faker.RandomStringElement(statusCodes))\n\treturn statusCode\n}", "func (o *VerifyConnectionOK) Code() int {\n\treturn 200\n}", "func (err *Error) StatusCode() int {\n\tif err.statusCode == 0 {\n\t\treturn http.StatusInternalServerError\n\t}\n\treturn err.statusCode\n}", "func (o *UpdateAntivirusServerDefault) Code() int {\n\treturn o._statusCode\n}", "func (se StatusError) Code() int {\n\treturn int(se)\n}", "func (o *GetRunDownstreamLineageOK) Code() int {\n\treturn 200\n}", "func (o *CreateAntivirusServerDefault) Code() int {\n\treturn o._statusCode\n}", "func (si SignedIdentifiers) StatusCode() int {\n\treturn si.rawResponse.StatusCode\n}", "func (e Error) Code() safehttp.StatusCode {\n\treturn e.StatusCode\n}", "func (o *WebModifyDefault) Code() int {\n\treturn o._statusCode\n}", "func (cfr CreateFilesystemResponse) StatusCode() int {\n\treturn cfr.rawResponse.StatusCode\n}", "func (o *IngestFromURIDefault) Code() int {\n\treturn o._statusCode\n}", "func (code Code) HttpCode() int {\n\tswitch code {\n case ResponseCreated: return http.StatusCreated\n case ResponseDeleted: return http.StatusOK\n case ResponseValid: return http.StatusNotModified\n case ResponseChanged: return http.StatusNoContent\n case ResponseContent: return http.StatusOK\n case ResponseBadRequest: return http.StatusBadRequest\n case ResponseUnauthorized: return http.StatusUnauthorized\n case ResponseBadOption: return http.StatusBadRequest\n case ResponseForbidden: return http.StatusForbidden\n case ResponseNotFound: return http.StatusNotFound\n case ResponseMethodNotAllowed: return http.StatusBadRequest\n case ResponseNotAcceptable: return http.StatusNotAcceptable\n case ResponseConflict: return http.StatusConflict\n case ResponsePreconditionFailed: return http.StatusPreconditionFailed\n case RequestEntityTooLarge: return http.StatusRequestEntityTooLarge\n case ResponseUnsupportedMediaType: return http.StatusUnsupportedMediaType\n case ResponseUnprocessableEntity: return http.StatusUnprocessableEntity\n case ResponseInternalServerError: return http.StatusInternalServerError\n case ResponseNotImplemented: return http.StatusNotImplemented\n case ResponseBadGateway: return http.StatusBadGateway\n case ResponseServiceUnavailable: return http.StatusServiceUnavailable\n case ResponseGatewayTimeout: return http.StatusGatewayTimeout\n case ResponseProxyingNotSupported: return http.StatusBadGateway\n }\n return 0\n}", "func (r *request) StatusCode() int {\n\treturn 400\n}", "func (r *Response) StatusCode() int {\n\treturn r.statusCode\n}", "func (h *ResponseHeader) StatusCode() int {\n\tif h.statusCode == 0 {\n\t\treturn StatusOK\n\t}\n\treturn h.statusCode\n}", "func (res *ResponseRecorder) StatusCode() int {\n\tif res.statusCode == 0 {\n\t\treturn 200\n\t}\n\treturn res.statusCode\n}", "func (r *Response) StatusCode() int {\n\tif r.RawResponse == nil {\n\t\treturn 0\n\t}\n\treturn r.RawResponse.StatusCode\n}", "func (r *Response) StatusCode() int {\n\tif r.RawResponse == nil {\n\t\treturn 0\n\t}\n\treturn r.RawResponse.StatusCode\n}", "func (err errorResponse) StatusCode() int {\n\treturn err.Status\n}", "func (err *DecodeError) StatusCode() int {\n\treturn err.sc\n}", "func (lpr LeasePathResponse) StatusCode() int {\n\treturn lpr.rawResponse.StatusCode\n}", "func (_BaseContent *BaseContentCaller) StatusCode(opts 
*bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _BaseContent.contract.Call(opts, &out, \"statusCode\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (cpr CreatePathResponse) StatusCode() int {\n\treturn cpr.rawResponse.StatusCode\n}", "func (p *ProxyWriter) StatusCode() int {\n\tif p.Code == 0 {\n\t\t// per contract standard lib will set this to http.StatusOK if not set\n\t\t// by user, here we avoid the confusion by mirroring this logic\n\t\treturn http.StatusOK\n\t}\n\treturn p.Code\n}", "func (t *httpError) StatusCode() int {\n\treturn t.statusCode\n}", "func (sss StorageServiceStats) StatusCode() int {\n\treturn sss.rawResponse.StatusCode\n}", "func (r requestError) StatusCode() int {\n\treturn r.statusCode\n}", "func StatusCode(err error) int {\n\tif err == nil {\n\t\treturn statusNoError\n\t}\n\tif scErr, ok := err.(StatusCoder); ok {\n\t\treturn scErr.StatusCode()\n\t}\n\treturn statusInternalServerError\n}", "func (s *FileSystemLimitExceeded) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (o *GetPassesDefault) Code() int {\n\treturn o._statusCode\n}", "func (bshhr BlobsSetHTTPHeadersResponse) StatusCode() int {\n\treturn bshhr.rawResponse.StatusCode\n}", "func (re *RequestError) StatusCode() int {\n\treturn re.response.StatusCode\n}", "func (r *Response) StatusCode() int {\n\treturn r.rawResponse.StatusCode\n}", "func (err *FetchError) StatusCode() int {\n\treturn err.sc\n}", "func (s *TooManyRequests) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (s *AccessPointLimitExceeded) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (o *GetVersionOK) Code() int {\n\treturn 200\n}", "func (o *TransferRunsOK) Code() int {\n\treturn 200\n}", "func (s *MaximumResultReturnedException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (bl BlockList) StatusCode() int {\n\treturn bl.rawResponse.StatusCode\n}", "func (abcr AppendBlobsCreateResponse) StatusCode() int {\n\treturn abcr.rawResponse.StatusCode\n}", "func (o *DescribeServerOK) Code() int {\n\treturn 200\n}", "func (e *HTTPResponseEvent) StatusCode() int {\n\treturn e.statusCode\n}", "func (pl PageList) StatusCode() int {\n\treturn pl.rawResponse.StatusCode\n}", "func (gr GenericResponse) StatusCode() int {\n\treturn gr.status\n}", "func (o *AddAPIDefault) Code() int {\n\treturn o._statusCode\n}", "func (o *ObjectsGetOK) Code() int {\n\treturn 200\n}", "func (s *ThroughputLimitExceeded) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (s *NetworkInterfaceLimitExceeded) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (o *GetVulnerabilitiesDefault) Code() int {\n\treturn o._statusCode\n}", "func (o *UtilityServiceReadyDefault) Code() int {\n\treturn o._statusCode\n}", "func (s *ResourceLimitExceeded) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (upr UpdatePathResponse) StatusCode() int {\n\treturn upr.rawResponse.StatusCode\n}", "func (s *SecurityGroupLimitExceeded) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (s *IncorrectFileSystemLifeCycleState) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (o *LoginOK) Code() int {\n\treturn 200\n}", "func (dr downloadResponse) StatusCode() int {\n\treturn dr.rawResponse.StatusCode\n}", "func (s *BlockedException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (o 
*GetRunDownstreamLineageDefault) Code() int {\n\treturn o._statusCode\n}", "func (o *UpdateAPIDefault) Code() int {\n\treturn o._statusCode\n}", "func (ssp StorageServiceProperties) StatusCode() int {\n\treturn ssp.rawResponse.StatusCode\n}", "func (o *GetSummarySystemDefault) Code() int {\n\treturn o._statusCode\n}", "func StatusCode(err error) int {\n\tif err == nil {\n\t\treturn 0\n\t}\n\tif code, ok := LookupStatusCoder(err); ok {\n\t\treturn code\n\t}\n\treturn 1\n}", "func (o *GetVersionDefault) Code() int {\n\treturn o._statusCode\n}", "func (s *FileSystemAlreadyExists) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (s *Status) Code() int {\n\tif s == nil || s.s == nil {\n\t\treturn int(OK.s.Code)\n\t}\n\n\treturn int(s.s.Code)\n}", "func (bur BlobsUndeleteResponse) StatusCode() int {\n\treturn bur.rawResponse.StatusCode\n}", "func (bbur BlockBlobsUploadResponse) StatusCode() int {\n\treturn bbur.rawResponse.StatusCode\n}", "func (o *UpdateAgentDefault) Code() int {\n\treturn o._statusCode\n}", "func (o *PostSimulationActivityDefault) Code() int {\n\treturn o._statusCode\n}", "func (o *ReplaceTCPResponseRuleDefault) Code() int {\n\treturn o._statusCode\n}", "func (bscfur BlobsStartCopyFromURLResponse) StatusCode() int {\n\treturn bscfur.rawResponse.StatusCode\n}", "func (o *ArchiveRunsOK) Code() int {\n\treturn 200\n}", "func (o *ReplaceServerDefault) Code() int {\n\treturn o._statusCode\n}", "func (o *GetProjectOK) Code() int {\n\treturn 200\n}", "func (o *PostHclCompatibilityStatusesDefault) Code() int {\n\treturn o._statusCode\n}", "func (r *ErrorResponse) StatusCode() int {\n\tif r.Response == nil {\n\t\treturn 0\n\t}\n\treturn r.Response.StatusCode\n}", "func (o *BounceDefault) Code() int {\n\treturn o._statusCode\n}", "func (o *GetAccountsOK) Code() int {\n\treturn 200\n}", "func (o *GetTimestampResponseDefault) Code() int {\n\treturn o._statusCode\n}", "func (ccr ContainersCreateResponse) StatusCode() int {\n\treturn ccr.rawResponse.StatusCode\n}", "func (o *SignupOK) Code() int {\n\treturn 200\n}", "func (s *PolicyNotFound) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (s *FileSystemInUse) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (csapr ContainersSetAccessPolicyResponse) StatusCode() int {\n\treturn csapr.rawResponse.StatusCode\n}", "func (s *RangeNotSatisfiableException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (s *ExceedsLimitException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (e *InternalError) Code() int {\n\treturn e.httpCode\n}", "func (gfpr GetFilesystemPropertiesResponse) StatusCode() int {\n\treturn gfpr.rawResponse.StatusCode\n}", "func (o *GetServerConfigImportsMoidDefault) Code() int {\n\treturn o._statusCode\n}", "func (r errorResponse) StatusCode() int {\n\treturn r.Code\n}", "func (o *UpdateMTOServiceItemStatusOK) Code() int {\n\treturn 200\n}", "func (bstr BlobsSetTierResponse) StatusCode() int {\n\treturn bstr.rawResponse.StatusCode\n}", "func (s *TooManyRequestsException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (s *TooManyRequestsException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (s *TooManyRequestsException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (s *InsufficientThroughputCapacity) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (s *EffectivePolicyNotFoundException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (s 
*FileSystemNotFound) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}" ]
[ "0.8178374", "0.81755567", "0.80625063", "0.8051035", "0.8028105", "0.80270743", "0.8007115", "0.8001795", "0.7993193", "0.7966204", "0.79622895", "0.7934504", "0.792353", "0.7916362", "0.79099643", "0.7907924", "0.7905691", "0.790197", "0.7899776", "0.7899776", "0.78930235", "0.7880331", "0.78777814", "0.7871269", "0.78677094", "0.7865427", "0.7864644", "0.7861392", "0.7842574", "0.78287834", "0.7823224", "0.7820075", "0.7819002", "0.7795573", "0.7787804", "0.7786617", "0.7784921", "0.7783875", "0.77744085", "0.7772135", "0.7766417", "0.7762049", "0.77554643", "0.7754595", "0.7748917", "0.7742343", "0.773642", "0.77342045", "0.7733812", "0.7732327", "0.7726455", "0.77252877", "0.7719085", "0.7714701", "0.77133703", "0.7710173", "0.77098274", "0.7706548", "0.7704912", "0.77044225", "0.7703263", "0.7700601", "0.769799", "0.76947814", "0.7693324", "0.7688811", "0.76799434", "0.7677612", "0.7674848", "0.7673551", "0.76603574", "0.7657659", "0.76570445", "0.7656203", "0.764865", "0.7647105", "0.7646004", "0.7641416", "0.7638349", "0.76372296", "0.7635945", "0.7629613", "0.76270974", "0.76242846", "0.76232666", "0.7621985", "0.7621317", "0.7618963", "0.76187366", "0.7614486", "0.7613813", "0.7612059", "0.7610112", "0.7608446", "0.76058096", "0.7604405", "0.7604405", "0.76024926", "0.7601377", "0.760019", "0.7595423" ]
0.0
-1
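The row just closed pairs a GoDoc-style query with a one-line accessor, func (r *Response) Code() int { return r.recorder.Code }, which implies a wrapper around httptest.ResponseRecorder. A minimal runnable sketch of that wrapper pattern follows; the surrounding Response struct layout, the request construction, and the teapot handler are assumptions added for illustration, since the record carries only the accessor itself.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// Response wraps a recorder so tests can read the status a handler wrote.
type Response struct {
	recorder *httptest.ResponseRecorder
}

// Code returns the actual HTTP status code captured by the recorder.
func (r *Response) Code() int { return r.recorder.Code }

func main() {
	r := &Response{recorder: httptest.NewRecorder()}
	handler := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusTeapot) // 418, easy to spot in the output
	})
	handler.ServeHTTP(r.recorder, httptest.NewRequest(http.MethodGet, "/", nil))
	fmt.Println(r.Code()) // prints 418
}

In tests this gives assertions a stable accessor for the recorded status instead of reaching into the recorder field directly, which matches the accessor-over-recorder shape of the record itself.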
GetClaims returns the token claims
func (t *oidcToken) GetClaims(v interface{}) error { return t.Claims(v) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (t *Token) GetClaims() Claims {\n\treturn Claims(t.Claims)\n}", "func getClaims(token *jwt.Token) jwt.MapClaims {\n\treturn token.Claims.(jwt.MapClaims)\n}", "func GetClaims(token *jwt.Token) (jwt.MapClaims, error) {\n\tif !token.Valid {\n\t\treturn nil, fmt.Errorf(\"Unauthorized\")\n\t}\n\terr := token.Claims.(jwt.MapClaims).Valid()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn token.Claims.(jwt.MapClaims), nil\n}", "func getClaims(c echo.Context) map[string]interface{} {\n\tuser := c.Get(\"user\").(*jwt.Token)\n\tclaims := user.Claims.(jwt.MapClaims)\n\treturn claims\n}", "func (j *JWT) GetClaims(tokenString string) (gojwt.MapClaims, error) {\n\tvar claims gojwt.MapClaims\n\n\tgojwt.TimeFunc = j.Now\n\ttoken, err := gojwt.Parse(tokenString, j.KeyFunc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !token.Valid {\n\t\treturn nil, fmt.Errorf(\"token is not valid\")\n\t}\n\n\tclaims, ok := token.Claims.(gojwt.MapClaims)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"token has no claims\")\n\t}\n\n\treturn claims, nil\n}", "func GetClaims(req *http.Request) jwt.MapClaims {\n\tuserToken := req.Context().Value(userKeyName)\n\ttok := userToken.(*jwt.Token)\n\tclaims := tok.Claims.(jwt.MapClaims)\n\treturn claims\n}", "func GetClaims(c Claims, id string) error {\n\treturn get(claimStatusDB, c, id)\n}", "func (r *Repository) GetClaims(user *User) CustomClaims {\n\tclaims := CustomClaims{User: user}\n\tclaims.ExpiresAt = jwt.TimeFunc().Unix() + Repo.expiresAt\n\treturn claims\n}", "func (j *Service) Get(r *http.Request) (Claims, string, error) {\n\n\tfromCookie := false\n\ttokenString := \"\"\n\n\t// try to get from \"token\" query param\n\tif tkQuery := r.URL.Query().Get(j.JWTQuery); tkQuery != \"\" {\n\t\ttokenString = tkQuery\n\t}\n\n\t// try to get from JWT header\n\tif tokenHeader := r.Header.Get(j.JWTHeaderKey); tokenHeader != \"\" && tokenString == \"\" {\n\t\ttokenString = tokenHeader\n\t}\n\n\t// try to get from JWT cookie\n\tif tokenString == \"\" {\n\t\tfromCookie = true\n\t\tjc, err := r.Cookie(j.JWTCookieName)\n\t\tif err != nil {\n\t\t\treturn Claims{}, \"\", fmt.Errorf(\"token cookie was not presented: %w\", err)\n\t\t}\n\t\ttokenString = jc.Value\n\t}\n\n\tclaims, err := j.Parse(tokenString)\n\tif err != nil {\n\t\treturn Claims{}, \"\", fmt.Errorf(\"failed to get token: %w\", err)\n\t}\n\n\t// promote claim's aud to User.Audience\n\tif claims.User != nil {\n\t\tclaims.User.Audience = claims.Audience\n\t}\n\n\tif !fromCookie && j.IsExpired(claims) {\n\t\treturn Claims{}, \"\", fmt.Errorf(\"token expired\")\n\t}\n\n\tif j.DisableXSRF {\n\t\treturn claims, tokenString, nil\n\t}\n\n\tif fromCookie && claims.User != nil {\n\t\txsrf := r.Header.Get(j.XSRFHeaderKey)\n\t\tif claims.Id != xsrf {\n\t\t\treturn Claims{}, \"\", fmt.Errorf(\"xsrf mismatch\")\n\t\t}\n\t}\n\n\treturn claims, tokenString, nil\n}", "func ValidateTokenAndGetClaims(signedtoken string) (jwt.MapClaims, error) {\n\n\tvalid, unsignedToken, err := ValidateToken(signedtoken)\n\n\tif valid == false {\n\t\treturn nil, err\n\t}\n\n\tmapClamins := unsignedToken.Claims.(jwt.MapClaims) //Retrive claims map\n\n\treturn mapClamins, nil\n}", "func (authenticator *GoogleAuthenticator) GetClaims(authorization string) (AuthenticationInfo, error) {\n\tjwtString := strings.Split(authorization, \" \")[1]\n\tjwtParser := new(jwt.Parser)\n\tjwtParser.SkipClaimsValidation = true\n\ttoken, err := jwtParser.ParseWithClaims(jwtString, &GoogleJWTClaims{}, authenticator.getKey())\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\n\tclaims := token.Claims.(*GoogleJWTClaims)\n\tve := claims.validWithClientID(authenticator.clientID)\n\n\treturn claims, ve\n}", "func (a *Auth) GrabTokenClaims(r *http.Request) (*ClaimsType, error) {\n\tca, err := a.authStore.Get(r)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Auth.GrabTokenClaims: Error getting auth claims\")\n\t}\n\n\treturn ca, nil\n}", "func GetClaimsFromContext(r *http.Request) *Claims {\n\treturn r.Context().Value(\"claims\").(*Claims)\n}", "func (sessionStorer *SessionStorer) Get(req *http.Request) (*claims.Claims, error) {\n\ttokenString := req.Header.Get(\"Authorization\")\n\n\t// Get Token from Cookie\n\tif tokenString == \"\" {\n\t\ttokenString = sessionStorer.SessionManager.Get(req, sessionStorer.SessionName)\n\t}\n\n\treturn sessionStorer.ValidateClaims(tokenString)\n}", "func (*TokenRepository) GetAuthClaims(user UserInfo) *BaseClaims {\n\texpire := time.Now().Local().Add(authExpireTime).Unix()\n\n\treturn &BaseClaims{\n\t\tStandardClaims: jwtGo.StandardClaims{\n\t\t\tExpiresAt: expire,\n\t\t\tIssuer: \"www.progsys.no\",\n\t\t},\n\t\tUserInfo: user,\n\t}\n}", "func ExtractClaims(token string) (*models.Claims, error) {\n\tclaims := &models.Claims{}\n\n\ttk, err := jwt.ParseWithClaims(token, claims, func(token *jwt.Token) (interface{}, error) {\n\t\treturn []byte(\"pingouin123\"), nil\n\t})\n\tif err != nil {\n\t\tif err == jwt.ErrSignatureInvalid {\n\t\t\treturn nil, errors.New(\"Not Authorized\")\n\t\t}\n\t\treturn nil, err\n\t}\n\tif !tk.Valid {\n\t\treturn nil, errors.New(\"Not Authorized\")\n\t}\n\treturn claims, nil\n}", "func (t *Token) RawClaims() []byte {\n\treturn t.claims\n}", "func GetClaim(token *jwt.Token) jwt.MapClaims {\n\tif claim, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {\n\t\treturn claim\n\t}\n\treturn nil\n}", "func (w *ServerInterfaceWrapper) GetClaims(ctx echo.Context) error {\n\tvar err error\n\n\tctx.Set(\"OAuth.Scopes\", []string{\"\"})\n\n\t// Parameter object where we will unmarshal all parameters from the context\n\tvar params GetClaimsParams\n\t// ------------- Optional query parameter \"page_size\" -------------\n\n\terr = runtime.BindQueryParameter(\"form\", true, false, \"page_size\", ctx.QueryParams(), &params.PageSize)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"Invalid format for parameter page_size: %s\", err))\n\t}\n\n\t// ------------- Optional query parameter \"page_number\" -------------\n\n\terr = runtime.BindQueryParameter(\"form\", true, false, \"page_number\", ctx.QueryParams(), &params.PageNumber)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"Invalid format for parameter page_number: %s\", err))\n\t}\n\n\t// Invoke the callback with all the unmarshalled arguments\n\terr = w.Handler.GetClaims(ctx, params)\n\treturn err\n}", "func (j *jws) Claims() jwt.Claims {\n\tif j.isJWT {\n\t\tif c, ok := j.payload.v.(Claims); ok {\n\t\t\treturn jwt.Claims(c)\n\t\t}\n\t}\n\treturn nil\n}", "func (i *Instance) Claims() (claims []string, err error) {\n\tsp, err := i.GetSnapshot().FastForward()\n\tif err != nil {\n\t\treturn\n\t}\n\tclaims, err = sp.Getdir(i.dir.Prefix(\"claims\"))\n\tif cp.IsErrNoEnt(err) {\n\t\tclaims = []string{}\n\t\terr = nil\n\t}\n\treturn\n}", "func NewClaims() *Claims {\n\treturn &Claims{\n\t\tStandardClaims: &jwt.StandardClaims{},\n\t}\n}", "func (s *Store) GetToken(subject, id string) (*types.Claims, error) {\n\tresp, err := s.client.Get(context.TODO(), getTokenPath(subject, id), 
clientv3.WithLimit(1))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(resp.Kvs) != 1 {\n\t\treturn nil, fmt.Errorf(\"token %s for %s does not exist\", id, subject)\n\t}\n\n\tclaims := &types.Claims{}\n\terr = json.Unmarshal(resp.Kvs[0].Value, claims)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn claims, nil\n}", "func (m *Manager) claim(rawAccessToken string) (*CustomClaims, error) {\n\tjwtToken, err := jwt.ParseSigned(rawAccessToken)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif m.jwsClient == nil {\n\t\treturn nil, fmt.Errorf(\"JWS client not configured\")\n\t}\n\tvar verifiedClaims *CustomClaims\n\tcacheKey := \"jws-\" + rawAccessToken\n\tif cachedItem, hasCachedItem := m.cache.GetExpiryAwareItem(cacheKey); hasCachedItem {\n\t\tif cachedClaims, ok := cachedItem.(CustomClaims); ok && cachedClaims.VerifyExpiration() == nil {\n\t\t\tverifiedClaims = &cachedClaims\n\t\t}\n\t}\n\tif verifiedClaims == nil {\n\t\t// verify claims signature\n\t\tresp, err := m.jwsClient.Get(m.config.JWSConfig.Endpoint)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to retrieve JSON Web Keysets due to %s\", err)\n\t\t}\n\t\tdefer func() {\n\t\t\t_ = resp.Body.Close()\n\t\t}()\n\t\tvar jwks jose.JSONWebKeySet\n\t\tif err := json.NewDecoder(resp.Body).Decode(&jwks); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to decode JSON Web Keysets response due to %s\", err)\n\t\t}\n\t\tvar claims CustomClaims\n\t\tif err := jwtToken.Claims(&jwks, &claims); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to verify token signature due to %s\", err)\n\t\t}\n\t\tm.cache.AddExpiryAwareItem(cacheKey, claims)\n\t\tverifiedClaims = &claims\n\t}\n\treturn verifiedClaims, nil\n}", "func (base *BaseController) ParseClaims() map[string]interface{} {\n\tcl := base.Ctx.Input.GetData(\"JWTClaims\")\n\tif cl != nil {\n\t\tclmap, ok := cl.(map[string]interface{})\n\t\tif ok {\n\t\t\treturn clmap\n\t\t}\n\t\treturn nil\n\t}\n\treturn nil\n}", "func GetClaim(ctx context.Context, name string) string {\n\ttoken := GetToken(ctx)\n\tif token != nil {\n\t\tif claims, ok := GetToken(ctx).Claims.(jwt.MapClaims); ok {\n\t\t\treturn fmt.Sprintf(\"%s\", claims[name])\n\t\t}\n\t}\n\treturn \"\"\n}", "func getUserClaims(user string) (map[string]string, error) {\n\tfakeM := http.Header{}\n\tfakeM.Add(\"Authorization\", user)\n\n\tfakeR := http.Request{Header: fakeM}\n\n\ttoken, err := validator.ValidateRequest(&fakeR)\n\n\tif err == nil {\n\t\tclaims := map[string]string{}\n\t\terr = validator.Claims(&fakeR, token, &claims)\n\t\tif err != nil {\n\t\t\treturn claims, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn nil, err\n}", "func (m *Application) GetGroupMembershipClaims()(*string) {\n return m.groupMembershipClaims\n}", "func RetrieveClaims(ctx context.Context) (*claims.Claims, bool) {\n\tclms, ok := ctx.Value(keyClaims).(*claims.Claims)\n\treturn clms, ok\n}", "func RetrieveClaims(ctx context.Context) (*claims.Claims, bool) {\n\tclms, ok := ctx.Value(keyClaims).(*claims.Claims)\n\treturn clms, ok\n}", "func ExtractClaims(tokenStr string, signinigKey []byte) (jwt.MapClaims, error) {\n\tvar (\n\t\ttoken *jwt.Token\n\t\terr error\n\t)\n\ttoken, err = jwt.Parse(tokenStr, func(token *jwt.Token) (interface{}, error) {\n\t\t// check token signing method etc\n\t\treturn signinigKey, nil\n\t})\n\tif err != nil {\n\t\ttoken, err = jwt.Parse(tokenStr, func(token *jwt.Token) (interface{}, error) {\n\t\t\t// check token signing method etc\n\t\t\treturn signinigKey, nil\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t}\n\n\tclaims, ok := token.Claims.(jwt.MapClaims)\n\tif !(ok && token.Valid) {\n\t\terr = fmt.Errorf(\"Invalid JWT Token\")\n\t\treturn nil, err\n\t}\n\treturn claims, nil\n}", "func NewClaims() *Claims {\n\n\treturn &Claims{\n\t\tModelVersion: 1,\n\t\tContent: map[string]string{},\n\t\tMigrationsLog: map[string]string{},\n\t}\n}", "func requestAuthClaims(r *http.Request) AuthClaims {\n\t// first, try to extract from header\n\tif claims := requestAuthHeaderClaims(r); claims != nil {\n\t\treturn claims\n\t}\n\t// second, try to extract from cookie\n\tif claims := requestAuthCookieClaims(r); claims != nil {\n\t\treturn claims\n\t}\n\treturn nil\n}", "func (*TokenRepository) GetRefreshClaims(id string) *BaseClaims {\n\texpire := time.Now().Local().Add(refreshExpireTime).Unix()\n\n\treturn &BaseClaims{\n\t\tStandardClaims: jwtGo.StandardClaims{\n\t\t\tExpiresAt: expire,\n\t\t\tIssuer: \"www.progsys.no\",\n\t\t\tId: id,\n\t\t},\n\t}\n}", "func decryptClaims(ciphertext string) (*TokenClaims, error) {\n\tdecoded, err := base64.StdEncoding.DecodeString(ciphertext)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tplaintext, err := decrypt(decoded, []byte{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttokenClaims := &TokenClaims{}\n\tif err = json.Unmarshal(plaintext, tokenClaims); err != nil {\n\t\treturn nil, err\n\t}\n\treturn tokenClaims, nil\n}", "func ExtractClaims(tokenString string, tokenSecretKey string) (jwt.MapClaims, error) {\n\tvar (\n\t\ttoken *jwt.Token\n\t\terr error\n\t)\n\n\ttoken, err = jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {\n\t\t// check token signing method etc\n\t\treturn []byte(tokenSecretKey), nil\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclaims, ok := token.Claims.(jwt.MapClaims)\n\tif !(ok && token.Valid) {\n\t\treturn nil, errors.New(\"invalid token\")\n\t}\n\n\treturn claims, nil\n}", "func (jp *Parser) GetMapClaims(token *jwt.Token) (jwt.MapClaims, bool) {\n\tclaims, ok := token.Claims.(*JanusClaims)\n\tif !ok {\n\t\treturn jwt.MapClaims{}, ok\n\t}\n\treturn claims.MapClaims, ok\n}", "func (m *Application) GetOptionalClaims()(OptionalClaimsable) {\n return m.optionalClaims\n}", "func NewClaims(sessionId string,expire int64) jwt.Claims {\n\treturn CustomClaims{\n\t\tsessionId,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: expire,\n\t\t},\n\t}\n}", "func (p *resourcePrincipalConfigurationProvider) GetClaim(key string) (interface{}, error) {\n\treturn nil, nil\n}", "func (o *Claims) DeepCopy() *Claims {\n\n\tif o == nil {\n\t\treturn nil\n\t}\n\n\tout := &Claims{}\n\to.DeepCopyInto(out)\n\n\treturn out\n}", "func (sessionStorer *SessionStorer) ValidateClaims(tokenString string) (*claims.Claims, error) {\n\ttoken, err := jwt.ParseWithClaims(tokenString, &claims.Claims{}, func(token *jwt.Token) (interface{}, error) {\n\t\tif token.Method != sessionStorer.SigningMethod {\n\t\t\treturn nil, fmt.Errorf(\"unexpected signing method\")\n\t\t}\n\t\treturn []byte(sessionStorer.SignedString), nil\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif claims, ok := token.Claims.(*claims.Claims); ok && token.Valid {\n\t\treturn claims, nil\n\t}\n\treturn nil, errors.New(\"invalid token\")\n}", "func (o *OIDCAuthenticator) parseClaims(claims map[string]interface{}) (*Claims, error) {\n\n\t// If we have namespace set, then use it to get custom claims:\n\tif len(o.namespace) > 0 {\n\t\tfor _, cc := range customClaims {\n\t\t\t// Check if there claims needed are under a namespace\n\t\t\tif v, ok := claims[o.namespace+cc]; ok 
{\n\t\t\t\t// Move it to the top of the json tree overwriting anything\n\t\t\t\t// there with the same name.\n\t\t\t\tclaims[cc] = v\n\t\t\t}\n\t\t}\n\t}\n\n\t// Marshal into byte stream so that we can unmarshal into SDK Claims\n\tcbytes, err := json.Marshal(claims)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Internal error, unable to re-encode OIDC token claims: %v\", err)\n\t}\n\n\t// Return claims\n\tvar sdkClaims Claims\n\tif err := json.Unmarshal(cbytes, &sdkClaims); err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to get claims from token: %v\", err)\n\t}\n\n\tif err := validateUsername(o.usernameClaim, &sdkClaims); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &sdkClaims, nil\n}", "func ParseClaims(tokenStr string, signingKey []byte) (*CustomClaims, error) {\n\ttoken, err := jwt.ParseWithClaims(tokenStr, &CustomClaims{}, func(token *jwt.Token) (interface{}, error) {\n\t\treturn signingKey, nil\n\t})\n\tif claims, ok := token.Claims.(*CustomClaims); ok && token.Valid {\n\t\treturn claims, nil\n\t}\n\treturn nil, err\n}", "func (t *Subrogationcode) getAllclaims(stub shim.ChaincodeStubInterface, args []string) ([]byte, error) {\n\n\tkeysIter, err := stub.RangeQueryState(\"\", \"\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"keys operation failed. Error accessing state: %s\", err)\n\t}\n\tdefer keysIter.Close()\n\n\tvar keys []Claim\n\tfor keysIter.HasNext() {\n\t\tkey, _, iterErr := keysIter.Next()\n\t\tif iterErr != nil {\n\t\t\treturn nil, fmt.Errorf(\"keys operation failed. Error accessing state: %s\", err)\n\t\t}\n\t\tvals, err := stub.GetState(key)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"keys operation failed. Error accessing state: %s\", err)\n\t\t}\n\n\t\tvar klaim Claim\n\t\tjson.Unmarshal(vals, &klaim)\n\n\t\tif klaim.Insuredname != \"\" {\n\t\tkeys = append(keys, klaim)\n\t}\n\n\t}\n\n\tjsonKeys, err := json.Marshal(keys)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"keys operation failed. Error marshaling JSON: %s\", err)\n\t}\n\n\treturn jsonKeys, nil\n\n}", "func (t *Jwt) claimsFromToken(tokenString string) (jwt.MapClaims, error) {\n\t// parse token\n\tjwtToken, err := jwt.ParseWithClaims(tokenString, jwt.MapClaims{}, func(token *jwt.Token) (i interface{}, e error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {\n\t\t\t// todo: log error\n\t\t\treturn\n\t\t}\n\t\treturn t.PublicKey, nil\n\t})\n\n\t// get claims\n\tvar claims jwt.MapClaims\n\tif jwtToken == nil || jwtToken.Claims == nil {\n\t\treturn claims, errors.New(\"jwtToken error\")\n\t}\n\n\tclaims = jwtToken.Claims.(jwt.MapClaims)\n\treturn claims, err\n}", "func (u *User) Claims() jwt.AppClaims {\n\treturn jwt.AppClaims{\n\t\tID: u.ID,\n\t\tSub: u.Name,\n\t\tRoles: u.Roles,\n\t}\n}", "func (t *Subrogationcode) getClaim(stub shim.ChaincodeStubInterface, args []string) ([]byte, error) {\n\tvar klaimref string\n\n\tklaimref = args[0]\n\n\tclaimAsBytes, err := stub.GetState(klaimref + \"_claim\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"keys operation failed. 
Error accessing state: %s\", err)\n\t}\n\tvar klaim Claim\n\tjson.Unmarshal(claimAsBytes, &klaim)\n\n\t//re := regexp.MustCompile(`\\r?\\n`)\n\tclaimdata := Claim {\n\t\tClaimref : klaim.Claimref,\n\t\tInsuredname : klaim.Insuredname,\n\t\tPolicynumber : klaim.Policynumber,\n\t\tClaimnumber : klaim.Claimnumber,\n\t\tTortcarriername : klaim.Tortcarriername,\n\t\tTortcarrieraddress : klaim.Tortcarrieraddress,\n\t\tTortcarrieremail : klaim.Tortcarrieremail,\n\t\tDateofaccident : klaim.Dateofaccident,\n\t\tTortdefendentname : klaim.Tortdefendentname,\n\t\tAccidentstreet : klaim.Accidentstreet,\n\t\tAccidenttown : klaim.Accidenttown,\n\t\tAccidentcounty : klaim.Accidentcounty,\n\t\tAccidentstate : klaim.Accidentstate,\n\t\tPropertydamageamount : klaim.Propertydamageamount,\n\t\tClaimamount : klaim.Claimamount,\n}\n\nresp, err := json.Marshal(claimdata)\nif err != nil {\n fmt.Println(\"error:\", err)\n}\n\n\treturn resp, nil\n\n}", "func GetJWTClaims(ctx context.Context) (*entity.AccessTokenClaims, error) {\n\tif ctx == nil {\n\t\treturn &entity.AccessTokenClaims{}, errNoContext\n\t}\n\n\tclaims, ok := ctx.Value(ClaimsKey).(*entity.AccessTokenClaims)\n\tif !ok {\n\t\treturn &entity.AccessTokenClaims{}, errNoClaimsInContext\n\t}\n\n\treturn claims, nil\n}", "func getClaim(claims map[string]interface{}, path string) interface{} {\n\tkeys := strings.Split(path, \".\")\n\tvar value interface{} = claims\n\tfor _, key := range keys {\n\t\tif value = get(key, value); value == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn value\n}", "func getUserClaimsRequest(r *http.Request) (map[string]string, error) {\n\ttoken, err := validator.ValidateRequest(r)\n\tif err == nil {\n\t\tclaims := map[string]string{}\n\t\terr = validator.Claims(r, token, &claims)\n\t\tif err != nil {\n\t\t\treturn claims, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn nil, err\n}", "func (l *loader) FetchClaims(ctx context.Context) ([]types.Claim, error) {\n\t// Get the current claim count.\n\tclaimCount, err := l.caller.ClaimDataLen(&bind.CallOpts{\n\t\tContext: ctx,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Fetch each claim and build a list.\n\tclaimList := make([]types.Claim, claimCount.Uint64())\n\tfor i := uint64(0); i < claimCount.Uint64(); i++ {\n\t\tclaim, err := l.fetchClaim(ctx, i)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tclaimList[i] = claim\n\t}\n\n\treturn claimList, nil\n}", "func VerifyClaims(jwt jose.JWT, issuer, clientID string) error {\n\tnow := time.Now().UTC()\n\n\tclaims, err := jwt.Claims()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tident, err := IdentityFromClaims(claims)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif ident.ExpiresAt.Before(now) {\n\t\treturn errors.New(\"token is expired\")\n\t}\n\n\t// iss REQUIRED. Issuer Identifier for the Issuer of the response.\n\t// The iss value is a case sensitive URL using the https scheme that contains scheme,\n\t// host, and optionally, port number and path components and no query or fragment components.\n\tif iss, exists := claims[\"iss\"].(string); exists {\n\t\tif !urlEqual(iss, issuer) {\n\t\t\treturn fmt.Errorf(\"invalid claim value: 'iss'. expected=%s, found=%s.\", issuer, iss)\n\t\t}\n\t} else {\n\t\treturn errors.New(\"missing claim: 'iss'\")\n\t}\n\n\t// iat REQUIRED. 
Time at which the JWT was issued.\n\t// Its value is a JSON number representing the number of seconds from 1970-01-01T0:0:0Z\n\t// as measured in UTC until the date/time.\n\tif _, exists := claims[\"iat\"].(float64); !exists {\n\t\treturn errors.New(\"missing claim: 'iat'\")\n\t}\n\n\t// aud REQUIRED. Audience(s) that this ID Token is intended for.\n\t// It MUST contain the OAuth 2.0 client_id of the Relying Party as an audience value.\n\t// It MAY also contain identifiers for other audiences. In the general case, the aud\n\t// value is an array of case sensitive strings. In the common special case when there\n\t// is one audience, the aud value MAY be a single case sensitive string.\n\tif aud, ok, err := claims.StringClaim(\"aud\"); err == nil && ok {\n\t\tif aud != clientID {\n\t\t\treturn fmt.Errorf(\"invalid claims, 'aud' claim and 'client_id' do not match, aud=%s, client_id=%s\", aud, clientID)\n\t\t}\n\t} else if aud, ok, err := claims.StringsClaim(\"aud\"); err == nil && ok {\n\t\tif !containsString(clientID, aud) {\n\t\t\treturn fmt.Errorf(\"invalid claims, cannot find 'client_id' in 'aud' claim, aud=%v, client_id=%s\", aud, clientID)\n\t\t}\n\t} else {\n\t\treturn errors.New(\"invalid claim value: 'aud' is required, and should be either string or string array\")\n\t}\n\n\treturn nil\n}", "func getRequiredClaims() {\n\n\tresourceService, err := cloudresourcemanager.NewService(context.Background())\n\tif err != nil {\n\t\tklog.Fatalf(\"cannot create cloudresourcemanager api service\")\n\t}\n\n\tprojectsService := gcpProjectsAPIService{projectsService: resourceService.Projects}\n\n\tcomputeService, err := compute.NewService(context.Background())\n\tif err != nil {\n\t\tklog.Fatalf(\"cannot create compute api service\")\n\t}\n\n\tbackendsService := gcpBackendsAPIService{backendServicesService: computeService.BackendServices}\n\n\t// TODO: use a backoff strategy\n\ttick := time.NewTicker(retryInterval)\n\tdefer tick.Stop()\n\n\tfor {\n\t\tklog.V(3).Info(\"begin call to get the project number\")\n\t\tprojectNumber, err := getProjectNumber(projectsService)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"error retrieving project number: %v\", err)\n\t\t}\n\t\tklog.V(3).Info(\"begin call to get the backend service id\")\n\t\tbackendServiceID, err := getBackendServiceID(backendsService)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"error retrieving backend service id: %v\", err)\n\t\t}\n\t\tif err == nil {\n\t\t\taudience = fmt.Sprintf(\"/projects/%s/global/backendServices/%s\", projectNumber, backendServiceID)\n\t\t\tklog.V(1).Infof(\"audience value found: %s, ready to validate requests\", audience)\n\t\t\treturn\n\t\t}\n\t\tklog.V(1).Infof(\"unable to retrieve the audience value, retring in %v seconds\", retryInterval.Seconds())\n\t\tselect {\n\t\tcase <-tick.C:\n\t\t}\n\t}\n}", "func GetFromToken(tokenString, signingString string) (*Claim, error) {\n\ttoken, err := jwt.Parse(tokenString, func(*jwt.Token) (interface{}, error) {\n\t\treturn []byte(signingString), nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !token.Valid {\n\t\treturn nil, errors.New(\"invalid token\")\n\t}\n\n\t_, ok := token.Claims.(jwt.MapClaims)\n\tif !ok {\n\t\treturn nil, errors.New(\"invalid claim\")\n\t}\n\n\treturn &Claim{ID: int(1)}, nil\n}", "func (k Keeper) GetClaim(ctx sdk.Context, key string) (types.Claim, error) {\n\tstore := ctx.KVStore(k.storeKey)\n\tvar claim types.Claim\n\tbyteKey := []byte(types.ClaimPrefix + key)\n\terr := k.cdc.UnmarshalBinaryLengthPrefixed(store.Get(byteKey), &claim)\n\tif err 
!= nil {\n\t\treturn claim, err\n\t}\n\treturn claim, nil\n}", "func (t *jwtMgr) Get(token, key string) (interface{}, bool, error) {\n\tif !validateTokenFormat(token) {\n\t\treturn nil, false, ErrInvalidTokenFormat\n\t}\n\tstandardClaims, privateClaims, ok, err := t.validateJWTToken(fmt.Sprintf(\"%s.%s\", t.header, token))\n\tif !ok {\n\t\treturn nil, ok, err\n\t}\n\t// check key in standard and private claims\n\tswitch key {\n\tcase IssuerClaim:\n\t\treturn standardClaims.Issuer, true, nil\n\tcase SubClaim:\n\t\treturn standardClaims.Subject, true, nil\n\tcase ExpClaim:\n\t\treturn standardClaims.Expiry, true, nil\n\tcase IssuedAtClaim:\n\t\treturn standardClaims.IssuedAt, true, nil\n\tcase AudienceClaim, NotBeforeClaim, IDClaim:\n\t\treturn nil, false, nil\n\tdefault:\n\t\tval, ok := privateClaims[key]\n\t\treturn val, ok, nil\n\t}\n}", "func requestAuthHeaderClaims(r *http.Request) AuthClaims {\n\tauthHdr, prs := r.Header[\"Authorization\"]\n\tif prs {\n\t\tif claims := parseAuthHeader(authHdr); claims != nil {\n\t\t\treturn claims\n\t\t}\n\t}\n\treturn nil\n}", "func (s *routeClaimLister) RouteClaims(namespace string) RouteClaimNamespaceLister {\n\treturn routeClaimNamespaceLister{indexer: s.indexer, namespace: namespace}\n}", "func ReadAndGetClaim(tokenStr string) (jwt.MapClaims, error) {\n\ttoken, err := ReadToken(tokenStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn GetClaim(token), nil\n}", "func (c *RefreshClaims) ParseClaims(claims jwt.MapClaims) error {\n\ttoken, ok := claims[\"token\"]\n\tif !ok {\n\t\treturn errors.New(\"could not parse claim token\")\n\t}\n\tc.Token = token.(string)\n\treturn nil\n}", "func (c *Claims) UnmarshalJSON(b []byte) error {\n\ttokenClaims := struct {\n\t\tTenantString string `json:\"tenant\"`\n\t\tScopes string `json:\"scopes\"`\n\t\tConsumerID string `json:\"consumerID\"`\n\t\tConsumerType consumer.ConsumerType `json:\"consumerType\"`\n\t\tOnBehalfOf string `json:\"onBehalfOf\"`\n\t\tRegion string `json:\"region\"`\n\t\tTokenClientID string `json:\"tokenClientID\"`\n\t\tFlow oathkeeper.AuthFlow `json:\"flow\"`\n\t\tZID string `json:\"zid\"`\n\t\tjwt.StandardClaims\n\t}{}\n\n\terr := json.Unmarshal(b, &tokenClaims)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"while unmarshaling token claims:\")\n\t}\n\n\tc.Scopes = tokenClaims.Scopes\n\tc.ConsumerID = tokenClaims.ConsumerID\n\tc.ConsumerType = tokenClaims.ConsumerType\n\tc.OnBehalfOf = tokenClaims.OnBehalfOf\n\tc.Region = tokenClaims.Region\n\tc.TokenClientID = tokenClaims.TokenClientID\n\tc.Flow = tokenClaims.Flow\n\tc.ZID = tokenClaims.ZID\n\tc.StandardClaims = tokenClaims.StandardClaims\n\n\tif err := json.Unmarshal([]byte(tokenClaims.TenantString), &c.Tenant); err != nil {\n\t\tlog.D().Warnf(\"While unmarshaling tenants: %+v\", err)\n\t\tc.Tenant = make(map[string]string)\n\t}\n\n\treturn nil\n}", "func (j *JWT) ValidClaims(jwtToken Token, lifespan time.Duration, alg gojwt.Keyfunc) (Principal, error) {\n\t// 1. Checks for expired tokens\n\t// 2. Checks if time is after the issued at\n\t// 3. Check if time is after not before (nbf)\n\t// 4. Check if subject is not empty\n\t// 5. 
Check if duration less than auth lifespan\n\ttoken, err := gojwt.ParseWithClaims(string(jwtToken), &Claims{}, alg)\n\tif err != nil {\n\t\treturn Principal{}, err\n\t\t// at time of this writing and researching the docs, token.Valid seems to be always true\n\t} else if !token.Valid {\n\t\treturn Principal{}, err\n\t}\n\n\t// at time of this writing and researching the docs, there will always be claims\n\tclaims, ok := token.Claims.(*Claims)\n\tif !ok {\n\t\treturn Principal{}, fmt.Errorf(\"unable to convert claims to standard claims\")\n\t}\n\n\texp := time.Unix(claims.ExpiresAt, 0)\n\tiat := time.Unix(claims.IssuedAt, 0)\n\n\t// If the duration of the claim is longer than the auth lifespan then this is\n\t// an invalid claim because server assumes that lifespan is the maximum possible\n\t// duration. However, a lifespan of zero means that the duration comparison\n\t// against the auth duration is not needed.\n\tif lifespan > 0 && exp.Sub(iat) > lifespan {\n\t\treturn Principal{}, fmt.Errorf(\"claims duration is different from auth lifespan\")\n\t}\n\n\treturn Principal{\n\t\tSubject: claims.Subject,\n\t\tIssuer: claims.Issuer,\n\t\tOrganization: claims.Organization,\n\t\tGroup: claims.Group,\n\t\tExpiresAt: exp,\n\t\tIssuedAt: iat,\n\t}, nil\n}", "func (c *claimExtractor) GetClaim(claim string) (interface{}, bool, error) {\n\tif claim == \"\" {\n\t\treturn nil, false, nil\n\t}\n\n\tif value := getClaimFrom(claim, c.tokenClaims); value != nil {\n\t\treturn value, true, nil\n\t}\n\n\tif c.profileClaims == nil {\n\t\tprofileClaims, err := c.loadProfileClaims()\n\t\tif err != nil {\n\t\t\treturn nil, false, fmt.Errorf(\"failed to fetch claims from profile URL: %v\", err)\n\t\t}\n\n\t\tc.profileClaims = profileClaims\n\t}\n\n\tif value := getClaimFrom(claim, c.profileClaims); value != nil {\n\t\treturn value, true, nil\n\t}\n\n\treturn nil, false, nil\n}", "func DecodeWithCustomClaims(tokenString string) (*CustomClaims, error) {\n\n // Parse the token\n token, err := jwt.ParseWithClaims(tokenString, &CustomClaims{}, func(token *jwt.Token) (interface{}, error) {\n return key, nil\n })\n\n // Validate the token and return the custom claims\n if claims, ok := token.Claims.(*CustomClaims); ok && token.Valid {\n return claims, nil\n } else {\n return nil, err\n }\n}", "func listClaim(ctx sdk.Context, k Keeper) ([]byte, error) {\n\tvar claimList []types.Claim\n\tstore := ctx.KVStore(k.storeKey)\n\titerator := sdk.KVStorePrefixIterator(store, []byte(types.ClaimPrefix))\n\tfor ; iterator.Valid(); iterator.Next() {\n\t\tvar claim types.Claim\n\t\tk.cdc.MustUnmarshalBinaryLengthPrefixed(store.Get(iterator.Key()), &claim)\n\t\tclaimList = append(claimList, claim)\n\t}\n\tres := codec.MustMarshalJSONIndent(k.cdc, claimList)\n\treturn res, nil\n}", "func (j *JwtAuthenticationHandler) validateClaims(claims *JwtVapiClaims) error {\n\tclaimsValidator := *NewJwtVapiClaimsValidator(claims, j.maxClockSkew, j.acceptableAudiences)\n\treturn claimsValidator.Valid()\n}", "func (m *ClaimControllerRefManager) ClaimClaims(claims []*corev1.PersistentVolumeClaim, filters ...func(claim *corev1.PersistentVolumeClaim) bool) ([]*corev1.PersistentVolumeClaim, error) {\n\tvar claimed []*corev1.PersistentVolumeClaim\n\tvar errlist []error\n\n\tmatch := func(obj metav1.Object) bool {\n\t\tclaim := obj.(*corev1.PersistentVolumeClaim)\n\t\t// Check selector first so filters only run on potentially matching claims.\n\t\tif !m.Selector.Matches(labels.Set(claim.Labels)) {\n\t\t\treturn false\n\t\t}\n\t\tfor _, filter := range filters 
{\n\t\t\tif !filter(claim) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\tadopt := func(obj metav1.Object) error {\n\t\treturn m.AdoptClaim(obj.(*corev1.PersistentVolumeClaim))\n\t}\n\trelease := func(obj metav1.Object) error {\n\t\treturn m.ReleaseClaim(obj.(*corev1.PersistentVolumeClaim))\n\t}\n\n\tfor _, claim := range claims {\n\t\tok, err := m.ClaimObject(claim, match, adopt, release)\n\t\tif err != nil {\n\t\t\terrlist = append(errlist, err)\n\t\t\tcontinue\n\t\t}\n\t\tif ok {\n\t\t\tclaimed = append(claimed, claim)\n\t\t}\n\t}\n\treturn claimed, utilerrors.NewAggregate(errlist)\n}", "func (w *ServerInterfaceWrapper) GetClaim(ctx echo.Context) error {\n\tvar err error\n\t// ------------- Path parameter \"id\" -------------\n\tvar id int\n\n\terr = runtime.BindStyledParameter(\"simple\", false, \"id\", ctx.Param(\"id\"), &id)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"Invalid format for parameter id: %s\", err))\n\t}\n\n\tctx.Set(\"OAuth.Scopes\", []string{\"\"})\n\n\t// Invoke the callback with all the unmarshalled arguments\n\terr = w.Handler.GetClaim(ctx, id)\n\treturn err\n}", "func MakeClaims() jwt.MapClaims {\n\tiat := time.Now()\n\texp := iat.Add(1 * time.Minute)\n\treturn jwt.MapClaims{\n\t\t\"iss\": \"https://sso.redhat.com/auth/realms/redhat-external\",\n\t\t\"iat\": iat.Unix(),\n\t\t\"typ\": \"Bearer\",\n\t\t\"exp\": exp.Unix(),\n\t}\n}", "func Verify(tokenStr string) (Claims, error) {\n\n\tif !verifyToken(tokenStr) {\n\t\treturn nil, errTokenInvalid\n\t}\n\n\ttokenArray := strings.Split(tokenStr, \".\")\n\n\t// Make sure token contains 3 parts\n\tif len(tokenArray) != 3 {\n\t\treturn nil, errTokenInvalid\n\t}\n\n\tif err := verifyHeader(tokenArray[0]); err != nil {\n\t\treturn nil, err\n\t}\n\n\tclaimsByte, err := base64.RawURLEncoding.DecodeString(tokenArray[1])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar claims = Claims{}\n\tif err := json.Unmarshal(claimsByte, &claims); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := claims.validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn claims, nil\n}", "func (s *StandardClaim) Get(c Claims, id string) error {\n\tif err := GetClaims(c, id); err != nil {\n\t\treturn err\n\t}\n\theader, err := c.Hdr()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcontent, err := c.Cont()\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.Claim = c\n\ts.Header = header\n\ts.Content = content\n\treturn nil\n}", "func (c *distccClientClaims) Get(name string, options v1.GetOptions) (result *v1alpha1.DistccClientClaim, err error) {\n\tresult = &v1alpha1.DistccClientClaim{}\n\terr = c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"distccclientclaims\").\n\t\tName(name).\n\t\tVersionedParams(&options, scheme.ParameterCodec).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}", "func (v *verifierPrivate) ValidateTokenAndGetClaims(tokenString string, customClaims interface{}) (*Token, error) {\n\n\t// let us check if the verifier is already expired. 
If it is just return verifier expired error\n\t// The caller has to re-initialize the verifier.\n\ttoken := Token{}\n\ttoken.standardClaims = &jwt.StandardClaims{}\n\tparsedToken, err := jwt.ParseWithClaims(tokenString, token.standardClaims, func(token *jwt.Token) (interface{}, error) {\n\n\t\tif keyIDValue, keyIDExists := token.Header[\"kid\"]; keyIDExists {\n\n\t\t\tkeyIDString, ok := keyIDValue.(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"kid (key id) in jwt header is not a string : %v\", keyIDValue)\n\t\t\t}\n\n\t\t\tif matchPubKey, found := v.pubKeyMap[keyIDString]; !found {\n\t\t\t\treturn nil, &MatchingCertNotFoundError{keyIDString}\n\t\t\t} else {\n\t\t\t\t// if the certificate just expired.. we need to return appropriate error\n\t\t\t\t// so that the caller can deal with it appropriately\n\t\t\t\tnow := time.Now()\n\t\t\t\tif now.After(matchPubKey.expTime) {\n\t\t\t\t\treturn nil, &MatchingCertJustExpired{keyIDString}\n\t\t\t\t}\n\t\t\t\t// if the verifier expired, we need to use a new instance of the verifier\n\t\t\t\tif time.Now().After(v.expiration) {\n\t\t\t\t\treturn nil, &VerifierExpiredError{v.expiration}\n\t\t\t\t}\n\t\t\t\treturn matchPubKey.pubKey, nil\n\t\t\t}\n\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"kid (key id) field missing in token. field is mandatory\")\n\t\t}\n\t})\n\n\tif err != nil {\n\t\tif jwtErr, ok := err.(*jwt.ValidationError); ok {\n\t\t\tswitch e := jwtErr.Inner.(type) {\n\t\t\tcase *MatchingCertNotFoundError, *VerifierExpiredError, *MatchingCertJustExpired:\n\t\t\t\treturn nil, e\n\t\t\t}\n\t\t\treturn nil, jwtErr\n\t\t}\n\t\treturn nil, err\n\t}\n\ttoken.jwtToken = parsedToken\n\t// so far we have only got the standardClaims parsed. We need to now fill the customClaims\n\n\tparts := strings.Split(tokenString, \".\")\n\t// no need check for the number of segments since the previous ParseWithClaims has already done this check.\n\t// therefor the following is redundant. 
If we change the implementation, will need to revisit\n\t//if len(parts) != 3 {\n\t//\treturn nil, \"jwt token to be parsed seems to be in \"\n\t//}\n\n\t// parse Claims\n\tvar claimBytes []byte\n\n\tif claimBytes, err = jwt.DecodeSegment(parts[1]); err != nil {\n\t\treturn nil, fmt.Errorf(\"could not decode claims part of the jwt token\")\n\t}\n\tdec := json.NewDecoder(bytes.NewBuffer(claimBytes))\n\terr = dec.Decode(customClaims)\n\ttoken.customClaims = customClaims\n\n\treturn &token, nil\n}", "func (c *Consumer) Claims() []int32 {\n\tc.pLock.Lock()\n\tids := c.partIDs\n\tc.pLock.Unlock()\n\treturn ids\n}", "func (c *claimExtractor) loadProfileClaims() (*simplejson.Json, error) {\n\tif c.profileURL == nil || c.profileURL.String() == \"\" || c.requestHeaders == nil {\n\t\t// When no profileURL is set, we return a non-empty map so that\n\t\t// we don't attempt to populate the profile claims again.\n\t\t// If there are no headers, the request would be unauthorized so we also skip\n\t\t// in this case too.\n\t\treturn simplejson.New(), nil\n\t}\n\n\tclaims, err := requests.New(c.profileURL.String()).\n\t\tWithContext(c.ctx).\n\t\tWithHeaders(c.requestHeaders).\n\t\tDo().\n\t\tUnmarshalSimpleJSON()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error making request to profile URL: %v\", err)\n\t}\n\n\treturn claims, nil\n}", "func ConstructClaimsFromSlice(claims ...Claim) (*jwt.Claims, error) {\n\ttokenClaims := &jwt.Claims{\n\t\tSet: map[string]interface{}{},\n\t}\n\n\tfor _, claim := range claims {\n\t\tif claim.IsRegistered() {\n\t\t\terr := constructRegisteredClaim(tokenClaims, claim)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t} else {\n\t\t\terr := constructUnregisteredClaim(tokenClaims, claim)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\tif tokenClaims.ID == \"\" {\n\t\ttokenClaims.ID = uuid.Must(uuid.NewV4()).String()\n\t}\n\n\treturn tokenClaims, nil\n}", "func DecodeClaims(claims interface{}) (result Claims, err error) {\n\terr = mapstructure.Decode(claims, &result)\n\treturn result, err\n}", "func (j *JWT) VerifyClaims(claims map[string]interface{}) error {\n\terr := j.verifier(claims)\n\tif err != nil {\n\t\treturn errors.Wrap(ErrClaimsInvalid, \"jwt.VerifyClaims\")\n\t}\n\treturn nil\n}", "func readClaimsFromFile(filename string) ([]claim, error) {\n\tf, err := os.Open(filename)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\tif err = f.Close(); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}()\n\ts := bufio.NewScanner(f)\n\n\ttotalArea := 0\n\tclaims := make([]claim, 0)\n\tfor s.Scan() {\n\t\tline := s.Text()\n\t\tvar (\n\t\t\ta, b, c, d, e int\n\t\t)\n\t\tfmt.Sscanf(line, \"#%d @ %d,%d: %dx%d\", &a, &b, &c, &d, &e)\n\t\tclaims = append(claims, claim{b, c, d, e})\n\n\t\ttotalArea += (d * e)\n\t}\n\treturn claims, nil\n}", "func JWTClaims(tokenString string) (map[string]interface{}, bool) {\n\ttoken, err := jwt.Parse(tokenString, signingKeyJWT)\n\tif err != nil {\n\t\treturn nil, false\n\t}\n\n\treturn token.Claims, token.Valid\n}", "func (manager *JWTManager) Verify(accessToken string) (*UserClaims, error) {\n\ttoken, err := jwt.ParseWithClaims(\n\t\taccessToken,\n\t\t&UserClaims{},\n\t\tfunc (token *jwt.Token) (interface{}, error) {\n\t\t\t_, ok := token.Method.(*jwt.SigningMethodHMAC)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"unexpected token signing method\")\n\t\t\t}\n\n\t\t\treturn []byte(manager.secretKey), nil\n\t\t},\n\t)\n\n\tif err != nil {\n\t\treturn nil, 
fmt.Errorf(\"invalid token: %w\", err)\n\t}\n\n\tclaims, ok := token.Claims.(*UserClaims)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"invalid token claims\")\n\t}\n\n\treturn claims, nil\n}", "func (auth *AuthCookie) ClaimsIdentity() string {\n\treturn auth.Claim(NameIdentity)\n}", "func GetAuthFromContext(ctx context.Context) *UserClaims {\n\traw := ctx.Value(UserCtxKey)\n\n\tif raw == nil {\n\t\treturn nil\n\t}\n\n\treturn raw.(*UserClaims)\n}", "func GetUser(c echo.Context) (res *JwtCustomClaims) {\n\tuser := c.Get(\"user\")\n\tif user != nil {\n\t\tres = user.(*JwtCustomClaims)\n\t}\n\treturn res\n}", "func NodeClaims(ctx context.Context, db DB, nodeID string) (claims []*Claim, err error) {\n\tq := dynamo.NewQuery(ClaimTableName, \"#node = :node\")\n\tq.SetIndexName(ClaimNodeIdxName)\n\tq.AddExpressionName(\"#node\", \"node\")\n\tq.AddExpressionValue(\":node\", nodeID)\n\tif _, err := q.ExecuteWithContext(ctx, db, &claims); err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to query\")\n\t}\n\n\treturn claims, nil\n}", "func NewClaim(t *Token) *CustomClaims {\n\tvar iat = time.Now().Unix()\n\tvar expTime = iat + t.exp\n\n\treturn &CustomClaims{\n\t\tjwt.StandardClaims{\n\t\t\tNotBefore: iat,\n\t\t\tIssuedAt: iat,\n\t\t\tExpiresAt: expTime,\n\t\t\tId: t.identifier,\n\t\t\tSubject: t.subject,\n\t\t\tIssuer: t.issuer,\n\t\t\tAudience: t.audience,\n\t\t},\n\t}\n}", "func (j *JwtAuthenticator) AuthenticateToken(ctx context.Context, rawtoken string) (*Claims, error) {\n\n\t// Parse token\n\ttoken, err := jwt.Parse(rawtoken, func(token *jwt.Token) (interface{}, error) {\n\n\t\t// Verify Method\n\t\tif strings.HasPrefix(token.Method.Alg(), \"RS\") {\n\t\t\t// RS256, RS384, or RS512\n\t\t\treturn j.rsaKey, nil\n\t\t} else if strings.HasPrefix(token.Method.Alg(), \"ES\") {\n\t\t\t// ES256, ES384, or ES512\n\t\t\treturn j.ecdsKey, nil\n\t\t} else if strings.HasPrefix(token.Method.Alg(), \"HS\") {\n\t\t\t// HS256, HS384, or HS512\n\t\t\treturn j.sharedSecretKey, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"Unknown token algorithm: %s\", token.Method.Alg())\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !token.Valid {\n\t\treturn nil, fmt.Errorf(\"Token failed validation\")\n\t}\n\n\t// Get claims\n\tclaims, ok := token.Claims.(jwt.MapClaims)\n\tif claims == nil || !ok {\n\t\treturn nil, fmt.Errorf(\"No claims found in token\")\n\t}\n\n\t// Check for required claims\n\tfor _, requiredClaim := range requiredClaims {\n\t\tif _, ok := claims[requiredClaim]; !ok {\n\t\t\t// Claim missing\n\t\t\treturn nil, fmt.Errorf(\"Required claim %v missing from token\", requiredClaim)\n\t\t}\n\t}\n\n\t// Token now has been verified.\n\t// Claims holds all the authorization information.\n\t// Here we need to first decode it then unmarshal it from JSON\n\tparts := strings.Split(token.Raw, \".\")\n\tclaimBytes, err := jwt.DecodeSegment(parts[1])\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to decode claims: %v\", err)\n\t}\n\tvar sdkClaims Claims\n\terr = json.Unmarshal(claimBytes, &sdkClaims)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to get sdkclaims: %v\", err)\n\t}\n\n\tif err := validateUsername(j.usernameClaim, &sdkClaims); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &sdkClaims, nil\n}", "func (t *CarInsuranceChaincode) getClaim(stub shim.ChaincodeStubInterface, args []string) ([]byte, error) {\n\tvar key, jsonResp string\n\t//var err error\n\n\tif len(args) != 1 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. 
Expecting name of the key to query\")\n\t}\n\n\tkey = args[0]\n\tvalAsbytes, err := stub.GetState(key)\n\tif err != nil {\n\t\tjsonResp = \"{\\\"Error\\\":\\\"Failed to get value of \" + key + \"\\\"}\"\n\t\treturn nil, errors.New(jsonResp)\n\t}\n\n\treturn valAsbytes, nil\n}", "func constructRegisteredClaim(tokenClaims *jwt.Claims, claim Claim) error {\n\tswitch claim.Field() {\n\tcase Issuer:\n\t\ttokenClaims.Registered.Issuer = claim.String\n\tcase Subject:\n\t\ttokenClaims.Registered.Subject = claim.String\n\tcase Audience:\n\t\ttokenClaims.Registered.Audiences = append(tokenClaims.Registered.Audiences, claim.String)\n\tcase Expires:\n\t\tif claim.Type == TimeType {\n\t\t\tt, err := claim.Time()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\ttokenClaims.Registered.Expires = jwt.NewNumericTime(t)\n\t\t} else {\n\t\t\treturn errors.New(\"invalid type for exp\")\n\t\t}\n\tcase NotBefore:\n\t\tif claim.Type == TimeType {\n\t\t\tt, err := claim.Time()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\ttokenClaims.Registered.NotBefore = jwt.NewNumericTime(t)\n\t\t} else {\n\t\t\treturn errors.New(\"invalid type for nbf\")\n\t\t}\n\tcase Issued:\n\t\tif claim.Type == TimeType {\n\t\t\tt, err := claim.Time()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\ttokenClaims.Registered.Issued = jwt.NewNumericTime(t)\n\t\t} else {\n\t\t\treturn errors.New(\"invalid type for iat\")\n\t\t}\n\tcase ID:\n\t\ttokenClaims.Registered.ID = claim.String\n\t}\n\n\treturn nil\n}", "func (o *OIDCIDVerifier) Verify(ctx context.Context, rawIDToken string) (*Claims, error) {\n\tidToken, err := o.Verifier.Verify(ctx, rawIDToken)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to verify rawIDToken\")\n\t}\n\tvar claims Claims\n\tif err = idToken.Claims(&claims); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &claims, nil\n}", "func (c *AppClaims) ParseClaims(claims jwt.MapClaims) error {\n\tid, ok := claims[\"id\"]\n\tif !ok {\n\t\treturn errors.New(\"could not parse claim id\")\n\t}\n\tc.ID = int(id.(float64))\n\n\tsub, ok := claims[\"sub\"]\n\tif !ok {\n\t\treturn errors.New(\"could not parse claim sub\")\n\t}\n\tc.Sub = sub.(string)\n\n\trl, ok := claims[\"roles\"]\n\tif !ok {\n\t\treturn errors.New(\"could not parse claims roles\")\n\t}\n\n\tvar roles []string\n\tif rl != nil {\n\t\tfor _, v := range rl.([]interface{}) {\n\t\t\troles = append(roles, v.(string))\n\t\t}\n\t}\n\tc.Roles = roles\n\n\treturn nil\n}", "func ExpiredClaims(ctx context.Context, db DB, limit int64) (claims []*Claim, err error) {\n\tfor i := int64(0); i < ClaimScatterPartitions; i++ {\n\t\tq := dynamo.NewQuery(ClaimTableName, \"part = :part AND #ttl BETWEEN :minttl AND :maxttl\")\n\t\tq.SetIndexName(ClaimTTLIdxName)\n\t\tq.SetLimit(limit)\n\t\tq.AddExpressionValue(\":part\", i)\n\t\tq.AddExpressionName(\"#ttl\", \"ttl\")\n\t\tq.AddExpressionValue(\":minttl\", 1)\n\t\tq.AddExpressionValue(\":maxttl\", time.Now().Unix())\n\n\t\tpartClaims := []*Claim{}\n\t\tif _, err := q.ExecuteWithContext(ctx, db, &partClaims); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to query partition %d\", i)\n\t\t}\n\n\t\tclaims = append(claims, partClaims...)\n\t}\n\n\treturn claims, nil\n}", "func (middleware *Middleware) CheckIfTokenExpire(c controller.MContext) (*CustomClaims, error) {\n\ttoken, err := middleware.ParseToken(c)\n\tif err != nil {\n\t\tvalidationErr, ok := err.(*jwt.ValidationError)\n\t\tif !ok || validationErr.Errors != jwt.ValidationErrorExpired {\n\t\t\treturn nil, 
err\n\t\t}\n\t}\n\n\tif token == nil {\n\t\treturn nil, ErrEmptyAuthHeader\n\t}\n\n\tclaims := token.Claims.(*CustomClaims)\n\n\tif claims.ExpiresAt < jwt.TimeFunc().Unix() {\n\t\treturn nil, ErrExpiredToken\n\t}\n\n\treturn claims, nil\n}", "func (m *MockFullNode) StateGetClaims(arg0 context.Context, arg1 address.Address, arg2 types0.TipSetKey) (map[verifreg.ClaimId]verifreg.Claim, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"StateGetClaims\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(map[verifreg.ClaimId]verifreg.Claim)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m JwtVapiClaims) Valid() error {\n\treturn nil\n}", "func makeClaims(user User) Claims {\n\texpirationTime := time.Now().Add(time.Duration(10 * time.Minute)).Unix()\n\tclaims := Claims{\n\t\tName: user.Name,\n\t\tSurname: user.Surname,\n\t\tType: user.Type,\n\t\tStandardClaims: jwt.StandardClaims{ExpiresAt: expirationTime},\n\t\tMail: user.Mail,\n\t}\n\treturn claims\n}", "func (ja *jwtAuthorizer) Get(token string) (*auth.AuthUser, error) {\n\tt, err := ja.parse(token)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"jwt token cannot be parsed: %s\", err)\n\t}\n\tath := t.Claims[\"user\"].(map[string]interface{})\n\tvar a auth.AuthUser\n\ta.Uid = ath[\"uid\"].(string)\n\ta.Name = ath[\"name\"].(string)\n\ta.Network = ath[\"network\"].(string)\n\ta.BackgroundUrl = ath[\"backgroundurl\"].(string)\n\ta.ThumbnailUrl = ath[\"thumbnail\"].(string)\n\treturn &a, nil\n}", "func Read(signingKey, jwtString string) (map[string]interface{}, error) {\n\ttoken, err := jwt.Parse(jwtString, func(token *jwt.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"Unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t}\n\t\treturn []byte(signingKey), nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !token.Valid {\n\t\treturn nil, invalidJwtError(\"claim validation failed\")\n\t}\n\tif claims, ok := token.Claims.(jwt.MapClaims); ok {\n\t\tif custom, ok := claims[\"custom_claims\"].(map[string]interface{}); ok {\n\t\t\treturn custom, nil\n\t\t}\n\t}\n\treturn nil, invalidJwtError(\"could not read claims\")\n}" ]
[ "0.8247486", "0.814989", "0.79294944", "0.7875769", "0.7714982", "0.7493702", "0.73716587", "0.7146246", "0.7055893", "0.7031104", "0.7011716", "0.69563204", "0.6920891", "0.6904525", "0.6884058", "0.68386084", "0.6778676", "0.677816", "0.66969836", "0.6605809", "0.6596906", "0.65833473", "0.65510786", "0.65271276", "0.6453032", "0.6428453", "0.6415198", "0.6372284", "0.6362848", "0.6362848", "0.6334732", "0.62650985", "0.6254243", "0.6243351", "0.62302", "0.61768097", "0.61445254", "0.61157656", "0.61086035", "0.6056693", "0.60123616", "0.5991845", "0.59805065", "0.5977028", "0.596643", "0.59475803", "0.59458286", "0.59090775", "0.5887942", "0.58782846", "0.58480555", "0.5846693", "0.58418584", "0.5800897", "0.57735974", "0.577299", "0.5764514", "0.5748057", "0.571926", "0.57069004", "0.5690541", "0.56608963", "0.5655863", "0.5653971", "0.56411695", "0.5635166", "0.56293035", "0.5613849", "0.56094146", "0.55996627", "0.559629", "0.55928564", "0.55893457", "0.55798185", "0.5570976", "0.55690473", "0.5565237", "0.5541248", "0.5516778", "0.55144924", "0.5505673", "0.5467541", "0.5458111", "0.5445889", "0.5420274", "0.53834337", "0.5360655", "0.5358484", "0.53501755", "0.53307295", "0.5328784", "0.5319425", "0.5309099", "0.52755713", "0.52717364", "0.52679354", "0.5260646", "0.5257944", "0.5251129" ]
0.8447192
1
NewOIDC creates a config and a verifier for the OIDC interaction
func NewOIDC(c config.OAuthConfig) (oauthConfig *oauth2.Config, oauthVerifier OIDCVerifier) {
	ctx := context.Background()
	provider, err := oidc.NewProvider(ctx, c.Provider)
	if err != nil {
		log.Fatal(err)
	}
	oidcConfig := &oidc.Config{
		ClientID: c.ClientID,
	}
	ver := provider.Verifier(oidcConfig)
	oauthVerifier = &oidcVerifier{ver}
	oauthConfig = &oauth2.Config{
		ClientID:     c.ClientID,
		ClientSecret: c.ClientSecret,
		Endpoint:     provider.Endpoint(),
		RedirectURL:  c.RedirectURL,
		Scopes:       openIDScope,
	}
	return
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewOIDC(config *OIDCAuthConfig) (*OIDCAuthenticator, error) {\n\t// Reverting the defaultTimeout base context as some of the coreos oidc api\n\t// takes this context for subsequent call and end up using expired context.\n\tctx := context.Background()\n\n\tp, err := oidc.NewProvider(ctx, config.Issuer)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to communicate with OIDC provider %s: %v\",\n\t\t\tconfig.Issuer,\n\t\t\terr)\n\t}\n\n\tv := p.Verifier(&oidc.Config{\n\t\tClientID: config.ClientID,\n\t\tSkipClientIDCheck: config.SkipClientIDCheck,\n\t\tSkipIssuerCheck: config.SkipIssuerCheck,\n\t})\n\treturn &OIDCAuthenticator{\n\t\turl: config.Issuer,\n\t\tusernameClaim: config.UsernameClaim,\n\t\tnamespace: config.Namespace,\n\t\tprovider: p,\n\t\tverifier: v,\n\t}, nil\n}", "func NewConfigAndVerifier(c conf.OAuthConfig) (OIDCConfig, OIDCVerifier) {\n\tctx := context.Background()\n\tprovider, err := oidc.NewProvider(ctx, c.Provider)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"could not create a new OIDC provider: %v\", err))\n\t}\n\tver := provider.Verifier(&oidc.Config{\n\t\tClientID: c.ClientID,\n\t\tSupportedSigningAlgs: []string{\"RS256\", \"HS256\"},\n\t})\n\toidcVer := &oidcVerifier{ver}\n\n\tvar endPoint oauth2.Endpoint\n\t// either the endpoint is \"constructed\" by the supplied endpoint-URL\n\t// or the specific provider-endpoint is used\n\tif c.EndPointURL != \"\" {\n\t\tendPoint = oauth2.Endpoint{\n\t\t\tAuthURL: c.EndPointURL + \"/auth\",\n\t\t\tTokenURL: c.EndPointURL + \"/token\",\n\t\t}\n\t} else {\n\t\tendPoint = provider.Endpoint()\n\t}\n\toidcCfg := &oidcConfig{\n\t\tconfig: &oauth2.Config{\n\t\t\tClientID: c.ClientID,\n\t\t\tClientSecret: c.ClientSecret,\n\t\t\tEndpoint: endPoint,\n\t\t\tRedirectURL: c.RedirectURL,\n\t\t\tScopes: openIDScope,\n\t\t},\n\t}\n\treturn oidcCfg, oidcVer\n}", "func (o *OIDC) Init(config Config) (err error) {\n\tswitch {\n\tcase o.Type == \"\":\n\t\treturn errors.New(\"type cannot be empty\")\n\tcase o.Name == \"\":\n\t\treturn errors.New(\"name cannot be empty\")\n\tcase o.ClientID == \"\":\n\t\treturn errors.New(\"clientID cannot be empty\")\n\tcase o.ConfigurationEndpoint == \"\":\n\t\treturn errors.New(\"configurationEndpoint cannot be empty\")\n\t}\n\n\t// Validate listenAddress if given\n\tif o.ListenAddress != \"\" {\n\t\tif _, _, err := net.SplitHostPort(o.ListenAddress); err != nil {\n\t\t\treturn errors.Wrap(err, \"error parsing listenAddress\")\n\t\t}\n\t}\n\n\t// Decode and validate openid-configuration endpoint\n\tu, err := url.Parse(o.ConfigurationEndpoint)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"error parsing %s\", o.ConfigurationEndpoint)\n\t}\n\tif !strings.Contains(u.Path, \"/.well-known/openid-configuration\") {\n\t\tu.Path = path.Join(u.Path, \"/.well-known/openid-configuration\")\n\t}\n\tif err := getAndDecode(u.String(), &o.configuration); err != nil {\n\t\treturn err\n\t}\n\tif err := o.configuration.Validate(); err != nil {\n\t\treturn errors.Wrapf(err, \"error parsing %s\", o.ConfigurationEndpoint)\n\t}\n\t// Replace {tenantid} with the configured one\n\tif o.TenantID != \"\" {\n\t\to.configuration.Issuer = strings.ReplaceAll(o.configuration.Issuer, \"{tenantid}\", o.TenantID)\n\t}\n\t// Get JWK key set\n\to.keyStore, err = newKeyStore(o.configuration.JWKSetURI)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\to.ctl, err = NewController(o, o.Claims, config, o.Options)\n\treturn\n}", "func NewOIDC(logger log.Logger, configs []TenantOIDCConfig) (http.Handler, map[string]Middleware, []error) 
{\n\thandlers := map[string]http.Handler{}\n\tmiddlewares := map[string]Middleware{}\n\twarnings := make([]error, 0, len(configs))\n\n\tfor _, c := range configs {\n\t\tp, err := NewProvider(context.TODO(), logger, getCookieForTenant(c.Tenant), \"/\"+c.Tenant, c.OIDCConfig)\n\t\tif err != nil {\n\t\t\twarnings = append(warnings, fmt.Errorf(\"failed to instantiate OIDC provider for tenant %q: %w\", c.Tenant, err))\n\t\t\tcontinue\n\t\t}\n\n\t\tr := chi.NewRouter()\n\t\tr.Handle(\"/login\", p.LoginHandler())\n\t\tr.Handle(\"/callback\", p.CallbackHandler())\n\n\t\thandlers[c.Tenant] = r\n\t\tmiddlewares[c.Tenant] = p.Middleware()\n\t}\n\n\tr := chi.NewRouter()\n\tr.Mount(\"/\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ttenant, ok := GetTenant(r.Context())\n\t\tconst msg = \"error finding tenant\"\n\t\tif !ok {\n\t\t\tlevel.Warn(logger).Log(\"msg\", msg)\n\t\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\th, ok := handlers[tenant]\n\t\tif !ok {\n\t\t\tlevel.Debug(logger).Log(\"msg\", msg)\n\t\t\thttp.Error(w, msg, http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\th.ServeHTTP(w, r)\n\t}))\n\n\treturn r, middlewares, warnings\n}", "func New(oidcConfig OIDCConfig, oidcVerifier OIDCVerifier, jwtConfig conf.Security, repo store.Repository) Service {\n\treturn &oidcService{\n\t\toauthConfig: oidcConfig,\n\t\toauthVerifier: oidcVerifier,\n\t\trepo: repo,\n\t\tjwtConfig: jwtConfig,\n\t}\n}", "func New(cfg config.Config) (*Client, error) {\n\tclient := &Client{\n\t\tconfig: cfg.Authentication.OIDC,\n\t}\n\n\tif !client.config.Enabled {\n\t\t// if oidc is not enabled, we are done, no need to setup an actual client.\n\t\t// The \"disabled\" client is however still valuable to deny any requests\n\t\t// coming in with an OAuth token set.\n\t\treturn client, nil\n\t}\n\n\tif err := client.init(); err != nil {\n\t\treturn nil, fmt.Errorf(\"oidc init: %v\", err)\n\t}\n\n\treturn client, nil\n}", "func NewProvider(ctx context.Context, logger log.Logger, cookieName, redirectURL string, config OIDCConfig) (*OIDCProvider, error) {\n\tt := &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDialContext: (&net.Dialer{\n\t\t\tTimeout: 30 * time.Second,\n\t\t\tKeepAlive: 30 * time.Second,\n\t\t}).DialContext,\n\t\tForceAttemptHTTP2: true,\n\t\tMaxIdleConns: 100,\n\t\tIdleConnTimeout: 90 * time.Second,\n\t\tTLSHandshakeTimeout: 10 * time.Second,\n\t\tExpectContinueTimeout: 1 * time.Second,\n\t}\n\tif config.IssuerCA != nil {\n\t\tt.TLSClientConfig = &tls.Config{\n\t\t\tRootCAs: x509.NewCertPool(),\n\t\t}\n\t\tt.TLSClientConfig.RootCAs.AddCert(config.IssuerCA)\n\t}\n\n\tclient := &http.Client{\n\t\tTimeout: 5 * time.Second,\n\t\tTransport: t,\n\t}\n\n\tprovider, err := oidc.NewProvider(oidc.ClientContext(ctx, client), config.IssuerURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\toauth2Config := oauth2.Config{\n\t\tClientID: config.ClientID,\n\t\tClientSecret: config.ClientSecret,\n\t\tEndpoint: provider.Endpoint(),\n\t\tRedirectURL: config.RedirectURL,\n\t\tScopes: []string{\"openid\", \"profile\", \"email\", \"groups\"},\n\t}\n\n\tverifier := provider.Verifier(&oidc.Config{ClientID: config.ClientID})\n\n\treturn &OIDCProvider{\n\t\tlogger: logger,\n\t\tprovider: provider,\n\t\tclient: client,\n\t\tconfig: config,\n\t\tcookieName: cookieName,\n\t\tredirectURL: redirectURL,\n\t\toauth2Config: oauth2Config,\n\t\tverifier: verifier,\n\t}, nil\n}", "func NewOIDCAuthenticator(config *OIDCConfig) (authenticator.Request, error) 
{\n\ttokenAuthenticator, err := oidc.New(oidc.Options{\n\t\tIssuerURL: config.IssuerURL,\n\t\tClientID: config.ClientID,\n\t\tCAFile: config.CAFile,\n\t\tUsernameClaim: config.UsernameClaim,\n\t\tUsernamePrefix: config.UsernamePrefix,\n\t\tGroupsClaim: config.GroupsClaim,\n\t\tGroupsPrefix: config.GroupsPrefix,\n\t\tSupportedSigningAlgs: config.SupportedSigningAlgs,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn bearertoken.New(tokenAuthenticator), nil\n}", "func NewVerifier(config *VerifierConfig) *oidc.IDTokenVerifier {\n\tprovider, err := oidc.NewProvider(CTX, config.Authority)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\taudience := \"\"\n\tif config.Audience != nil {\n\t\taudience = *config.Audience\n\t}\n\treturn provider.Verifier(&oidc.Config{\n\t\tClientID: audience,\n\t\tSkipClientIDCheck: config.Audience == nil,\n\t})\n}", "func (c *config) newConfig(redirect string) *oauth2.Config {\n\treturn &oauth2.Config{\n\t\tClientID: c.Client,\n\t\tClientSecret: c.Secret,\n\t\tEndpoint: oauth2.Endpoint{\n\t\t\tAuthURL: fmt.Sprintf(\"%s/site/oauth2/authorize\", c.URL),\n\t\t\tTokenURL: fmt.Sprintf(\"%s/site/oauth2/access_token\", c.URL),\n\t\t},\n\t\tRedirectURL: fmt.Sprintf(\"%s/authorize\", redirect),\n\t}\n}", "func newOIDCClient(tokens *oidc.Tokens[*oidc.IDTokenClaims]) *oidcClient {\n\tclient := oidcClient{\n\t\ttokens: tokens,\n\t\thttpClient: &http.Client{},\n\t\toidcTransport: &oidcTransport{},\n\t}\n\n\t// Ensure client.tokens is never nil otherwise authenticate() will panic.\n\tif client.tokens == nil {\n\t\tclient.tokens = &oidc.Tokens[*oidc.IDTokenClaims]{}\n\t}\n\n\treturn &client\n}", "func New(authenticator auth.Authenticator) clevergo.MiddlewareFunc {\n\treturn func(next clevergo.Handle) clevergo.Handle {\n\t\treturn func(c *clevergo.Context) error {\n\t\t\tidentity, err := authenticator.Authenticate(c.Request, c.Response)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tauthenticator.Challenge(c.Request, c.Response)\n\t\t\t} else {\n\t\t\t\tc.WithValue(auth.IdentityKey, identity)\n\t\t\t}\n\t\t\treturn next(c)\n\t\t}\n\t}\n}", "func New(cfg *config.Config) *OidcService {\n\n\treturn &OidcService{Client: client.New(\n\t\t*cfg,\n\t\tmetadata.ClientInfo{\n\t\t\tServiceName: ServiceName,\n\t\t\tEndpoint: *cfg.Endpoint,\n\t\t\tAPIVersion: pingaccess.SDKVersion,\n\t\t},\n\t)}\n}", "func NewOptions() *Options {\n\n\tclients := make([]ClientOptions,0)\n\n\tgoogleClient := ClientOptions{\n\t\tProviderName: \t\t\t\"google\",\n\t\tClientID: \t\t\t\t\"492051402277-jjgt15h9nc3p0d550dcbagn9d87f5hqn.apps.googleusercontent.com\",\n\t\tClientSecret:\t\t\t\"vQyHZ6IxPdTcIQ8BFHXCpO-W\",\n\t\tOIDCIssuerURL: \t\t\t\"https://accounts.google.com\",\n\t\tApprovalPrompt:\t\t\t\"force\",\n\t\tSkipOIDCDiscovery:\t\tfalse,\n\t\tScope:\t\t\t\t\t\"openid email profile\",\n\t}\n\n\tif googleClient.OIDCIssuerURL != \"\" {\n\n\t\tctx := context.Background()\n\n\t\t// Construct a manual IDTokenVerifier from issuer URL & JWKS URI\n\t\t// instead of metadata discovery if we enable -skip-oidc-discovery.\n\t\t// In this case we need to make sure the required endpoints for\n\t\t// the provider are configured.\n\t\tif googleClient.SkipOIDCDiscovery {\n\t\t\tif googleClient.LoginURL == \"\" {\n\t\t\t\tpanic(errors.New(\"missing setting: login-url\"))\n\t\t\t}\n\t\t\tif googleClient.RedeemURL == \"\" {\n\t\t\t\tpanic(errors.New(\"missing setting: redeem-url\"))\n\t\t\t}\n\t\t\tif googleClient.OIDCJwksURL == \"\" {\n\t\t\t\tpanic(errors.New(\"missing setting: 
oidc-jwks-url\"))\n\t\t\t}\n\t\t\tkeySet := oidc.NewRemoteKeySet(ctx, googleClient.OIDCJwksURL)\n\t\t\tgoogleClient.oidcVerifier = oidc.NewVerifier(googleClient.OIDCIssuerURL, keySet, &oidc.Config{\n\t\t\t\tClientID: googleClient.ClientID,\n\t\t\t})\n\t\t} else {\n\t\t\t// Configure discoverable provider data.\n\t\t\tprovider, err := oidc.NewProvider(ctx, googleClient.OIDCIssuerURL)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tgoogleClient.oidcVerifier = provider.Verifier(&oidc.Config{\n\t\t\t\tClientID: googleClient.ClientID,\n\t\t\t})\n\n\t\t\tgoogleClient.LoginURL = provider.Endpoint().AuthURL\n\t\t\tgoogleClient.RedeemURL = provider.Endpoint().TokenURL\n\n\t\t}\n\t\tif googleClient.Scope == \"\" {\n\t\t\tgoogleClient.Scope = \"openid email profile\"\n\t\t}\n\t}\n\n\tclients = append(clients, googleClient)\n\n\treturn &Options{\n\t\tCookieOptions: options.CookieOptions{\n\t\t\tCookieName: \"_oauth2_proxy\",\n\t\t\tCookieSecret: HashKeyCookie,\n\t\t\tCookieDomain: \"127.0.0.1\",\n\t\t\tCookieSecure: false,\n\t\t\tCookieHTTPOnly: true,\n\t\t\tCookieExpire: time.Duration(168) * time.Hour,\n\t\t\tCookieRefresh: time.Duration(0),\n\t\t},\n\t\tSessionOptions: options.SessionOptions{\n\t\t\tType: \"cookie\",\n\t\t},\n\t\tClient: \t\t\t clients,\n\t\tLoggingFilename: \"\",\n\t\tLoggingMaxSize: 100,\n\t\tLoggingMaxAge: 7,\n\t\tLoggingMaxBackups: 0,\n\t\tLoggingLocalTime: true,\n\t\tLoggingCompress: false,\n\t\tStandardLogging: true,\n\t\tStandardLoggingFormat: logger.DefaultStandardLoggingFormat,\n\t\tRequestLogging: true,\n\t\tRequestLoggingFormat: logger.DefaultRequestLoggingFormat,\n\t\tAuthLogging: true,\n\t\tAuthLoggingFormat: logger.DefaultAuthLoggingFormat,\n\t\tRedirectURL: \"/proxy/auth/callback\",\n\t\tredirectURL: parseURL(\"/proxy/auth/callback\", \"redirect\"),\n\t\tWhitelistDomains: []string{\"127.0.0.1:5000\",\"waqt.appointy.com\"},\n\t}\n}", "func (v Verifier) Init() Verifier {\n\tauthDomain = v.AuthDomain\n\tconfig.ClientID = v.PolicyAUD\n\tkeySet = oidc.NewRemoteKeySet(ctx, v.getCertsURL())\n\tverifier = oidc.NewVerifier(authDomain, keySet, config)\n\n\treturn v\n}", "func NewOidcConfigOK() *OidcConfigOK {\n\treturn &OidcConfigOK{}\n}", "func New(endpointURL string) (*Client, error) {\n\tu, err := url.Parse(endpointURL)\n\tif err != nil {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"invalid endpoint URL\")\n\t}\n\n\ttlsConfig := &tls.Config{}\n\n\ttransport := httptransport.New(u.Host, \"\", []string{u.Scheme})\n\ttransport.Transport = &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tTLSClientConfig: tlsConfig,\n\t}\n\n\tc := &Client{\n\t\tAPIEndpointURL: endpointURL,\n\t\tGSClientGen: gsclient.New(transport, strfmt.Default),\n\t}\n\n\tpkceResponse, err := oidc.RunPKCE(endpointURL)\n\tif err != nil {\n\t\tfmt.Println(\"DEBUG: Attempt to run the OAuth2 PKCE workflow with a local callback HTTP server failed.\")\n\t\treturn nil, microerror.Mask(err)\n\t}\n\n\tidToken, err := oidc.ParseIDToken(pkceResponse.IDToken)\n\tif err != nil {\n\t\treturn nil, microerror.Mask(err)\n\t}\n\n\t// store tokens\n\tc.IDToken = idToken\n\tc.AccessToken = pkceResponse.AccessToken\n\tc.RefreshToken = pkceResponse.RefreshToken\n\n\treturn c, nil\n}", "func Newcid(creator []byte) (ClientIdentity, error) {\n\tc := &clientIdentityImpl{creator: creator}\n\terr := c.init()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c, nil\n}", "func OIDCFlow(config Config) error {\n\tif config.Log == nil {\n\t\treturn errors.New(\"error validating config: Log is 
required\")\n\t}\n\n\tif config.IssuerURL == \"\" {\n\t\treturn errors.New(\"error validating config: IssuerURL is required\")\n\t}\n\n\tif config.ClientID == \"\" {\n\t\treturn errors.New(\"error validating config: ClientID is required\")\n\t}\n\n\tif config.ClientSecret == \"\" {\n\t\treturn errors.New(\"error validating config: ClientSecret is required\")\n\t}\n\n\tif config.TokenHandler == nil {\n\t\treturn errors.New(\"error validating config: TokenHandler is required\")\n\t}\n\n\tif config.SkipTLSVerify && config.IssuerRootCA != \"\" {\n\t\treturn errors.New(\"it makes no sense to use IssuerRootCA and SkipTLSVerify at the same time\")\n\t}\n\n\tappModel := &app{\n\t\tconfig: config,\n\t}\n\n\treturn oidcFlow(appModel)\n}", "func New(c Config) Client {\n\t// Generate secret proof. See https://developers.facebook.com/docs/graph-api/securing-requests/#appsecret_proof\n\tmac := hmac.New(sha256.New, []byte(c.Secret))\n\tmac.Write([]byte(c.Token))\n\n\tapi := strings.TrimSuffix(c.API, \"/\")\n\tif api == \"\" {\n\t\tapi = defaultAPI\n\t}\n\n\treturn Client{\n\t\ttoken: c.Token,\n\t\tsecretProof: hex.EncodeToString(mac.Sum(nil)),\n\t\tapi: api,\n\t}\n}", "func New(cfg *config.Config, opts ...Option) (*CA, error) {\n\tca := &CA{\n\t\tconfig: cfg,\n\t\topts: new(options),\n\t\tcompactStop: make(chan struct{}),\n\t}\n\tca.opts.apply(opts)\n\treturn ca.Init(cfg)\n}", "func newConfig(initiator bool) noise.Config {\n\treturn noise.Config{\n\t\tCipherSuite: cipherSuite,\n\t\tPattern: noise.HandshakeNK,\n\t\tInitiator: initiator,\n\t\tPrologue: []byte(\"dnstt 2020-04-13\"),\n\t}\n}", "func NewCAPI(quit qu.C, timeout ...time.Duration) (c *CAPI) {\n\tc = &CAPI{quit: quit}\n\tif len(timeout)>0 {\n\t\tc.Timeout = timeout[0]\n\t} else {\n\t\tc.Timeout = time.Second * 5\n\t}\n\treturn \n}", "func New(config *Config) (api.Authorize, error) {\n\tif config == nil {\n\t\tconfig = NewDefaultConfig()\n\t}\n\n\treturn &authorizer{config: config}, nil\n}", "func New(config Config) App {\n\treturn App{\n\t\treq: request.New().Post(strings.TrimSpace(*config.url)).BasicAuth(strings.TrimSpace(*config.user), *config.pass),\n\t}\n}", "func NewVerifier(issuerURL string, clientID string, jwksURL string) Verifier {\n\tkeySet := oidc.NewRemoteKeySet(context.Background(), jwksURL)\n\treturn &OIDCIDVerifier{\n\t\tVerifier: oidc.NewVerifier(issuerURL, keySet, &oidc.Config{\n\t\t\tClientID: clientID,\n\t\t}),\n\t}\n}", "func New(conf Config) (http.Handler, error) {\n\tbackendURL, err := url.Parse(conf.BackendURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar rbac *RBAC\n\tif conf.RulesFile != \"\" {\n\t\trbac, err = LoadRulesFile(conf.RulesFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tprovider, err := oidcp.NewProvider(context.TODO(), conf.OIDCIssuer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmux := http.NewServeMux()\n\tproxy := &oidcProxy{\n\t\tconfig: conf,\n\t\tprovider: provider,\n\t\tverifier: provider.Verifier(&oidcp.Config{ClientID: conf.ClientID}),\n\t\toauthConf: &oauth2.Config{\n\t\t\tClientID: conf.ClientID,\n\t\t\tClientSecret: conf.Secret,\n\t\t\tEndpoint: provider.Endpoint(),\n\t\t\tRedirectURL: strings.TrimRight(conf.ProxyURL, \"/\") + \"/.proxy/authcode\",\n\t\t\tScopes: append([]string{oidcp.ScopeOpenID}, conf.Scopes...),\n\t\t},\n\t\thandler: mux,\n\t\tproxy: httputil.NewSingleHostReverseProxy(backendURL),\n\t\trbac: rbac,\n\t}\n\n\tmux.HandleFunc(\"/.proxy/authcode\", proxy.handleAuthCode)\n\tmux.HandleFunc(\"/\", proxy.filter)\n\n\treturn proxy, nil\n}", "func 
newConfig(envParams envParams) error {\n\t// Initialize server config.\n\tsrvCfg := newServerConfigV14()\n\n\t// If env is set for a fresh start, save them to config file.\n\tif globalIsEnvCreds {\n\t\tsrvCfg.SetCredential(envParams.creds)\n\t}\n\n\tif globalIsEnvBrowser {\n\t\tsrvCfg.SetBrowser(envParams.browser)\n\t}\n\n\t// Create config path.\n\tif err := createConfigDir(); err != nil {\n\t\treturn err\n\t}\n\n\t// hold the mutex lock before a new config is assigned.\n\t// Save the new config globally.\n\t// unlock the mutex.\n\tserverConfigMu.Lock()\n\tserverConfig = srvCfg\n\tserverConfigMu.Unlock()\n\n\t// Save config into file.\n\treturn serverConfig.Save()\n}", "func NewTestCAWithConfig(config TestCAConfig) *types.CertAuthorityV2 {\n\t// privateKeys is to specify another RSA private key\n\tif len(config.PrivateKeys) == 0 {\n\t\tconfig.PrivateKeys = [][]byte{fixtures.PEMBytes[\"rsa\"]}\n\t}\n\tkeyBytes := config.PrivateKeys[0]\n\trsaKey, err := ssh.ParseRawPrivateKey(keyBytes)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tsigner, err := ssh.NewSignerFromKey(rsaKey)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tcert, err := tlsca.GenerateSelfSignedCAWithConfig(tlsca.GenerateCAConfig{\n\t\tSigner: rsaKey.(*rsa.PrivateKey),\n\t\tEntity: pkix.Name{\n\t\t\tCommonName: config.ClusterName,\n\t\t\tOrganization: []string{config.ClusterName},\n\t\t},\n\t\tTTL: defaults.CATTL,\n\t\tClock: config.Clock,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tca := &types.CertAuthorityV2{\n\t\tKind: types.KindCertAuthority,\n\t\tSubKind: string(config.Type),\n\t\tVersion: types.V2,\n\t\tMetadata: types.Metadata{\n\t\t\tName: config.ClusterName,\n\t\t\tNamespace: apidefaults.Namespace,\n\t\t},\n\t\tSpec: types.CertAuthoritySpecV2{\n\t\t\tType: config.Type,\n\t\t\tClusterName: config.ClusterName,\n\t\t},\n\t}\n\n\t// Match the key set to lib/auth/auth.go:newKeySet().\n\tswitch config.Type {\n\tcase types.DatabaseCA:\n\t\tca.Spec.ActiveKeys.TLS = []*types.TLSKeyPair{{Cert: cert, Key: keyBytes}}\n\tcase types.KindJWT, types.OIDCIdPCA:\n\t\t// Generating keys is CPU intensive operation. 
Generate JWT keys only\n\t\t// when needed.\n\t\tpublicKey, privateKey, err := testauthority.New().GenerateJWT()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tca.Spec.ActiveKeys.JWT = []*types.JWTKeyPair{{\n\t\t\tPublicKey: publicKey,\n\t\t\tPrivateKey: privateKey,\n\t\t}}\n\tcase types.UserCA, types.HostCA:\n\t\tca.Spec.ActiveKeys = types.CAKeySet{\n\t\t\tSSH: []*types.SSHKeyPair{{\n\t\t\t\tPublicKey: ssh.MarshalAuthorizedKey(signer.PublicKey()),\n\t\t\t\tPrivateKey: keyBytes,\n\t\t\t}},\n\t\t\tTLS: []*types.TLSKeyPair{{Cert: cert, Key: keyBytes}},\n\t\t}\n\tcase types.OpenSSHCA:\n\t\tca.Spec.ActiveKeys = types.CAKeySet{\n\t\t\tSSH: []*types.SSHKeyPair{{\n\t\t\t\tPublicKey: ssh.MarshalAuthorizedKey(signer.PublicKey()),\n\t\t\t\tPrivateKey: keyBytes,\n\t\t\t}},\n\t\t}\n\tcase types.SAMLIDPCA:\n\t\tca.Spec.ActiveKeys.TLS = []*types.TLSKeyPair{{Cert: cert, Key: keyBytes}}\n\tdefault:\n\t\tpanic(\"unknown CA type\")\n\t}\n\n\treturn ca\n}", "func New(file string) (conf *Config, err error) {\n\tconf = &Config{\n\t\tLogLevel: \"info\",\n\t\tSite: \"default\",\n\t\taudit: []auditors.Auditor{},\n\t}\n\n\trawconf, err := os.ReadFile(file)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"could not read config file %s\", file)\n\t}\n\n\terr = json.Unmarshal(rawconf, conf)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"could not parse config file %s\", file)\n\t}\n\n\tccfg, err := cconf.NewConfig(conf.ChoriaConfigFile)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"could not parse choria config %s\", conf.ChoriaConfigFile)\n\t}\n\n\tccfg.LogFile = conf.LogFile\n\tccfg.LogLevel = conf.LogLevel\n\tccfg.RPCAuthorization = false\n\n\t// by definition these are clients who do not have security credentials, verification is based on the JWT\n\tccfg.DisableSecurityProviderVerify = true\n\n\tconf.fw, err = choria.NewWithConfig(ccfg)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"could not configure choria\")\n\t}\n\n\terr = configureAuthenticator(conf)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"could not configure %s authenticator\", conf.AuthenticatorType)\n\t}\n\n\terr = configureSigner(conf)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"could not configure %s signer\", conf.SignerType)\n\t}\n\n\treturn conf, nil\n}", "func NewConfig(configFile string) (*Config, error) {\n\n\tcfg := &Config{\n\t\tHost: \"0.0.0.0\",\n\t\tPort: 8080,\n\t\tAllowEmptyClientSecret: false,\n\t\tScopes: []string{\"openid\", \"profile\", \"email\", \"offline_access\"},\n\t\tUsernameClaim: \"nickname\",\n\t\tEmailClaim: \"\",\n\t\tServeTLS: false,\n\t\tCertFile: \"/etc/gangway/tls/tls.crt\",\n\t\tKeyFile: \"/etc/gangway/tls/tls.key\",\n\t\tClusterCAPath: \"/var/run/secrets/kubernetes.io/serviceaccount/ca.crt\",\n\t\tHTTPPath: \"\",\n\t}\n\n\tif configFile != \"\" {\n\t\tdata, err := ioutil.ReadFile(configFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\terr = yaml.Unmarshal([]byte(data), cfg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\terr := envconfig.Process(\"gangway\", cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = cfg.Validate()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Check for trailing slash on HTTPPath and remove\n\tcfg.HTTPPath = strings.TrimRight(cfg.HTTPPath, \"/\")\n\n\treturn cfg, nil\n}", "func New(e *casbin.Enforcer) *Casbin {\n\treturn &Casbin{enforcer: e}\n}", "func NewOIDCServer(serverFlow ServerFlowType, serverIP, serverPort, publicKeyPath, privateKeyPath string, devMode bool) OIDCServer 
{\n\n\treturn &oidcServer{\n\t\trsa: newRSAProcessor(serverFlow, publicKeyPath, privateKeyPath),\n\t\tkeyID: uuid.Must(uuid.NewV4()).String(),\n\t\tserverIP: serverIP,\n\t\tserverPort: serverPort,\n\t\tserverFlow: serverFlow,\n\t\tdevMode: devMode,\n\t}\n}", "func New(db *database.HealthAuthorityDB, config *Config) (*Verifier, error) {\n\tcache, err := cache.New(config.CacheDuration)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Verifier{db, cache}, nil\n}", "func NewCreateOIDCIssuerCmd() *cobra.Command {\n\tcreateOIDCIssuerCmd := &cobra.Command{\n\t\tUse: \"create-oidc-issuer --name NAME --region REGION --subscription-id SUBSCRIPTION_ID --tenant-id TENANT_ID --public-key-file PUBLIC_KEY_FILE\",\n\t\tShort: \"Create OIDC Issuer\",\n\t\tRun: createOIDCIssuerCmd,\n\t\tPersistentPreRun: initEnvForCreateOIDCIssuerCmd,\n\t}\n\n\t// Required parameters\n\tcreateOIDCIssuerCmd.PersistentFlags().StringVar(\n\t\t&CreateOIDCIssuerOpts.Name,\n\t\t\"name\",\n\t\t\"\",\n\t\t\"User-defined name for all created Azure resources. This user-defined name can be separate from the cluster's infra-id. \"+\n\t\t\tfmt.Sprintf(\"Azure resources created by ccoctl will be tagged with '%s_NAME = %s'\", ownedAzureResourceTagKeyPrefix, ownedAzureResourceTagValue),\n\t)\n\tcreateOIDCIssuerCmd.MarkPersistentFlagRequired(\"name\")\n\tcreateOIDCIssuerCmd.PersistentFlags().StringVar(&CreateOIDCIssuerOpts.Region, \"region\", \"\", \"Azure region in which to create identity provider infrastructure\")\n\tcreateOIDCIssuerCmd.MarkPersistentFlagRequired(\"region\")\n\tcreateOIDCIssuerCmd.PersistentFlags().StringVar(&CreateOIDCIssuerOpts.SubscriptionID, \"subscription-id\", \"\", \"Azure Subscription ID within which to create identity provider infrastructure\")\n\tcreateOIDCIssuerCmd.MarkPersistentFlagRequired(\"subscription-id\")\n\tcreateOIDCIssuerCmd.PersistentFlags().StringVar(&CreateOIDCIssuerOpts.TenantID, \"tenant-id\", \"\", \"Azure Tenant ID in which identity provider infrastructure will be created\")\n\tcreateOIDCIssuerCmd.MarkPersistentFlagRequired(\"tenant-id\")\n\tcreateOIDCIssuerCmd.PersistentFlags().StringVar(&CreateOIDCIssuerOpts.PublicKeyPath, \"public-key-file\", \"\", \"Path to public ServiceAccount signing key\")\n\tcreateOIDCIssuerCmd.MarkPersistentFlagRequired(\"public-key-file\")\n\n\t// Optional parameters\n\tcreateOIDCIssuerCmd.PersistentFlags().StringVar(\n\t\t&CreateOIDCIssuerOpts.OIDCResourceGroupName,\n\t\t\"oidc-resource-group-name\",\n\t\t\"\",\n\t\t// FIXME: Say what the default is gonna be, ie -oidc appended to the --name.\n\t\t\"The Azure resource group in which to create OIDC infrastructure including a storage account, blob storage container and user-assigned managed identities. \"+\n\t\t\t\"A resource group will be created with a name derived from the --name parameter if an --oidc-resource-group-name parameter was not provided.\",\n\t)\n\tcreateOIDCIssuerCmd.PersistentFlags().StringVar(\n\t\t&CreateOIDCIssuerOpts.StorageAccountName,\n\t\t\"storage-account-name\",\n\t\t\"\",\n\t\t\"The name of the Azure storage account in which to create OIDC issuer infrastructure. \"+\n\t\t\t\"A storage account will be created with a name derived from the --name parameter if a --storage-account-name parameter was not provided. \"+\n\t\t\t\"The storage account will be created within the OIDC resource group identified by the --oidc-resource-group-name parameter. 
\"+\n\t\t\t\"If pre-existing, the storage account must exist within the OIDC resource group identified by the --oidc-resource-group-name parameter. \"+\n\t\t\t\"Azure storage account names must be between 3 and 24 characters in length and may contain numbers and lowercase letters only.\",\n\t)\n\tcreateOIDCIssuerCmd.PersistentFlags().StringVar(\n\t\t&CreateOIDCIssuerOpts.BlobContainerName,\n\t\t\"blob-container-name\",\n\t\t\"\",\n\t\t\"The name of the Azure blob container in which to upload OIDC discovery documents. \"+\n\t\t\t\"A blob container will be created with a name derived from the --name parameter if a --blob-container-name parameter was not provided. \"+\n\t\t\t\"The blob container will be created within the OIDC resource group identified by the --oidc-resource-group-name parameter \"+\n\t\t\t\"and storage account identified by --storage-account-name.\",\n\t)\n\tcreateOIDCIssuerCmd.PersistentFlags().BoolVar(&CreateOIDCIssuerOpts.DryRun, \"dry-run\", false, \"Skip creating objects, and just save what would have been created into files\")\n\tcreateOIDCIssuerCmd.PersistentFlags().StringVar(&CreateOIDCIssuerOpts.OutputDir, \"output-dir\", \"\", \"Directory to place generated manifest files. Defaults to the current directory.\")\n\tcreateOIDCIssuerCmd.PersistentFlags().StringToStringVar(&CreateOIDCIssuerOpts.UserTags, \"user-tags\", map[string]string{}, \"User tags to be applied to Azure resources, multiple tags may be specified comma-separated for example: --user-tags key1=value1,key2=value2\")\n\n\treturn createOIDCIssuerCmd\n}", "func New(ctx context.Context, next http.Handler, config *Config, name string) (http.Handler, error) {\n if len(config.HeaderName) == 0 {\n return nil, fmt.Errorf(\"HeaderName cannot be empty\")\n }\n\n return &CNCFDemo{\n headerName: config.HeaderName,\n next: next,\n name: name,\n }, nil\n}", "func (g *FakeClientFactory) New(context.Context, client.Reader, string, string) (capb.ConfigAgentClient, controllers.ConnCloseFunc, error) {\n\tif g.Caclient == nil {\n\t\tg.Reset()\n\t}\n\treturn g.Caclient, emptyConnCloseFunc, nil\n}", "func New(config *Config) (*Provider, error) {\n\tif config.URL == \"\" {\n\t\tconfig.URL = fmt.Sprintf(\"http://%s\", config.ListenAddr)\n\t}\n\n\tif config.TOTP == \"\" {\n\t\tkey, err := totp.Generate(totp.GenerateOpts{\n\t\t\tIssuer: \"karmabot\",\n\t\t\tAccountName: \"slack\",\n\t\t})\n\n\t\tif err != nil {\n\t\t\tconfig.Log.Err(err).Fatal(\"an error occurred while generating a TOTP key\")\n\t\t} else {\n\t\t\tconfig.Log.KV(\"totpKey\", key.Secret()).Fatal(\"please use the following TOTP key\")\n\t\t}\n\t}\n\n\tprovider := &Provider{\n\t\tConfig: config,\n\t\tui: newUI(config),\n\t}\n\n\treturn provider, nil\n}", "func New(registrationDB OriginDatabase, connCache ConnectionCache) *IdentityVerifier {\n\treturn &IdentityVerifier{\n\t\tregistrationDB: registrationDB,\n\t\tconnCache: connCache,\n\t}\n}", "func NewCognitoIdentityProviderAPI(t mockConstructorTestingTNewCognitoIdentityProviderAPI) *CognitoIdentityProviderAPI {\n\tmock := &CognitoIdentityProviderAPI{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func New(config *Config) (*Operation, error) {\n\tstore, err := getTxnStore(config.StoreProvider)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"issuer store provider : %w\", err)\n\t}\n\n\tcontextOp, err := jsonldcontextrest.New(&storeProvider{config.StoreProvider})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"create jsonld context operation: %w\", err)\n\t}\n\n\tsvc 
:= &Operation{\n\t\ttokenIssuer: config.TokenIssuer,\n\t\ttokenResolver: config.TokenResolver,\n\t\tdocumentLoader: config.DocumentLoader,\n\t\tcmsURL: config.CMSURL,\n\t\tvcsURL: config.VCSURL,\n\t\tdidAuthHTML: config.DIDAuthHTML,\n\t\treceiveVCHTML: config.ReceiveVCHTML,\n\t\tvcHTML: config.VCHTML,\n\t\tdidCommHTML: config.DIDCommHTML,\n\t\tdidCommVpHTML: config.DIDCOMMVPHTML,\n\t\thttpClient: &http.Client{Transport: &http.Transport{TLSClientConfig: config.TLSConfig}},\n\t\trequestTokens: config.RequestTokens,\n\t\tissuerAdapterURL: config.IssuerAdapterURL,\n\t\tstore: store,\n\t\thomePage: config.OIDCCallbackURL,\n\t\tdidcommScopes: map[string]struct{}{},\n\t\tassuranceScopes: map[string]string{},\n\t\taddJSONLDContextHandler: contextOp.Add,\n\t}\n\n\tif config.didcommScopes != nil {\n\t\tsvc.didcommScopes = config.didcommScopes\n\t}\n\n\tif config.assuranceScopes != nil {\n\t\tsvc.assuranceScopes = config.assuranceScopes\n\t}\n\n\tif config.OIDCProviderURL != \"\" {\n\t\tsvc.oidcClient, err = oidcclient.New(&oidcclient.Config{\n\t\t\tOIDCClientID: config.OIDCClientID,\n\t\t\tOIDCClientSecret: config.OIDCClientSecret, OIDCCallbackURL: config.OIDCCallbackURL,\n\t\t\tOIDCProviderURL: config.OIDCProviderURL, TLSConfig: config.TLSConfig,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to create oidc client : %w\", err)\n\t\t}\n\t}\n\n\tsvc.registerHandler()\n\n\treturn svc, nil\n}", "func (ca *CA) Init(cfg *config.Config) (*CA, error) {\n\t// Set password, it's ok to set nil password, the ca will prompt for them if\n\t// they are required.\n\topts := []authority.Option{\n\t\tauthority.WithPassword(ca.opts.password),\n\t\tauthority.WithSSHHostPassword(ca.opts.sshHostPassword),\n\t\tauthority.WithSSHUserPassword(ca.opts.sshUserPassword),\n\t\tauthority.WithIssuerPassword(ca.opts.issuerPassword),\n\t}\n\tif ca.opts.linkedCAToken != \"\" {\n\t\topts = append(opts, authority.WithLinkedCAToken(ca.opts.linkedCAToken))\n\t}\n\n\tif ca.opts.database != nil {\n\t\topts = append(opts, authority.WithDatabase(ca.opts.database))\n\t}\n\n\tif ca.opts.quiet {\n\t\topts = append(opts, authority.WithQuietInit())\n\t}\n\n\twebhookTransport := http.DefaultTransport.(*http.Transport).Clone()\n\topts = append(opts, authority.WithWebhookClient(&http.Client{Transport: webhookTransport}))\n\n\tauth, err := authority.New(cfg, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tca.auth = auth\n\n\ttlsConfig, clientTLSConfig, err := ca.getTLSConfig(auth)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\twebhookTransport.TLSClientConfig = clientTLSConfig\n\n\t// Using chi as the main router\n\tmux := chi.NewRouter()\n\thandler := http.Handler(mux)\n\n\tinsecureMux := chi.NewRouter()\n\tinsecureHandler := http.Handler(insecureMux)\n\n\t// Add HEAD middleware\n\tmux.Use(middleware.GetHead)\n\tinsecureMux.Use(middleware.GetHead)\n\n\t// Add regular CA api endpoints in / and /1.0\n\tapi.Route(mux)\n\tmux.Route(\"/1.0\", func(r chi.Router) {\n\t\tapi.Route(r)\n\t})\n\n\t// Mount the CRL to the insecure mux\n\tinsecureMux.Get(\"/crl\", api.CRL)\n\tinsecureMux.Get(\"/1.0/crl\", api.CRL)\n\n\t// Add ACME api endpoints in /acme and /1.0/acme\n\tdns := cfg.DNSNames[0]\n\tu, err := url.Parse(\"https://\" + cfg.Address)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tport := u.Port()\n\tif port != \"\" && port != \"443\" {\n\t\tdns = fmt.Sprintf(\"%s:%s\", dns, port)\n\t}\n\n\t// ACME Router is only available if we have a database.\n\tvar acmeDB acme.DB\n\tvar acmeLinker acme.Linker\n\tif cfg.DB != nil 
{\n\t\tacmeDB, err = acmeNoSQL.New(auth.GetDatabase().(nosql.DB))\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"error configuring ACME DB interface\")\n\t\t}\n\t\tacmeLinker = acme.NewLinker(dns, \"acme\")\n\t\tmux.Route(\"/acme\", func(r chi.Router) {\n\t\t\tacmeAPI.Route(r)\n\t\t})\n\t\t// Use 2.0 because, at the moment, our ACME api is only compatible with v2.0\n\t\t// of the ACME spec.\n\t\tmux.Route(\"/2.0/acme\", func(r chi.Router) {\n\t\t\tacmeAPI.Route(r)\n\t\t})\n\t}\n\n\t// Admin API Router\n\tif cfg.AuthorityConfig.EnableAdmin {\n\t\tadminDB := auth.GetAdminDatabase()\n\t\tif adminDB != nil {\n\t\t\tacmeAdminResponder := adminAPI.NewACMEAdminResponder()\n\t\t\tpolicyAdminResponder := adminAPI.NewPolicyAdminResponder()\n\t\t\twebhookAdminResponder := adminAPI.NewWebhookAdminResponder()\n\t\t\tmux.Route(\"/admin\", func(r chi.Router) {\n\t\t\t\tadminAPI.Route(\n\t\t\t\t\tr,\n\t\t\t\t\tadminAPI.WithACMEResponder(acmeAdminResponder),\n\t\t\t\t\tadminAPI.WithPolicyResponder(policyAdminResponder),\n\t\t\t\t\tadminAPI.WithWebhookResponder(webhookAdminResponder),\n\t\t\t\t)\n\t\t\t})\n\t\t}\n\t}\n\n\tvar scepAuthority *scep.Authority\n\tif ca.shouldServeSCEPEndpoints() {\n\t\tscepPrefix := \"scep\"\n\t\tscepAuthority, err = scep.New(auth, scep.AuthorityOptions{\n\t\t\tService: auth.GetSCEPService(),\n\t\t\tDNS: dns,\n\t\t\tPrefix: scepPrefix,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"error creating SCEP authority\")\n\t\t}\n\n\t\t// According to the RFC (https://tools.ietf.org/html/rfc8894#section-7.10),\n\t\t// SCEP operations are performed using HTTP, so that's why the API is mounted\n\t\t// to the insecure mux.\n\t\tinsecureMux.Route(\"/\"+scepPrefix, func(r chi.Router) {\n\t\t\tscepAPI.Route(r)\n\t\t})\n\n\t\t// The RFC also mentions usage of HTTPS, but seems to advise\n\t\t// against it, because of potential interoperability issues.\n\t\t// Currently I think it's not bad to use HTTPS also, so that's\n\t\t// why I've kept the API endpoints in both muxes and both HTTP\n\t\t// as well as HTTPS can be used to request certificates\n\t\t// using SCEP.\n\t\tmux.Route(\"/\"+scepPrefix, func(r chi.Router) {\n\t\t\tscepAPI.Route(r)\n\t\t})\n\t}\n\n\t// helpful routine for logging all routes\n\t//dumpRoutes(mux)\n\t//dumpRoutes(insecureMux)\n\n\t// Add monitoring if configured\n\tif len(cfg.Monitoring) > 0 {\n\t\tm, err := monitoring.New(cfg.Monitoring)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thandler = m.Middleware(handler)\n\t\tinsecureHandler = m.Middleware(insecureHandler)\n\t}\n\n\t// Add logger if configured\n\tif len(cfg.Logger) > 0 {\n\t\tlogger, err := logging.New(\"ca\", cfg.Logger)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thandler = logger.Middleware(handler)\n\t\tinsecureHandler = logger.Middleware(insecureHandler)\n\t}\n\n\t// Create context with all the necessary values.\n\tbaseContext := buildContext(auth, scepAuthority, acmeDB, acmeLinker)\n\n\tca.srv = server.New(cfg.Address, handler, tlsConfig)\n\tca.srv.BaseContext = func(net.Listener) context.Context {\n\t\treturn baseContext\n\t}\n\n\t// only start the insecure server if the insecure address is configured\n\t// and, currently, also only when it should serve SCEP endpoints.\n\tif ca.shouldServeInsecureServer() {\n\t\t// TODO: instead opt for having a single server.Server but two\n\t\t// http.Servers handling the HTTP and HTTPS handler? 
The latter\n\t\t// will probably introduce more complexity in terms of graceful\n\t\t// reload.\n\t\tca.insecureSrv = server.New(cfg.InsecureAddress, insecureHandler, nil)\n\t\tca.insecureSrv.BaseContext = func(net.Listener) context.Context {\n\t\t\treturn baseContext\n\t\t}\n\t}\n\n\treturn ca, nil\n}", "func newExternalAuth(conf *Config) {\n\tgomniauth.SetSecurityKey(conf.Auth.Facebook.Key)\n\tgomniauth.WithProviders(\n\t\tfacebook.New(\n\t\t\tconf.Auth.Facebook.Key,\n\t\t\tconf.Auth.Facebook.Secret,\n\t\t\tconf.Auth.Facebook.URL,\n\t\t),\n\t\tgoogle.New(\n\t\t\tconf.Auth.Google.Key,\n\t\t\tconf.Auth.Google.Secret,\n\t\t\tconf.Auth.Google.URL,\n\t\t),\n\t)\n}", "func New() (*OCMProvider, error) {\n\treturn NewWithEnv(viper.GetString(Env))\n}", "func newVerifyingClient(c Client, previousResult Result, strict bool, sch *crypto.Scheme) Client {\n\treturn &verifyingClient{\n\t\tClient: c,\n\t\tindirectClient: c,\n\t\tpointOfTrust: previousResult,\n\t\tstrict: strict,\n\t\tscheme: sch,\n\t\tlog: log.DefaultLogger(),\n\t}\n}", "func new(master_key string) Ca {\n\tcatls, err := tls.LoadX509KeyPair(\"../storage/root-certificate/ca_cert.pem\", \"../storage/root-certificate/ca_key.pem\")\n\tcheck(err)\n\tfirst_start_time := time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC).AddDate(0, 0, 0)\n\treturn Ca{catls, master_key, first_start_time}\n}", "func New() (*CA, error) {\n\treturn &CA{}, nil\n}", "func newClient(ctx context.Context, cfg vcConfig) (*vsClient, error) {\n\tu := url.URL{\n\t\tScheme: \"https\",\n\t\tHost: cfg.server,\n\t\tPath: \"sdk\",\n\t}\n\n\tu.User = url.UserPassword(cfg.user, cfg.password)\n\tinsecure := cfg.insecure\n\n\tgc, err := govmomi.NewClient(ctx, &u, insecure)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"connecting to vSphere API: %w\", err)\n\t}\n\n\trc := rest.NewClient(gc.Client)\n\ttm := tags.NewManager(rc)\n\n\tvsc := vsClient{\n\t\tgovmomi: gc,\n\t\trest: rc,\n\t\ttagManager: tm,\n\t}\n\n\terr = vsc.rest.Login(ctx, u.User)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"logging into rest api: %w\", err)\n\t}\n\n\treturn &vsc, nil\n}", "func NewClient(config *Config) (c *Client, err error) {\n\tif config == nil {\n\t\treturn nil, errClientConfigNil\n\t}\n\n\tc = &Client{\n\t\trevocationTransport: http.DefaultTransport,\n\t}\n\n\tif c.transport, err = ghinstallation.NewAppsTransport(\n\t\thttp.DefaultTransport,\n\t\tint64(config.AppID),\n\t\t[]byte(config.PrvKey),\n\t); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif c.url, err = url.ParseRequestURI(fmt.Sprintf(\n\t\t\"%s/app/installations/%v/access_tokens\",\n\t\tstrings.TrimSuffix(fmt.Sprint(config.BaseURL), \"/\"),\n\t\tconfig.InsID,\n\t)); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif c.revocationURL, err = url.ParseRequestURI(fmt.Sprintf(\n\t\t\"%s/installation/token\",\n\t\tstrings.TrimSuffix(fmt.Sprint(config.BaseURL), \"/\"),\n\t)); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c, nil\n}", "func New(h *handler.Handler, c *config.Config) {\n\ttokenAuth = jwtauth.New(\"HS256\", []byte(c.Token), nil)\n\tr := chi.NewRouter()\n\ts := &server{\n\t\thand: h,\n\t\trouter: r,\n\t\taddress: c.Address,\n\t}\n\ts.makeHandlers()\n\ts.startServer()\n}", "func New(cfg hotstuff.Config) (hotstuff.Signer, hotstuff.Verifier) {\n\tec := &ecdsaCrypto{cfg}\n\treturn ec, ec\n}", "func NewConfig(cfg config.Config) *oauth2.Config {\n\tconf := &oauth2.Config{\n\t\tClientID: cfg.ClientID,\n\t\tScopes: []string{\"authorization_code\"},\n\t\tEndpoint: oauth2.Endpoint{\n\t\t\tTokenURL: config.TokenURL,\n\t\t\tAuthURL: 
config.AuthURL,\n\t\t},\n\t}\n\treturn conf\n}", "func New(stack consensus.Stack) consensus.Consenter {\n\t\n\tconfigLock.Lock()\n\tif config == nil{\n\t\tconfig = loadConfig()\n\t}\n\tdefer configLock.Unlock()\n\t\n\thandle, _, _ := stack.GetNetworkHandles()\n\tid, _ := getValidatorID(handle)\n\n\tswitch strings.ToLower(config.GetString(\"general.mode\")) {\n\tcase \"batch\":\n\t\treturn newObcBatch(id, config, stack)\n\tdefault:\n\t\tpanic(fmt.Errorf(\"Invalid PBFT mode: %s\", config.GetString(\"general.mode\")))\n\t}\n}", "func NewCSP(host string, port int, id string, password string, secureEndpoint ...bool) (*Aqua, error) {\n\n\taqua := Aqua{Host: host, Port: port, ID: id, Password: password, Secure: true, InsecureSkipVerify: true}\n\taqua.RestClient = *gorequest.New().TLSClientConfig(&tls.Config{InsecureSkipVerify: aqua.InsecureSkipVerify})\n\n\tif len(secureEndpoint) > 0 {\n\t\taqua.Secure = secureEndpoint[0]\n\t\tif len(secureEndpoint) > 1 {\n\t\t\taqua.InsecureSkipVerify = secureEndpoint[1]\n\t\t}\n\t}\n\n\tif aqua.Secure {\n\t\taqua.URL = fmt.Sprintf(\"https://%s:%d/api\", host, port)\n\t} else {\n\t\taqua.URL = fmt.Sprintf(\"http://%s:%d/api\", host, port)\n\t}\n\n\tconnected, message := authenticate(&aqua)\n\n\tif connected {\n\t\taqua.RestClient.Set(\"Authorization\", \"Bearer \"+aqua.Token)\n\t\treturn &aqua, nil\n\t}\n\n\treturn &aqua, fmt.Errorf(message)\n}", "func Request(wellKnownConfig oidc.WellKnownConfiguration, client OidcClient) error {\n\t// from original code\n\tcodeVerifier := \"\"\n\tcodeChallenge := \"\"\n\n\tstate, stateErr := oidc.GenerateRandomStringURLSafe(24)\n\tif stateErr != nil {\n\t\treturn fmt.Errorf(\"failed to generate random state. Check that your OS has a crypto implementation available\")\n\t}\n\n\tauthorisationURL, err := oidc.BuildCodeAuthorisationRequest(\n\t\twellKnownConfig,\n\t\tclient.ClientID,\n\t\tclient.RedirectURL.String(),\n\t\tclient.Scopes,\n\t\tstate,\n\t\tcodeChallenge,\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to build authorisation request %w\", err)\n\t}\n\n\tm := http.NewServeMux()\n\ts := http.Server{\n\t\tAddr: fmt.Sprintf(\":%s\", client.RedirectURL.Port()),\n\t\tHandler: m,\n\t}\n\tctx, cancel := context.WithCancel(context.Background())\n\n\tdefer cancel()\n\n\t// Open a web server to receive the redirect\n\tm.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\thandleOidcCallback(w, r,\n\t\t\tclient.Alias,\n\t\t\tclient.ClientID,\n\t\t\tclient.ClientSecret,\n\t\t\tclient.RedirectURL.String(),\n\t\t\twellKnownConfig,\n\t\t\tstate,\n\t\t\tcodeVerifier,\n\t\t\tcancel,\n\t\t)\n\t})\n\n\tfmt.Println(\"Open browser to\", authorisationURL)\n\n\tgo func() {\n\t\tif err := s.ListenAndServe(); err != nil && err != http.ErrServerClosed {\n\t\t\tlog.Println(err)\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\t// Shutdown the server when the context is canceled\n\t\terr := s.Shutdown(ctx)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}\n\n\treturn nil\n}", "func new() exampleInterface {\n\treturn config{}\n}", "func NewClientConfiguration(pfmServicesEnabled bool, isAutomaticBatchUpdateEnabled bool, isDevelopmentModeEnabled bool, isNonEuroAccountsSupported bool, isAutoCategorizationEnabled bool, mandatorLicense MandatorLicense, preferredConsentType PreferredConsentType, userNotificationCallbackUrl NullableString, userSynchronizationCallbackUrl NullableString, refreshTokensValidityPeriod int32, userAccessTokensValidityPeriod int32, clientAccessTokensValidityPeriod int32, maxUserLoginAttempts 
int32, transactionImportLimitation int32, isUserAutoVerificationEnabled bool, isMandatorAdmin bool, isWebScrapingEnabled bool, isXs2aEnabled bool, pinStorageAvailableInWebForm bool, paymentsEnabled bool, isStandalonePaymentsEnabled bool, availableBankGroups []string, products []Product, applicationName NullableString, finTSProductRegistrationNumber NullableString, storeSecretsAvailableInWebForm bool, supportSubjectDefault NullableString, supportEmail NullableString, aisWebFormMode WebFormMode, pisWebFormMode WebFormMode, pisStandaloneWebFormMode WebFormMode, betaBanksEnabled bool, categoryRestrictions []Category, autoDismountWebForm bool, corsAllowedOrigins []string, ) *ClientConfiguration {\n\tthis := ClientConfiguration{}\n\tthis.PfmServicesEnabled = pfmServicesEnabled\n\tthis.IsAutomaticBatchUpdateEnabled = isAutomaticBatchUpdateEnabled\n\tthis.IsDevelopmentModeEnabled = isDevelopmentModeEnabled\n\tthis.IsNonEuroAccountsSupported = isNonEuroAccountsSupported\n\tthis.IsAutoCategorizationEnabled = isAutoCategorizationEnabled\n\tthis.MandatorLicense = mandatorLicense\n\tthis.PreferredConsentType = preferredConsentType\n\tthis.UserNotificationCallbackUrl = userNotificationCallbackUrl\n\tthis.UserSynchronizationCallbackUrl = userSynchronizationCallbackUrl\n\tthis.RefreshTokensValidityPeriod = refreshTokensValidityPeriod\n\tthis.UserAccessTokensValidityPeriod = userAccessTokensValidityPeriod\n\tthis.ClientAccessTokensValidityPeriod = clientAccessTokensValidityPeriod\n\tthis.MaxUserLoginAttempts = maxUserLoginAttempts\n\tthis.TransactionImportLimitation = transactionImportLimitation\n\tthis.IsUserAutoVerificationEnabled = isUserAutoVerificationEnabled\n\tthis.IsMandatorAdmin = isMandatorAdmin\n\tthis.IsWebScrapingEnabled = isWebScrapingEnabled\n\tthis.IsXs2aEnabled = isXs2aEnabled\n\tthis.PinStorageAvailableInWebForm = pinStorageAvailableInWebForm\n\tthis.PaymentsEnabled = paymentsEnabled\n\tthis.IsStandalonePaymentsEnabled = isStandalonePaymentsEnabled\n\tthis.AvailableBankGroups = availableBankGroups\n\tthis.Products = products\n\tthis.ApplicationName = applicationName\n\tthis.FinTSProductRegistrationNumber = finTSProductRegistrationNumber\n\tthis.StoreSecretsAvailableInWebForm = storeSecretsAvailableInWebForm\n\tthis.SupportSubjectDefault = supportSubjectDefault\n\tthis.SupportEmail = supportEmail\n\tthis.AisWebFormMode = aisWebFormMode\n\tthis.PisWebFormMode = pisWebFormMode\n\tthis.PisStandaloneWebFormMode = pisStandaloneWebFormMode\n\tthis.BetaBanksEnabled = betaBanksEnabled\n\tthis.CategoryRestrictions = categoryRestrictions\n\tthis.AutoDismountWebForm = autoDismountWebForm\n\tthis.CorsAllowedOrigins = corsAllowedOrigins\n\treturn &this\n}", "func NewConfig() {\n\t appConfig = &AppConfig{}\n}", "func New(authToken string, locales []string) *Client {\n\tpool := x509.NewCertPool()\n\tpool.AppendCertsFromPEM([]byte(\"-----BEGIN 
CERTIFICATE-----\\nMIIL6TCCCtGgAwIBAgIQBigdNnW0H8yz/xj67Pj93zANBgkqhkiG9w0BAQsFADBw\\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\\nd3cuZGlnaWNlcnQuY29tMS8wLQYDVQQDEyZEaWdpQ2VydCBTSEEyIEhpZ2ggQXNz\\ndXJhbmNlIFNlcnZlciBDQTAeFw0xNDEyMDgwMDAwMDBaFw0xODAyMDYxMjAwMDBa\\nMGwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1T\\nYW4gRnJhbmNpc2NvMRUwEwYDVQQKEwxGYXN0bHksIEluYy4xGTAXBgNVBAMTEGEu\\nc3NsLmZhc3RseS5uZXQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDU\\nJUiQsaVP/vC4Mb3aJUmA9KnMQa7EJfjYLsE4F0VehrOp8jlSSXmQLELlUAwPp2F2\\nPNyB32DDOFBHZIYwFrApFEzsJdTKQUYk6xHPZOdYoIijpmfb5xRMdTjqxThGkk+k\\nhU0+ipPWiErJNRkapLgPwPD4ctd5X8rnKF8lMHIxx5Xhdg6PqZC3F7y45Nym2a3M\\n8xIKIkB77o1bkuDpGnV9ZESC/Yf9Mc4NmWrQjqQc+8yIabir+n7/YcM5UdUjZPNS\\nhgL4jLYVJ+KDRZcjIT/dXRZoPpJgRFL9NIep/eSAzQa3g659uW7tjN6tg5iQm4hw\\nksaWp+zfTAJc4IXNtlndAgMBAAGjggiBMIIIfTAfBgNVHSMEGDAWgBRRaP+QrwIH\\ndTzM2WVkYqISuFlyOzAdBgNVHQ4EFgQUwIj0Y03ka1Q28RLCtKWy4nN7FIgwggax\\nBgNVHREEggaoMIIGpIIQYS5zc2wuZmFzdGx5Lm5ldIISKi5hLnNzbC5mYXN0bHku\\nbmV0gg9mYXN0Lndpc3RpYS5jb22CEHB1cmdlLmZhc3RseS5uZXSCEm1pcnJvcnMu\\nZmFzdGx5Lm5ldIIOKi5wYXJzZWNkbi5jb22CDSouZmFzdHNzbC5uZXSCCXZveGVy\\nLmNvbYINd3d3LnZveGVyLmNvbYIOKi5maXJlYmFzZS5jb22CEHNpdGVzLnlhbW1l\\nci5jb22CGHNpdGVzLnN0YWdpbmcueWFtbWVyLmNvbYIPKi5za2ltbGlua3MuY29t\\nghMqLnNraW1yZXNvdXJjZXMuY29tghBjZG4udGhpbmdsaW5rLm1lggwqLmZpdGJp\\ndC5jb22CEiouaG9zdHMuZmFzdGx5Lm5ldIISY29udHJvbC5mYXN0bHkubmV0gg8q\\nLndpa2lhLWluYy5jb22CFSoucGVyZmVjdGF1ZGllbmNlLmNvbYILKi53aWtpYS5j\\nb22CEmYuY2xvdWQuZ2l0aHViLmNvbYIVKi5kaWdpdGFsc2Npcm9jY28ubmV0ggoq\\nLmV0c3kuY29tghAqLmV0c3lzdGF0aWMuY29tgg0qLmFkZHRoaXMuY29tghAqLmFk\\nZHRoaXNjZG4uY29tgg9mYXN0Lndpc3RpYS5uZXSCDnJhdy5naXRodWIuY29tgg93\\nd3cudXNlcmZveC5jb22CEyouYXNzZXRzLXlhbW1lci5jb22CGyouc3RhZ2luZy5h\\nc3NldHMteWFtbWVyLmNvbYIWYXNzZXRzLmh1Z2dpZXMtY2RuLm5ldIISb3JiaXQu\\nc2hhemFtaWQuY29tgg9hYm91dC5qc3Rvci5vcmeCFyouZ2xvYmFsLnNzbC5mYXN0\\nbHkubmV0gg13ZWIudm94ZXIuY29tgg9weXBpLnB5dGhvbi5vcmeCCyouMTJ3YnQu\\nY29tghJ3d3cuaG9sZGVyZGVvcmQubm+CGnNlY3VyZWQuaW5kbi5pbmZvbGlua3Mu\\nY29tghBwbGF5LnZpZHlhcmQuY29tghhwbGF5LXN0YWdpbmcudmlkeWFyZC5jb22C\\nFXNlY3VyZS5pbWcud2ZyY2RuLmNvbYIWc2VjdXJlLmltZy5qb3NzY2RuLmNvbYIQ\\nKi5nb2NhcmRsZXNzLmNvbYIVd2lkZ2V0cy5waW50ZXJlc3QuY29tgg4qLjdkaWdp\\ndGFsLmNvbYINKi43c3RhdGljLmNvbYIPcC5kYXRhZG9naHEuY29tghBuZXcubXVs\\nYmVycnkuY29tghJ3d3cuc2FmYXJpZmxvdy5jb22CEmNkbi5jb250ZW50ZnVsLmNv\\nbYIQdG9vbHMuZmFzdGx5Lm5ldIISKi5odWV2b3NidWVub3MuY29tgg4qLmdvb2Rl\\nZ2dzLmNvbYIWKi5mYXN0bHkucGljbW9ua2V5LmNvbYIVKi5jZG4ud2hpcHBsZWhp\\nbGwubmV0ghEqLndoaXBwbGVoaWxsLm5ldIIbY2RuLm1lZGlhMzQud2hpcHBsZWhp\\nbGwubmV0ghtjZG4ubWVkaWE1Ni53aGlwcGxlaGlsbC5uZXSCG2Nkbi5tZWRpYTc4\\nLndoaXBwbGVoaWxsLm5ldIIcY2RuLm1lZGlhOTEwLndoaXBwbGVoaWxsLm5ldIIO\\nKi5tb2RjbG90aC5jb22CDyouZGlzcXVzY2RuLmNvbYILKi5qc3Rvci5vcmeCDyou\\nZHJlYW1ob3N0LmNvbYIOd3d3LmZsaW50by5jb22CDyouY2hhcnRiZWF0LmNvbYIN\\nKi5oaXBtdW5rLmNvbYIaY29udGVudC5iZWF2ZXJicm9va3MuY28udWuCG3NlY3Vy\\nZS5jb21tb24uY3Nuc3RvcmVzLmNvbYIOd3d3LmpvaW5vcy5jb22CJXN0YWdpbmct\\nbW9iaWxlLWNvbGxlY3Rvci5uZXdyZWxpYy5jb22CDioubW9kY2xvdGgubmV0ghAq\\nLmZvdXJzcXVhcmUuY29tggwqLnNoYXphbS5jb22CCiouNHNxaS5uZXSCDioubWV0\\nYWNwYW4ub3JnggwqLmZhc3RseS5jb22CCXdpa2lhLmNvbYIKZmFzdGx5LmNvbYIR\\nKi5nYWR2ZW50dXJlcy5jb22CFnd3dy5nYWR2ZW50dXJlcy5jb20uYXWCFXd3dy5n\\nYWR2ZW50dXJlcy5jby51a4IJa3JlZG8uY29tghZjZG4tdGFncy5icmFpbmllbnQu\\nY29tghRteS5iaWxsc3ByaW5nYXBwLmNvbYIGcnZtLmlvMA4GA1UdDwEB/wQEAwIF\\noDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwdQYDVR0fBG4wbDA0oDKg\\nMIYuaHR0cDovL2NybDMuZGlnaWNlcnQuY29tL3NoYTItaGEtc2VydmVyLWc1LmNy\\nbDA0oDKgMIYuaHR0cDovL2NybDQuZGlnaWNlcnQuY29tL3NoYTIt
aGEtc2VydmVy\\nLWc1LmNybDBMBgNVHSAERTBDMDcGCWCGSAGG/WwBATAqMCgGCCsGAQUFBwIBFhxo\\ndHRwczovL3d3dy5kaWdpY2VydC5jb20vQ1BTMAgGBmeBDAECAjCBgwYIKwYBBQUH\\nAQEEdzB1MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5kaWdpY2VydC5jb20wTQYI\\nKwYBBQUHMAKGQWh0dHA6Ly9jYWNlcnRzLmRpZ2ljZXJ0LmNvbS9EaWdpQ2VydFNI\\nQTJIaWdoQXNzdXJhbmNlU2VydmVyQ0EuY3J0MAwGA1UdEwEB/wQCMAAwDQYJKoZI\\nhvcNAQELBQADggEBAKLWzbX7wSyjzE7BVMjLrHAaiz+WGSwrAPrQBJ29sqouu9gv\\nI7i2Ie6eiRb4YLMouy6D+ZNZ+RM+Hkjv+PZFxCcDRmaWi+74ha5d8O155gRJRPZ0\\nSy5SfD/8kqrJRfC+/D/KdQzOroD4sx6Qprs9lZ0IEn4CTf0YPNV+Cps37LsVyPJL\\nfjDlGIM5K3B/vtZfn2f8buQ9QyKiN0bc67GdCjih9dSrkQNkxJiEOwqiSjYtkdFO\\ndYpXF8d1rQKV7a6z2vJloDwilfXLLlUX7rA3qVu7r4EUfIsZgH7hgB4bbst7tx+7\\nPgUEq2334kKPVFpsxgsj5++k4lh7tNlakXiBUtw=\\n-----END CERTIFICATE-----\\n-----BEGIN CERTIFICATE-----\\nMIIEsTCCA5mgAwIBAgIQBOHnpNxc8vNtwCtCuF0VnzANBgkqhkiG9w0BAQsFADBs\\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\\nd3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j\\nZSBFViBSb290IENBMB4XDTEzMTAyMjEyMDAwMFoXDTI4MTAyMjEyMDAwMFowcDEL\\nMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3\\nLmRpZ2ljZXJ0LmNvbTEvMC0GA1UEAxMmRGlnaUNlcnQgU0hBMiBIaWdoIEFzc3Vy\\nYW5jZSBTZXJ2ZXIgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC2\\n4C/CJAbIbQRf1+8KZAayfSImZRauQkCbztyfn3YHPsMwVYcZuU+UDlqUH1VWtMIC\\nKq/QmO4LQNfE0DtyyBSe75CxEamu0si4QzrZCwvV1ZX1QK/IHe1NnF9Xt4ZQaJn1\\nitrSxwUfqJfJ3KSxgoQtxq2lnMcZgqaFD15EWCo3j/018QsIJzJa9buLnqS9UdAn\\n4t07QjOjBSjEuyjMmqwrIw14xnvmXnG3Sj4I+4G3FhahnSMSTeXXkgisdaScus0X\\nsh5ENWV/UyU50RwKmmMbGZJ0aAo3wsJSSMs5WqK24V3B3aAguCGikyZvFEohQcft\\nbZvySC/zA/WiaJJTL17jAgMBAAGjggFJMIIBRTASBgNVHRMBAf8ECDAGAQH/AgEA\\nMA4GA1UdDwEB/wQEAwIBhjAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIw\\nNAYIKwYBBQUHAQEEKDAmMCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5kaWdpY2Vy\\ndC5jb20wSwYDVR0fBEQwQjBAoD6gPIY6aHR0cDovL2NybDQuZGlnaWNlcnQuY29t\\nL0RpZ2lDZXJ0SGlnaEFzc3VyYW5jZUVWUm9vdENBLmNybDA9BgNVHSAENjA0MDIG\\nBFUdIAAwKjAoBggrBgEFBQcCARYcaHR0cHM6Ly93d3cuZGlnaWNlcnQuY29tL0NQ\\nUzAdBgNVHQ4EFgQUUWj/kK8CB3U8zNllZGKiErhZcjswHwYDVR0jBBgwFoAUsT7D\\naQP4v0cB1JgmGggC72NkK8MwDQYJKoZIhvcNAQELBQADggEBABiKlYkD5m3fXPwd\\naOpKj4PWUS+Na0QWnqxj9dJubISZi6qBcYRb7TROsLd5kinMLYBq8I4g4Xmk/gNH\\nE+r1hspZcX30BJZr01lYPf7TMSVcGDiEo+afgv2MW5gxTs14nhr9hctJqvIni5ly\\n/D6q1UEL2tU2ob8cbkdJf17ZSHwD2f2LSaCYJkJA69aSEaRkCldUxPUd1gJea6zu\\nxICaEnL6VpPX/78whQYwvwt/Tv9XBZ0k7YXDK/umdaisLRbvfXknsuvCnQsH6qqF\\n0wGjIChBWUMo0oHjqvbsezt3tkBigAVBRQHvFwY+3sAzm2fTYS5yh+Rp/BIAV0Ae\\ncPUeybQ=\\n-----END CERTIFICATE-----\\n\"))\n\treturn &Client{\n\t\tLocales: locales,\n\t\tauthToken: authToken,\n\t\tclient: &http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\t\tRootCAs: pool,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\thost: fmt.Sprintf(\"https://%s\", ContentfulCDNURL),\n\t\tpool: pool,\n\t\tspaceID: \"ygx37epqlss8\",\n\t}\n}", "func New(ctx context.Context, config Config) *Client {\n\tclient := &Client{\n\t\tid: generateClientID(\"C\"),\n\t\tvalues: make(map[string]interface{}),\n\t\tevents: make(chan *Event, 64),\n\t\tsends: make(chan string, 64),\n\t\tcapEnabled: make(map[string]bool),\n\t\tcapData: make(map[string]string),\n\t\tconfig: config.WithDefaults(),\n\t\tstatus: &Status{id: generateClientID(\"T\")},\n\t}\n\n\tclient.ctx, client.cancel = context.WithCancel(ctx)\n\n\t_ = client.AddTarget(client.status)\n\n\tgo client.handleEventLoop()\n\tgo client.handleSendLoop()\n\n\tclient.EmitNonBlocking(NewEvent(\"client\", \"create\"))\n\n\treturn client\n}", "func RegisterOIDC(router *router.Router, middleware 
middlewares.RequestHandlerBridge) {\n\t// TODO: Add OPTIONS handler.\n\trouter.GET(pathOpenIDConnectWellKnown, middleware(oidcWellKnown))\n\n\trouter.GET(pathOpenIDConnectConsent, middleware(oidcConsent))\n\n\trouter.POST(pathOpenIDConnectConsent, middleware(oidcConsentPOST))\n\n\trouter.GET(pathOpenIDConnectJWKs, middleware(oidcJWKs))\n\n\trouter.GET(pathOpenIDConnectAuthorization, middleware(middlewares.NewHTTPToAutheliaHandlerAdaptor(oidcAuthorization)))\n\n\t// TODO: Add OPTIONS handler.\n\trouter.POST(pathOpenIDConnectToken, middleware(middlewares.NewHTTPToAutheliaHandlerAdaptor(oidcToken)))\n\n\trouter.POST(pathOpenIDConnectIntrospection, middleware(middlewares.NewHTTPToAutheliaHandlerAdaptor(oidcIntrospection)))\n\n\trouter.GET(pathOpenIDConnectUserinfo, middleware(middlewares.NewHTTPToAutheliaHandlerAdaptor(oidcUserinfo)))\n\trouter.POST(pathOpenIDConnectUserinfo, middleware(middlewares.NewHTTPToAutheliaHandlerAdaptor(oidcUserinfo)))\n\n\t// TODO: Add OPTIONS handler.\n\trouter.POST(pathOpenIDConnectRevocation, middleware(middlewares.NewHTTPToAutheliaHandlerAdaptor(oidcRevocation)))\n}", "func New() *nauth {\n\tsingleton.mutex.Lock()\n\tdefer singleton.mutex.Unlock()\n\n\tif singleton.generatedKeysC == nil && PrecalculatedKeysNum > 0 {\n\t\tsingleton.generatedKeysC = make(chan keyPair, PrecalculatedKeysNum)\n\t\tgo singleton.precalculateKeys()\n\t}\n\treturn &singleton\n}", "func newConfig() (*config, error) {\n\tec2Metadata := ec2metadata.New(session.Must(session.NewSession()))\n\tregion, err := ec2Metadata.Region()\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get region from ec2 metadata\")\n\t}\n\n\tinstanceID, err := ec2Metadata.GetMetadata(\"instance-id\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get instance id from ec2 metadata\")\n\t}\n\n\tmac, err := ec2Metadata.GetMetadata(\"mac\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get mac from ec2 metadata\")\n\t}\n\n\tsecurityGroups, err := ec2Metadata.GetMetadata(\"security-groups\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get security groups from ec2 metadata\")\n\t}\n\n\tinterfaces, err := ec2Metadata.GetMetadata(\"network/interfaces/macs\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get interfaces from ec2 metadata\")\n\t}\n\n\tsubnet, err := ec2Metadata.GetMetadata(\"network/interfaces/macs/\" + mac + \"/subnet-id\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get subnet from ec2 metadata\")\n\t}\n\n\tvpc, err := ec2Metadata.GetMetadata(\"network/interfaces/macs/\" + mac + \"/vpc-id\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to get vpc from ec2 metadata\")\n\t}\n\n\treturn &config{region: region,\n\t\tsubnet: subnet,\n\t\tindex: int64(len(strings.Split(interfaces, \"\\n\"))),\n\t\tinstanceID: instanceID,\n\t\tsecurityGroups: strings.Split(securityGroups, \"\\n\"),\n\t\tvpc: vpc,\n\t}, nil\n}", "func newDistccClientClaims(c *K8ccV1alpha1Client, namespace string) *distccClientClaims {\n\treturn &distccClientClaims{\n\t\tclient: c.RESTClient(),\n\t\tns: namespace,\n\t}\n}", "func NewConfig(ctx context.Context, issuer string) (*Config, error) {\n\tc := &Config{\n\t\tIssuer: issuer,\n\t\tCtx: ctx,\n\t}\n\terr := c.discovery()\n\treturn c, err\n}", "func NewCA(config *NewCertConfig, names ...string) (*Certificate, error) {\n\tconfig.IsCA = true\n\n\tcert, err := newCert(config, names...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcertPool := 
x509.NewCertPool()\n\tcertPool.AddCert(cert.Cert)\n\tcert.CertPool = certPool\n\n\treturn cert, nil\n}", "func NewClient(mcfg Config) (mc Client, err error) {\n\tdiscoveryURI := DiscoveryURI\n\tif mcfg.DiscoveryURI != \"\" {\n\t\tdiscoveryURI = mcfg.DiscoveryURI\n\t}\n\n\tmcfg = populateDefaultConfig(mcfg)\n\n\tvar provider oidc.ProviderConfig\n\tfor tries := 0; true; {\n\n\t\tprovider, err = oidc.FetchProviderConfig(mcfg.HTTPClient, discoveryURI)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\ttries++\n\t\tif tries > mcfg.ProviderRetries {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tmcfg.Clock.Sleep(sleepPeriod)\n\t}\n\n\tcredentials := oidc.ClientCredentials{\n\t\tID: mcfg.ClientID,\n\t\tSecret: mcfg.ClientSecret,\n\t}\n\n\toidc, err := oidc.NewClient(oidc.ClientConfig{\n\t\tCredentials: credentials,\n\t\tRedirectURL: mcfg.RedirectURI,\n\t\tProviderConfig: provider,\n\t\tScope: mcfg.Scope,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\toidc.SyncProviderConfig(discoveryURI)\n\n\toauth, err := oidc.OAuthClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &client{\n\t\toidc: oidc,\n\t\toauth: oauth,\n\t\tprovider: provider,\n\t\tconfig: mcfg,\n\t}, err\n}", "func createOIDCIssuer(client *azureclients.AzureClientWrapper, name, region, oidcResourceGroupName, storageAccountName, blobContainerName, subscriptionID, tenantID, publicKeyPath, outputDir string, resourceTags map[string]string, dryRun bool) (string, error) {\n\t// Add CCO's \"owned\" tag to resource tags map\n\tresourceTags[fmt.Sprintf(\"%s_%s\", ownedAzureResourceTagKeyPrefix, name)] = ownedAzureResourceTagValue\n\n\tstorageAccountKey := \"\"\n\tif !dryRun {\n\t\t// Ensure that the public key file can be read at the publicKeyPath before continuing\n\t\t_, err := os.ReadFile(publicKeyPath)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"unable to read public key file\")\n\t\t}\n\n\t\t// Ensure the resource group exists\n\t\terr = ensureResourceGroup(client, oidcResourceGroupName, region, resourceTags)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"failed to ensure resource group\")\n\t\t}\n\n\t\t// Ensure storage account exists\n\t\terr = ensureStorageAccount(client, storageAccountName, oidcResourceGroupName, region, resourceTags)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"failed to ensure storage account\")\n\t\t}\n\n\t\tstorageAccountKey, err = getStorageAccountKey(client, storageAccountName, oidcResourceGroupName)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"failed to get storage account key\")\n\t\t}\n\n\t\t// Ensure blob container exists\n\t\terr = ensureBlobContainer(client, oidcResourceGroupName, storageAccountName, blobContainerName)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"failed to create blob container\")\n\t\t}\n\t}\n\n\t// Upload OIDC documents (openid-configuration, jwks.json) to the blob container\n\toutputDirAbsPath, err := filepath.Abs(outputDir)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tissuerURL, err := uploadOIDCDocuments(client, storageAccountName, storageAccountKey, publicKeyPath, blobContainerName, outputDirAbsPath, dryRun, resourceTags)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to upload OIDC documents\")\n\t}\n\n\t// Write cluster authentication object installer manifest cluster-authentication-02-config.yaml\n\t// for our issuerURL within outputDir/manifests\n\tif err = provisioning.CreateClusterAuthentication(issuerURL, outputDir); err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to 
create cluster authentication manifest\")\n\t}\n\n\t// Write Azure AD pod identity webhook config secret azure-ad-pod-identity-webhook-config.yaml\n\t// within outputDir/manifests\n\tif err = createPodIdentityWebhookConfigSecret(tenantID, outputDir); err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to create Azure AD pod identity webhook manifest\")\n\t}\n\n\treturn issuerURL, nil\n}", "func NewClient(c diam.Conn) {\n // Build CCR\n\n parser, _ := diamdict.NewParser()\n parser.Load(bytes.NewReader(diamdict.DefaultXML))\n parser.Load(bytes.NewReader(diamdict.CreditControlXML))\n\n m := diam.NewRequest(257, 0, parser)\n // Add AVPs\n m.NewAVP(\"Origin-Host\", 0x40, 0x00, Identity)\n m.NewAVP(\"Origin-Realm\", 0x40, 0x00, Realm)\n m.NewAVP(\"Origin-State-Id\", 0x40, 0x00, diamtype.Unsigned32(rand.Uint32()))\n m.NewAVP(\"Auth-Application-Id\", 0x40, 0x00, AuthApplicationId)\n laddr := c.LocalAddr()\n ip, _, _ := net.SplitHostPort(laddr.String())\n m.NewAVP(\"Host-IP-Address\", 0x40, 0x0, diamtype.Address(net.ParseIP(ip)))\n m.NewAVP(\"Vendor-Id\", 0x40, 0x0, VendorId)\n m.NewAVP(\"Product-Name\", 0x00, 0x0, ProductName)\n\n log.Printf(\"Sending message to %s\", c.RemoteAddr().String())\n log.Println(m.String())\n // Send message to the connection\n if _, err := m.WriteTo(c); err != nil {\n log.Fatal(\"Write failed:\", err)\n }\n\n m = diam.NewRequest(272, 4, parser)\n // Add AVPs\n m.NewAVP(\"Session-Id\", 0x40, 0x00, diamtype.UTF8String(fmt.Sprintf(\"%v\", rand.Uint32())))\n m.NewAVP(\"Origin-Host\", 0x40, 0x00, Identity)\n m.NewAVP(\"Origin-Realm\", 0x40, 0x00, Realm)\n m.NewAVP(\"Destination-Realm\", 0x40, 0x00, DestinationRealm)\n m.NewAVP(\"Auth-Application-Id\", 0x40, 0x0, AuthApplicationId)\n m.NewAVP(\"CC-Request-Type\", 0x40, 0x0, CCRequestType)\n m.NewAVP(\"Service-Context-Id\", 0x40, 0x0, ServiceContextId)\n m.NewAVP(\"Service-Identifier\", 0x40, 0x0, ServiceIdentifier)\n m.NewAVP(\"CC-Request-Number\", 0x40, 0x0, CCRequestNumber)\n m.NewAVP(\"Requested-Action\", 0x40, 0x0, RequestedAction)\n m.NewAVP(\"Subscription-Id\", 0x40, 0x00, &diam.Grouped{\n AVP: []*diam.AVP{\n // Subscription-Id-Type\n diam.NewAVP(450, 0x40, 0x0, SubscriptionIdType),\n // Subscription-Id-Data\n diam.NewAVP(444, 0x40, 0x0, SubscriptionIdData),\n },\n })\n m.NewAVP(\"Service-Parameter-Info\", 0x40, 0x00, &diam.Grouped{\n AVP: []*diam.AVP{\n // Service-Parameter-Type\n diam.NewAVP(441, 0x40, 0x0, ServiceParameterType1),\n // Service-Parameter-Value\n diam.NewAVP(442, 0x40, 0x0, ServiceParameterValue1),\n },\n })\n m.NewAVP(\"Service-Parameter-Info\", 0x40, 0x00, &diam.Grouped{\n AVP: []*diam.AVP{\n // Service-Parameter-Type\n diam.NewAVP(441, 0x40, 0x0, ServiceParameterType2),\n // Service-Parameter-Value\n diam.NewAVP(442, 0x40, 0x0, ServiceParameterValue2),\n },\n })\n\n log.Printf(\"Sending message to %s\", c.RemoteAddr().String())\n log.Println(m.String())\n // Send message to the connection\n if _, err := m.WriteTo(c); err != nil {\n log.Fatal(\"Write failed:\", err)\n }\n}", "func New(clientKey, secret, callbackURL string, scopes ...string) *Provider {\n\tp := &Provider{\n\t\tClientKey: clientKey,\n\t\tSecret: secret,\n\t\tCallbackURL: callbackURL,\n\t}\n\tp.config = newConfig(p, scopes)\n\treturn p\n}", "func (k *K8sutil) generateConfig(configDir, certsDir, namespace, clusterName string) error {\n\tcaConfig := caconfig{\n\t\tSigning: configSigning{\n\t\t\tDefault: configDefault{\n\t\t\t\tUsages: []string{\n\t\t\t\t\t\"signing\",\n\t\t\t\t\t\"key encipherment\",\n\t\t\t\t\t\"server 
auth\",\n\t\t\t\t\t\"client auth\",\n\t\t\t\t},\n\t\t\t\tExpiry: \"8760h\",\n\t\t\t},\n\t\t},\n\t}\n\n\tcaCSR := csr{\n\t\tHosts: []string{\n\t\t\t\"localhost\",\n\t\t\tfmt.Sprintf(\"elasticsearch-%s\", clusterName),\n\t\t\tfmt.Sprintf(\"%s.%s\", fmt.Sprintf(\"elasticsearch-%s\", clusterName), namespace),\n\t\t\tfmt.Sprintf(\"%s.%s.svc.cluster.local\", fmt.Sprintf(\"elasticsearch-%s\", clusterName), namespace),\n\t\t},\n\t\tKey: key{\n\t\t\tAlgo: \"rsa\",\n\t\t\tSize: 2048,\n\t\t},\n\t\tNames: []names{\n\t\t\tnames{\n\t\t\t\tC: \"US\",\n\t\t\t\tL: \"Pittsburgh\",\n\t\t\t\tO: \"elasticsearch-operator\",\n\t\t\t\tOU: \"k8s\",\n\t\t\t\tST: \"Pennsylvania\",\n\t\t\t},\n\t\t},\n\t}\n\n\tcaConfigJSON, err := json.Marshal(caConfig)\n\tif err != nil {\n\t\tlogrus.Error(\"json Marshal error : \", err)\n\t\treturn err\n\t}\n\tf, err := os.Create(fmt.Sprintf(\"%s/ca-config.json\", configDir))\n\t_, err = f.Write(caConfigJSON)\n\tif err != nil {\n\t\tlogrus.Error(\"Error creating ca-config.json: \", err)\n\t\treturn err\n\t}\n\n\treqCACSRJSON, _ := json.Marshal(caCSR)\n\tf, err = os.Create(fmt.Sprintf(\"%s/ca-csr.json\", configDir))\n\t_, err = f.Write(reqCACSRJSON)\n\tif err != nil {\n\t\tlogrus.Error(\"Error creating ca-csr.json: \", err)\n\t\treturn err\n\t}\n\n\tfor k, v := range map[string]string{\n\t\t\"node\": \"req-node-csr.json\",\n\t\t\"sgadmin\": \"req-sgadmin-csr.json\",\n\t\t\"kibana\": \"req-kibana-csr.json\",\n\t\t\"cerebro\": \"req-cerebro-csr.json\",\n\t} {\n\n\t\treq := csr{\n\t\t\tCN: k,\n\t\t\tHosts: []string{\n\t\t\t\t\"localhost\",\n\t\t\t\tfmt.Sprintf(\"%s-%s\", k, clusterName),\n\t\t\t\tfmt.Sprintf(\"%s.%s\", fmt.Sprintf(\"%s-%s\", k, clusterName), namespace),\n\t\t\t\tfmt.Sprintf(\"%s.%s.svc.cluster.local\", fmt.Sprintf(\"%s-%s\", k, clusterName), namespace),\n\t\t\t\tfmt.Sprintf(\"elasticsearch-%s\", clusterName),\n\t\t\t},\n\t\t\tKey: key{\n\t\t\t\tAlgo: \"rsa\",\n\t\t\t\tSize: 2048,\n\t\t\t},\n\t\t\tNames: []names{\n\t\t\t\tnames{\n\t\t\t\t\tO: \"autogenerated\",\n\t\t\t\t\tOU: \"elasticsearch cluster\",\n\t\t\t\t\tL: \"operator\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tconfigJSON, _ := json.Marshal(req)\n\t\tf, err := os.Create(fmt.Sprintf(\"%s/%s\", configDir, v))\n\t\t_, err = f.Write(configJSON)\n\t\tif err != nil {\n\t\t\tlogrus.Error(err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func NewAPI(serverURL, group, deviceName, password string) (*CacophonyAPI, error) {\n\tapi := &CacophonyAPI{\n\t\tserverURL: serverURL,\n\t\tgroup: group,\n\t\tdeviceName: deviceName,\n\t\tpassword: password,\n\t}\n\tif password == \"\" {\n\t\terr := api.register()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tapi.justRegistered = true\n\t} else {\n\t\terr := api.newToken()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn api, nil\n}", "func New(cfg ...Config) (*x509.Certificate, crypto.Signer, error) {\n\tcert, key, err := genCertAndKey(getConfig(cfg), true)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn cert, key, nil\n}", "func New(kubeconfig *rest.Config, opa opa_client.Data, ns types.ResourceType, name string, owner metav1.OwnerReference) *Initializer {\n\tcpy := *kubeconfig\n\tif ns.Group == \"\" {\n\t\tcpy.APIPath = \"/api\"\n\t} else {\n\t\tcpy.APIPath = \"/apis\"\n\t}\n\tcpy.GroupVersion = &schema.GroupVersion{\n\t\tGroup: ns.Group,\n\t\tVersion: ns.Version,\n\t}\n\tcpy.NegotiatedSerializer = dynamic.ContentConfig().NegotiatedSerializer\n\treturn &Initializer{\n\t\tkubeconfig: &cpy,\n\t\tname: name,\n\t\tns: ns,\n\t\topa: 
opa,\n\t\towner: owner,\n\t}\n}", "func NewConfig() *Config {\r\n\t//config := Config{}\r\n\treturn &Config{\r\n\t\tInsteon: Credential{BaseURL: \"http://192.168.1.1:25105\", Username: \"fobar\", Password: \"password\"},\r\n\t\tWifiInterface: \"wlan0\",\r\n\t\tSSID: \"ssid_to_track\",\r\n\t\tGarageID: \"A3BF45\",\r\n\t\tLogfile: \"/var/log/opensesame.log\",\r\n\t}\r\n}", "func (c *Config) New() (*vela.Client, error) {\n\tlogrus.Trace(\"creating new Vela client from plugin configuration\")\n\n\t// create the app string\n\tappID := fmt.Sprintf(\"%s; %s\", c.AppName, c.AppVersion)\n\n\t// create Vela client from configuration\n\tclient, err := vela.NewClient(c.Server, appID, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// check if a token is provided for authentication\n\tif len(c.Token) > 0 {\n\t\tlogrus.Debugf(\"setting authentication token for Vela\")\n\n\t\t// set the token for authentication in the Vela client\n\t\tclient.Authentication.SetPersonalAccessTokenAuth(c.Token)\n\t}\n\n\treturn client, nil\n}", "func newCA() {\n\terr := os.MkdirAll(shrubCA, 0700)\n\n\tpriv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)\n\tfatalIfErr(err, \"failed to generate the CA key\")\n\tpub := priv.PublicKey\n\n\tserialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)\n\tserialNumber, err := rand.Int(rand.Reader, serialNumberLimit)\n\tfatalIfErr(err, \"failed to generate serial number\")\n\n\tspkiASN1, err := x509.MarshalPKIXPublicKey(&pub)\n\tfatalIfErr(err, \"failed to encode public key\")\n\n\tvar spki struct {\n\t\tAlgorithm pkix.AlgorithmIdentifier\n\t\tSubjectPublicKey asn1.BitString\n\t}\n\t_, err = asn1.Unmarshal(spkiASN1, &spki)\n\tfatalIfErr(err, \"failed to decode public key\")\n\n\tskid := sha1.Sum(spki.SubjectPublicKey.Bytes)\n\n\ttpl := &x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{\"shrubgateway local CA\"},\n\t\t\tOrganizationalUnit: []string{userAndHostname},\n\n\t\t\t// The CommonName is required by iOS to show the certificate in the\n\t\t\t// \"Certificate Trust Settings\" menu.\n\t\t\t// https://github.com/FiloSottile/mkcert/issues/47\n\t\t\tCommonName: \"shrubgateway \" + userAndHostname,\n\t\t},\n\t\tSubjectKeyId: skid[:],\n\n\t\tNotAfter: time.Now().AddDate(10, 0, 0),\n\t\tNotBefore: time.Now().AddDate(0, 0, -1),\n\n\t\tKeyUsage: x509.KeyUsageCertSign,\n\n\t\tBasicConstraintsValid: true,\n\t\tIsCA: true,\n\t\tMaxPathLenZero: true,\n\n\t\t// for security reasons the local CA generated is restricted\n\t\t// to subdomains of \".localhost\"\n\t\tPermittedDNSDomains: []string{\".localhost\"},\n\t}\n\n\tcert, err := x509.CreateCertificate(rand.Reader, tpl, tpl, &pub, priv)\n\tfatalIfErr(err, \"failed to generate CA certificate\")\n\n\tprivDER, err := x509.MarshalPKCS8PrivateKey(priv)\n\tfatalIfErr(err, \"failed to encode CA key\")\n\terr = ioutil.WriteFile(filepath.Join(shrubCA, rootKeyName), pem.EncodeToMemory(\n\t\t&pem.Block{Type: \"PRIVATE KEY\", Bytes: privDER}), 0400)\n\tfatalIfErr(err, \"failed to save CA key\")\n\n\terr = ioutil.WriteFile(filepath.Join(shrubCA, rootName), pem.EncodeToMemory(\n\t\t&pem.Block{Type: \"CERTIFICATE\", Bytes: cert}), 0644)\n\tfatalIfErr(err, \"failed to save CA key\")\n\n\tlog.Printf(\"Created a new local CA at \\\"%s\\\"\\n\", shrubCA)\n}", "func newCmdCertsConfig() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"config\",\n\t\tShort: \"Certs config\",\n\t\tAliases: []string{\"cfg\"},\n\t\tRunE: func(cmd *cobra.Command, args []string) error 
{\n\t\t\tklog.Info(\"args is %v\", args)\n\t\t\treturn nil\n\t\t},\n\t\tArgs: cobra.NoArgs,\n\t}\n\tcmd.AddCommand(newCmdCertsConfigDefault())\n\tcmd.AddCommand(newCmdCertsConfigCheck())\n\treturn cmd\n}", "func New(c Config) lilty.ChainHandler {\n\treturn func(next lilty.HandlerFunc) lilty.HandlerFunc {\n\t\treturn func(ctxt *lilty.Context) {\n\t\t\tusername, password, ok := ctxt.Request.BasicAuth()\n\n\t\t\tmatch := c.Username == username && c.Password == password\n\n\t\t\tif !ok || !match {\n\t\t\t\tctxt.SetResponseHeader(lilty.WWWAuthenticate, `Basic realm=\"`+c.Realm+`\"`)\n\t\t\t\tctxt.SetStatusCode(401)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tnext(ctxt)\n\t\t}\n\t}\n}", "func New() *CA {\n\treturn &CA{\n\t\tPair: &Pair{},\n\t\tCertFileName: RootCertFileName,\n\t\tKeyFileName: RootKeyFileName,\n\t}\n}", "func (d *dexterOIDChttp) createOauth2Config() error {\n\t// setup oidc client context\n\tctx := oidc.ClientContext(context.Background(), d.httpClient)\n\n\t// populate oauth2 config\n\td.Oauth2Config.ClientID = oidcDataHTTP.clientID\n\td.Oauth2Config.ClientSecret = oidcDataHTTP.clientSecret\n\td.Oauth2Config.RedirectURL = oidcDataHTTP.callback\n\n\tswitch oidcDataHTTP.endpoint {\n\tcase \"azure\":\n\t\td.Oauth2Config.Endpoint = microsoft.AzureADEndpoint(oidcDataHTTP.azureTenant)\n\t\td.Oauth2Config.Scopes = []string{oidc.ScopeOpenID, oidc.ScopeOfflineAccess, \"email\"}\n\tcase \"google\":\n\t\td.Oauth2Config.Endpoint = google.Endpoint\n\t\td.Oauth2Config.Scopes = []string{oidc.ScopeOpenID, \"profile\", \"email\"}\n\tdefault:\n\t\t// Attempt to use endpoint as generic issuer if it is a valid URL\n\t\t_, err := url.Parse(oidcDataHTTP.endpoint)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unsupported endpoint: %s\", oidcDataHTTP.endpoint)\n\t\t}\n\n\t\t// Attempt to gather endpoint information via discovery\n\t\tgenericProvider, err := oidc.NewProvider(ctx, oidcDataHTTP.endpoint)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\td.Oauth2Config.Endpoint = genericProvider.Endpoint()\n\t\td.Oauth2Config.Scopes = []string{oidc.ScopeOpenID, \"profile\", \"email\"}\n\t}\n\n\t// Append additional specified scopes\n\td.Oauth2Config.Scopes = append(d.Oauth2Config.Scopes, d.scopes...)\n\n\treturn nil\n}", "func NewVerifier(ctx context.Context, aud string) *IDTokenVerifier {\n\tkeySet := oidc.NewRemoteKeySet(ctx, googleRootCertURL)\n\n\tvar config = &oidc.Config{\n\t\tSkipClientIDCheck: false,\n\t\tClientID: aud,\n\t}\n\toidcVerifier := oidc.NewVerifier(googleIssuerURL, keySet, config)\n\n\treturn &IDTokenVerifier{\n\t\toidcVerifier: oidcVerifier,\n\t}\n}", "func newClient(conf Config) (*github.Client, error) {\n\tctx := context.Background()\n\n\tvar ts oauth2.TokenSource\n\tswitch {\n\tcase conf.HasAPIToken():\n\t\tts = oauth2.StaticTokenSource(\n\t\t\t&oauth2.Token{AccessToken: conf.GetAPIToken()},\n\t\t)\n\tdefault:\n\t\treturn nil, errors.New(\"Cannot find GitHub credentials\")\n\t}\n\n\ttc := oauth2.NewClient(ctx, ts)\n\treturn github.NewClient(tc), nil\n}", "func newSecret(name string) corev1.Secret {\n\tconst (\n\t\t// defaultCert is a PEM-encoded certificate.\n\t\tdefaultCert = `-----BEGIN 
CERTIFICATE-----\nMIIDIjCCAgqgAwIBAgIBBjANBgkqhkiG9w0BAQUFADCBoTELMAkGA1UEBhMCVVMx\nCzAJBgNVBAgMAlNDMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0Rl\nZmF1bHQgQ29tcGFueSBMdGQxEDAOBgNVBAsMB1Rlc3QgQ0ExGjAYBgNVBAMMEXd3\ndy5leGFtcGxlY2EuY29tMSIwIAYJKoZIhvcNAQkBFhNleGFtcGxlQGV4YW1wbGUu\nY29tMB4XDTE2MDExMzE5NDA1N1oXDTI2MDExMDE5NDA1N1owfDEYMBYGA1UEAxMP\nd3d3LmV4YW1wbGUuY29tMQswCQYDVQQIEwJTQzELMAkGA1UEBhMCVVMxIjAgBgkq\nhkiG9w0BCQEWE2V4YW1wbGVAZXhhbXBsZS5jb20xEDAOBgNVBAoTB0V4YW1wbGUx\nEDAOBgNVBAsTB0V4YW1wbGUwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAM0B\nu++oHV1wcphWRbMLUft8fD7nPG95xs7UeLPphFZuShIhhdAQMpvcsFeg+Bg9PWCu\nv3jZljmk06MLvuWLfwjYfo9q/V+qOZVfTVHHbaIO5RTXJMC2Nn+ACF0kHBmNcbth\nOOgF8L854a/P8tjm1iPR++vHnkex0NH7lyosVc/vAgMBAAGjDTALMAkGA1UdEwQC\nMAAwDQYJKoZIhvcNAQEFBQADggEBADjFm5AlNH3DNT1Uzx3m66fFjqqrHEs25geT\nyA3rvBuynflEHQO95M/8wCxYVyuAx4Z1i4YDC7tx0vmOn/2GXZHY9MAj1I8KCnwt\nJik7E2r1/yY0MrkawljOAxisXs821kJ+Z/51Ud2t5uhGxS6hJypbGspMS7OtBbw7\n8oThK7cWtCXOldNF6ruqY1agWnhRdAq5qSMnuBXuicOP0Kbtx51a1ugE3SnvQenJ\nnZxdtYUXvEsHZC/6bAtTfNh+/SwgxQJuL2ZM+VG3X2JIKY8xTDui+il7uTh422lq\nwED8uwKl+bOj6xFDyw4gWoBxRobsbFaME8pkykP1+GnKDberyAM=\n-----END CERTIFICATE-----\n`\n\t\t// defaultKey is a PEM-encoded private key.\n\t\tdefaultKey = `-----BEGIN RSA PRIVATE KEY-----\nMIICWwIBAAKBgQDNAbvvqB1dcHKYVkWzC1H7fHw+5zxvecbO1Hiz6YRWbkoSIYXQ\nEDKb3LBXoPgYPT1grr942ZY5pNOjC77li38I2H6Pav1fqjmVX01Rx22iDuUU1yTA\ntjZ/gAhdJBwZjXG7YTjoBfC/OeGvz/LY5tYj0fvrx55HsdDR+5cqLFXP7wIDAQAB\nAoGAfE7P4Zsj6zOzGPI/Izj7Bi5OvGnEeKfzyBiH9Dflue74VRQkqqwXs/DWsNv3\nc+M2Y3iyu5ncgKmUduo5X8D9To2ymPRLGuCdfZTxnBMpIDKSJ0FTwVPkr6cYyyBk\n5VCbc470pQPxTAAtl2eaO1sIrzR4PcgwqrSOjwBQQocsGAECQQD8QOra/mZmxPbt\nbRh8U5lhgZmirImk5RY3QMPI/1/f4k+fyjkU5FRq/yqSyin75aSAXg8IupAFRgyZ\nW7BT6zwBAkEA0A0ugAGorpCbuTa25SsIOMxkEzCiKYvh0O+GfGkzWG4lkSeJqGME\nkeuJGlXrZNKNoCYLluAKLPmnd72X2yTL7wJARM0kAXUP0wn324w8+HQIyqqBj/gF\nVt9Q7uMQQ3s72CGu3ANZDFS2nbRZFU5koxrggk6lRRk1fOq9NvrmHg10AQJABOea\npgfj+yGLmkUw8JwgGH6xCUbHO+WBUFSlPf+Y50fJeO+OrjqPXAVKeSV3ZCwWjKT4\n9viXJNJJ4WfF0bO/XwJAOMB1wQnEOSZ4v+laMwNtMq6hre5K8woqteXICoGcIWe8\nu3YLAbyW/lHhOCiZu2iAI8AbmXem9lW6Tr7p/97s0w==\n-----END RSA PRIVATE KEY-----\n`\n\t)\n\treturn corev1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t},\n\t\tData: map[string][]byte{\n\t\t\t\"tls.crt\": []byte(defaultCert),\n\t\t\t\"tls.key\": []byte(defaultKey),\n\t\t},\n\t}\n}", "func New(configVarResolver *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider {\n\treturn &provider{configVarResolver: &providerconfig.ConfigPointerVarResolver{Cvr: configVarResolver}}\n}", "func newCanaryConfig(provider config.Provider) (*Config, error) {\n\traw := provider.Get(ConfigurationKey)\n\tvar cfg Config\n\tif err := raw.Populate(&cfg); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to load canary configuration with error: %v\", err)\n\t}\n\tif err := cfg.validate(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &cfg, nil\n}", "func (s *SCEP) Init(config Config) (err error) {\n\tswitch {\n\tcase s.Type == \"\":\n\t\treturn errors.New(\"provisioner type cannot be empty\")\n\tcase s.Name == \"\":\n\t\treturn errors.New(\"provisioner name cannot be empty\")\n\t}\n\n\t// Mask the actual challenge value, so it won't be marshaled\n\ts.secretChallengePassword = s.ChallengePassword\n\ts.ChallengePassword = \"*** redacted ***\"\n\n\t// Default to 2048 bits minimum public key length (for CSRs) if not set\n\tif s.MinimumPublicKeyLength == 0 {\n\t\ts.MinimumPublicKeyLength = 2048\n\t}\n\n\tif s.MinimumPublicKeyLength%8 != 0 {\n\t\treturn errors.Errorf(\"%d bits is not 
exactly divisible by 8\", s.MinimumPublicKeyLength)\n\t}\n\n\ts.encryptionAlgorithm = s.EncryptionAlgorithmIdentifier // TODO(hs): we might want to upgrade the default security to AES-CBC?\n\tif s.encryptionAlgorithm < 0 || s.encryptionAlgorithm > 4 {\n\t\treturn errors.New(\"only encryption algorithm identifiers from 0 to 4 are valid\")\n\t}\n\n\t// TODO: add other, SCEP specific, options?\n\n\ts.ctl, err = NewController(s, s.Claims, config, s.Options)\n\treturn\n}", "func newConfigFromViper(v *viper.Viper) Config {\n\treturn Config{\n\t\tChainlinkURL: v.GetString(\"chainlinkurl\"),\n\t\tInitiatorToChainlinkAccessKey: v.GetString(\"ic_accesskey\"),\n\t\tInitiatorToChainlinkSecret: v.GetString(\"ic_secret\"),\n\t\tDatabaseURL: v.GetString(\"databaseurl\"),\n\t\tChainlinkToInitiatorAccessKey: v.GetString(\"ci_accesskey\"),\n\t\tChainlinkToInitiatorSecret: v.GetString(\"ci_secret\"),\n\t\tExpectsMock: v.GetBool(\"mock\"),\n\t\tChainlinkTimeout: v.GetDuration(\"cl_timeout\"),\n\t\tChainlinkRetryAttempts: v.GetUint(\"cl_retry_attempts\"),\n\t\tChainlinkRetryDelay: v.GetDuration(\"cl_retry_delay\"),\n\t}\n}", "func newContextWithAuth(perms []weave.Condition) (weave.Context, x.Authenticator) {\n\tctx := context.Background()\n\t// Set current block height to 100\n\tctx = weave.WithHeight(ctx, 100)\n\tauth := &weavetest.CtxAuth{Key: \"authKey\"}\n\t// Create a new context and add addr to the list of signers\n\treturn auth.SetConditions(ctx, perms...), auth\n}", "func New(conf *liverpc.ClientConfig) *Client {\n\tif conf == nil {\n\t\tconf = &liverpc.ClientConfig{}\n\t}\n\tconf.AppID = DiscoveryAppId\n\tvar realCli = liverpc.NewClient(conf)\n\tcli := &Client{cli: realCli}\n\tcli.clientInit(realCli)\n\treturn cli\n}", "func New(c *Config) *Provider {\n\treturn &Provider{\n\t\tConfig: c,\n\t}\n}", "func Init(c *cli.Context, appName string) (int, error) {\n\tkeySize := 2048\n\tdir := c.String(\"install-dir\")\n\tif _, err := os.Stat(dir); os.IsNotExist(err) {\n\t\tif err := os.MkdirAll(dir, 0755); /* #nosec */ err != nil {\n\t\t\treturn 1, fmt.Errorf(\"failed to create %s\", dir)\n\t\t}\n\t}\n\n\tif c.IsSet(\"registration-url\") {\n\t\tlog.Infof(\"Generating %d bit private key\", keySize)\n\t\tkey, err := rsa.GenerateKey(rand.Reader, keySize)\n\t\tif err != nil {\n\t\t\treturn 1, fmt.Errorf(\"can't generate a private key\")\n\t\t}\n\n\t\tkeyfile, err := os.OpenFile(path.Join(dir, \"cert.key\"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)\n\t\tif err != nil {\n\t\t\treturn 1, fmt.Errorf(\"failed to create %s: %v\", path.Join(dir, \"cert.key\"), err)\n\t\t}\n\t\tif err = pem.Encode(keyfile, &pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(key)}); err != nil {\n\t\t\treturn 1, fmt.Errorf(\"failed to write private key to file: %v\", err)\n\t\t}\n\t\tif err := keyfile.Close(); err != nil {\n\t\t\treturn 1, fmt.Errorf(\"failed to close handle to private key file: %s\", err)\n\t\t}\n\n\t\tcn := discoverIdentity(c)\n\t\t// create csr template\n\t\tcsrTemplate := x509.CertificateRequest{\n\t\t\tSignatureAlgorithm: x509.SHA256WithRSA,\n\t\t\tSubject: pkix.Name{\n\t\t\t\tCommonName: cn,\n\t\t\t},\n\t\t}\n\t\tlog.Infof(\"Creating signing request for identity %#v\", cn)\n\t\tcsrData, err := x509.CreateCertificateRequest(rand.Reader, &csrTemplate, key)\n\t\tif err != nil {\n\t\t\treturn 1, fmt.Errorf(\"failed to generate csr: %v\", err)\n\t\t}\n\t\tvar csr bytes.Buffer\n\t\tif err = pem.Encode(&csr, &pem.Block{Type: \"CERTIFICATE REQUEST\", Bytes: csrData}); err != nil {\n\t\t\treturn 1, 
fmt.Errorf(\"failed to PEM encode certificate request\")\n\t\t}\n\n\t\tlog.Info(\"Requesting certificate from registration endpoint\")\n\t\tclient := http.Client{\n\t\t\tCheckRedirect: func(req *http.Request, via []*http.Request) error {\n\t\t\t\tif len(via) >= 10 {\n\t\t\t\t\treturn errors.New(\"stopped after 10 redirects\")\n\t\t\t\t}\n\t\t\t\treq.Header[\"Accept\"] = via[0].Header[\"Accept\"]\n\t\t\t\treq.Header[\"User-Agent\"] = via[0].Header[\"User-Agent\"]\n\t\t\t\treturn nil\n\t\t\t},\n\t\t}\n\t\treq, err := http.NewRequest(\"POST\", c.String(\"registration-url\"), &csr)\n\t\tif err != nil {\n\t\t\treturn 1, fmt.Errorf(\"failed to create registration http request: %s\", err)\n\t\t}\n\t\treq.Header[\"Accept\"] = []string{\"application/json\"}\n\t\treq.Header[\"User-Agent\"] = []string{appName + \" \" + version.String()}\n\t\tresp, err := client.Do(req)\n\t\tif err != nil {\n\t\t\treturn 1, fmt.Errorf(\"http request failed: %v\", err)\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tswitch {\n\t\tcase resp.StatusCode == 403:\n\t\t\treturn 1, fmt.Errorf(\"invalid registration token given\")\n\t\tcase resp.StatusCode != 200:\n\t\t\treturn 1, fmt.Errorf(\"unknown error while fetching certificate: %s\", resp.Status)\n\t\t}\n\t\tvar certs struct {\n\t\t\tCa string\n\t\t\tCertificate string\n\t\t}\n\t\terr = json.NewDecoder(resp.Body).Decode(&certs)\n\t\tif err != nil {\n\t\t\treturn 1, fmt.Errorf(\"failed to parse json reponse: %v\", err)\n\t\t}\n\n\t\terr = ioutil.WriteFile(path.Join(dir, \"cert.pem\"), []byte(certs.Certificate), 0644)\n\t\tif err != nil {\n\t\t\treturn 1, fmt.Errorf(\"failed to write certificate to disk: %v\", err)\n\t\t}\n\t\terr = ioutil.WriteFile(path.Join(dir, \"ca.pem\"), []byte(certs.Ca), 0644)\n\t\tif err != nil {\n\t\t\treturn 1, fmt.Errorf(\"failed to write CA certificate to disk: %v\", err)\n\t\t}\n\n\t\tlog.Info(\"Retrieved and stored certificate\")\n\n\t\tcfgFile, err := os.OpenFile(path.Join(dir, \"arc.cfg\"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) // #nosec\n\t\tif err != nil {\n\t\t\treturn 1, fmt.Errorf(\"failed to create %s: %v\", path.Join(dir, \"arc.cfg\"), err)\n\t\t}\n\n\t\tupdateInterval := \"\"\n\t\tif c.IsSet(\"update-interval\") {\n\t\t\tupdateInterval = fmt.Sprintf(\"%d\", c.Int(\"update-interval\"))\n\t\t}\n\n\t\tcertUpdateInterval := \"\"\n\t\tif c.IsSet(\"cert-update-interval\") {\n\t\t\tcertUpdateInterval = fmt.Sprintf(\"%d\", c.Int(\"cert-update-interval\"))\n\t\t}\n\n\t\tcertUpdateThreshold := \"\"\n\t\tif c.IsSet(\"cert-update-threshold\") {\n\t\t\tcertUpdateThreshold = fmt.Sprintf(\"%d\", c.Int(\"cert-update-threshold\"))\n\t\t}\n\n\t\teol := \"\\n\"\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\teol = \"\\r\\n\"\n\t\t}\n\n\t\ttemplateVars := map[string]string{\n\t\t\t\"Cert\": path.Join(dir, \"cert.pem\"),\n\t\t\t\"Key\": path.Join(dir, \"cert.key\"),\n\t\t\t\"Ca\": path.Join(dir, \"ca.pem\"),\n\t\t\t\"Endpoint\": strings.Join(c.StringSlice(\"endpoint\"), \",\"),\n\t\t\t\"UpdateUri\": c.String(\"update-uri\"),\n\t\t\t\"ApiUri\": c.String(\"api-uri\"),\n\t\t\t\"UpdateInterval\": updateInterval,\n\t\t\t\"CertUpdateInterval\": certUpdateInterval,\n\t\t\t\"CertUpdateThreshold\": certUpdateThreshold,\n\t\t\t\"Transport\": c.String(\"transport\"),\n\t\t\t\"Eol\": eol,\n\t\t}\n\t\terr = configTemplate.Execute(cfgFile, templateVars)\n\t\tif err != nil {\n\t\t\treturn 1, fmt.Errorf(\"failed to write config file: %v\", err)\n\t\t}\n\n\t}\n\n\tif err := service.New(dir).Install(c.String(\"tmp-dir\")); err != nil {\n\t\treturn 1, fmt.Errorf(\"failed to 
install service: %s\", err)\n\t}\n\treturn 0, nil\n}", "func NewClient(c Config) (*Client, error) {\n\tif c.CookieName == \"\" {\n\t\treturn nil, fmt.Errorf(\"must supply a cookie name\")\n\t}\n\tif c.Regexp == \"\" {\n\t\tc.Regexp = noMatch\n\t}\n\n\thasEmailScope := false\n\n\tfor _, s := range c.Scopes {\n\t\tif s == emailScope {\n\t\t\thasEmailScope = true\n\t\t}\n\t}\n\tif !hasEmailScope {\n\t\tc.Scopes = append(c.Scopes, \"https://www.googleapis.com/auth/userinfo.email\")\n\t}\n\n\tmatch, err := regexp.Compile(c.Regexp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\toc := &Client{\n\t\toauthConfig: &oauth2.Config{\n\t\t\tClientID: c.Token,\n\t\t\tClientSecret: c.Secret,\n\t\t\tRedirectURL: c.RedirectURL,\n\t\t\tScopes: c.Scopes,\n\t\t\tEndpoint: google.Endpoint,\n\t\t},\n\t\tsm: sessions.NewManager(c.CookieName),\n\t\tm: map[string]*oauth2.Token{},\n\t\thttpClients: map[string]*http.Client{},\n\t\tmatch: match,\n\t}\n\n\tx := make([]byte, 32)\n\trand.Read(x)\n\toc.oauthState = fmt.Sprintf(\"%x\", x)\n\toc.whitelist = map[string]struct{}{}\n\treturn oc, nil\n}", "func New(viper *viper.Viper, httpClient *http.Client) (*Config, error) {\n\n\t// Set Defaults\n\tviper.SetDefault(\"VAULT_ADDR\", \"http://127.0.0.1:8200\")\n\tviper.SetDefault(\"KV_VERSION\", \"2\")\n\n\t// Instantiate Env\n\tviper.SetEnvPrefix(\"AVP\")\n\tviper.AutomaticEnv()\n\n\tconfig := &Config{\n\t\tAddress: viper.GetString(\"VAULT_ADDR\"),\n\t\tPathPrefix: viper.GetString(\"PATH_PREFIX\"),\n\t}\n\n\tapiConfig := &api.Config{\n\t\tAddress: viper.GetString(\"VAULT_ADDR\"),\n\t\tHttpClient: httpClient,\n\t}\n\n\ttlsConfig := &api.TLSConfig{}\n\n\tif viper.IsSet(\"VAULT_CAPATH\") {\n\t\ttlsConfig.CAPath = viper.GetString(\"VAULT_CAPATH\")\n\t}\n\n\tif viper.IsSet(\"VAULT_CACERT\") {\n\t\ttlsConfig.CACert = viper.GetString(\"VAULT_CACERT\")\n\t}\n\n\tif viper.IsSet(\"VAULT_SKIP_VERIFY\") {\n\t\ttlsConfig.Insecure = viper.GetBool(\"VAULT_SKIP_VERIFY\")\n\t}\n\n\tif err := apiConfig.ConfigureTLS(tlsConfig); err != nil {\n\t\treturn nil, err\n\t}\n\n\tapiClient, err := api.NewClient(apiConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif viper.IsSet(\"VAULT_NAMESPACE\") {\n\t\tapiClient.SetNamespace(viper.GetString(\"VAULT_NAMESPACE\"))\n\t}\n\n\tif viper.IsSet(\"PATH_PREFIX\") {\n\t\tprint(\"PATH_PREFIX will be deprecated in v1.0.0, please migrate to using the avp_path annotation.\")\n\t}\n\n\tconfig.VaultClient = apiClient\n\n\tauthType := viper.GetString(\"AUTH_TYPE\")\n\n\tvar auth types.AuthType\n\tswitch viper.GetString(\"TYPE\") {\n\tcase \"vault\":\n\t\tswitch authType {\n\t\tcase \"approle\":\n\t\t\tif viper.IsSet(\"ROLE_ID\") && viper.IsSet(\"SECRET_ID\") {\n\t\t\t\tauth = vault.NewAppRoleAuth(viper.GetString(\"ROLE_ID\"), viper.GetString(\"SECRET_ID\"))\n\t\t\t} else {\n\t\t\t\treturn nil, errors.New(\"ROLE_ID and SECRET_ID for approle authentication cannot be empty\")\n\t\t\t}\n\t\tcase \"github\":\n\t\t\tif viper.IsSet(\"GITHUB_TOKEN\") {\n\t\t\t\tauth = vault.NewGithubAuth(viper.GetString(\"GITHUB_TOKEN\"))\n\t\t\t} else {\n\t\t\t\treturn nil, errors.New(\"GITHUB_TOKEN for github authentication cannot be empty\")\n\t\t\t}\n\t\tcase \"k8s\":\n\t\t\tif viper.IsSet(\"K8S_ROLE\") {\n\t\t\t\tauth = vault.NewK8sAuth(\n\t\t\t\t\tviper.GetString(\"K8S_ROLE\"),\n\t\t\t\t\tviper.GetString(\"K8S_MOUNT_PATH\"),\n\t\t\t\t\tviper.GetString(\"K8S_TOKEN_PATH\"),\n\t\t\t\t)\n\t\t\t} else {\n\t\t\t\treturn nil, errors.New(\"K8S_ROLE cannot be empty when using Kubernetes Auth\")\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, 
errors.New(\"Must provide a supported Authentication Type\")\n\t\t}\n\t\tconfig.Backend = backends.NewVaultBackend(auth, apiClient, viper.GetString(\"KV_VERSION\"))\n\tcase \"secretmanager\":\n\t\tswitch authType {\n\t\tcase \"iam\":\n\t\t\tif viper.IsSet(\"IBM_API_KEY\") {\n\t\t\t\tauth = ibmsecretmanager.NewIAMAuth(viper.GetString(\"IBM_API_KEY\"))\n\t\t\t} else {\n\t\t\t\treturn nil, errors.New(\"IBM_API_KEY for iam authentication cannot be empty\")\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, errors.New(\"Must provide a supported Authentication Type\")\n\t\t}\n\t\tconfig.Backend = backends.NewIBMSecretManagerBackend(auth, apiClient)\n\tdefault:\n\t\treturn nil, errors.New(\"Must provide a supported Vault Type\")\n\t}\n\n\treturn config, nil\n}", "func New(provider client.ConfigProvider, log *logrus.Logger) certstore.CertStore {\n\treturn &acmStore{\n\t\tacm: acm.New(provider),\n\t\tlog: log,\n\t}\n}", "func newConfig() (*rest.Config, error) {\n // try in cluster config first, it should fail quickly on lack of env vars\n cfg, err := inClusterConfig()\n if err != nil {\n cfg, err = clientcmd.BuildConfigFromFlags(\"\", clientcmd.RecommendedHomeFile)\n if err != nil {\n return nil, errors.Wrap(err, \"failed to get InClusterConfig and Config from kube_config\")\n }\n }\n return cfg, nil\n}", "func NewProvisioningCA(c *ProvisioningConf) (*ProvisioningAuthority, error) {\n\tif c == nil {\n\t\treturn nil, errors.New(\"provisioning config is nil\")\n\t}\n\n\tvar rootCertificate *x509.Certificate\n\tvar rootKey *rsa.PrivateKey\n\tswitch {\n\tcase c.RootCertificate != nil && c.RootKey != nil:\n\t\trootCertificate = c.RootCertificate\n\t\trootKey = c.RootKey\n\n\tcase c.RootCertificate == nil && c.RootKey == nil:\n\t\tvar err error\n\t\trootKey, err = generateRSAKey()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\trootCertificate, err = createRootCertificate(rootKey, &x509.Certificate{\n\t\t\tSerialNumber: big.NewInt(1),\n\t\t\tSubject: pkix.Name{CommonName: \"root\"},\n\t\t\tBasicConstraintsValid: true,\n\t\t\tIsCA: true,\n\t\t\tNotAfter: neverExpires,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\tdefault:\n\t\treturn nil, errors.New(\"the root certificate or private key is nil but not both\")\n\t}\n\n\tprovisioningAuthority := &ProvisioningAuthority{\n\t\tRootCert: rootCertificate,\n\t\tRootKey: rootKey,\n\t}\n\n\tif c.NoIntermediates {\n\t\treturn provisioningAuthority, nil\n\t}\n\n\tintermediateSigningKey, err := generateRSAKey()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tintermediateCertificate, err := createCertificate(&intermediateSigningKey.PublicKey, &x509.Certificate{\n\t\tSerialNumber: big.NewInt(2),\n\t\tNotAfter: neverExpires,\n\t\tSubject: pkix.Name{CommonName: \"intermediate\"},\n\t\tIsCA: true,\n\t\tBasicConstraintsValid: true,\n\t}, rootKey, rootCertificate)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tprovisioningAuthority.IntermediateCert = intermediateCertificate\n\tprovisioningAuthority.IntermediateKey = intermediateSigningKey\n\n\treturn provisioningAuthority, nil\n}", "func (a *IAMApiService) ConfigureOIDCProvider(ctx context.Context, providerId string, iamoidcProviderConfig IamoidcProviderConfig) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = http.MethodPut\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + 
\"/acs/api/v1/auth/oidc/providers/{provider-id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"provider-id\"+\"}\", fmt.Sprintf(\"%v\", providerId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &iamoidcProviderConfig\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 0 {\n\t\t\tvar v IamError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func newVppAgentComposite(configuration *common.NSConfiguration) *vppagentComposite {\n\t// ensure the env variables are processed\n\tif configuration == nil {\n\t\tconfiguration = &common.NSConfiguration{}\n\t}\n\tconfiguration.CompleteNSConfiguration()\n\n\tnewVppAgentComposite := &vppagentComposite{\n\t\tvppAgentEndpoint: defaultVPPAgentEndpoint,\n\t\tworkspace: configuration.Workspace,\n\t}\n\tif err := newVppAgentComposite.Reset(); err != nil {\n\t\treturn nil\n\t}\n\n\treturn newVppAgentComposite\n}", "func NewECCA(name string) (*x509.Certificate, interface{}, error) {\n\tkey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"unable to create a private key for a new CA: %v\", err)\n\t}\n\n\tcfg := CertID{\n\t\tCommonName: name,\n\t}\n\n\tnow := time.Now()\n\ttmpl := x509.Certificate{\n\t\tSerialNumber: new(big.Int).SetInt64(0),\n\t\tSubject: pkix.Name{\n\t\t\tCommonName: cfg.CommonName,\n\t\t\tOrganization: cfg.Organization,\n\t\t},\n\t\tNotBefore: now.UTC(),\n\t\tNotAfter: now.Add(time.Hour).UTC(),\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,\n\t\tBasicConstraintsValid: true,\n\t\tIsCA: true,\n\t}\n\n\tcertDERBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, key.Public(), key)\n\tif err != nil {\n\t\treturn 
nil, nil, err\n\t}\n\tcert, err := x509.ParseCertificate(certDERBytes)\n\treturn cert, key, err\n}" ]
[ "0.68531483", "0.60295284", "0.6020972", "0.5855811", "0.5811098", "0.57540846", "0.5675066", "0.56265175", "0.55925673", "0.55753696", "0.5556502", "0.53845775", "0.5298002", "0.5277302", "0.5205054", "0.5196069", "0.51866984", "0.51817036", "0.5118985", "0.51092976", "0.5088442", "0.50671583", "0.50528175", "0.50496584", "0.50320846", "0.5028947", "0.50178385", "0.5016848", "0.500096", "0.49930125", "0.49849293", "0.49795482", "0.4975553", "0.49722028", "0.4967285", "0.49550727", "0.49515077", "0.49401006", "0.49394426", "0.49316844", "0.49289", "0.4922389", "0.49219605", "0.49162114", "0.49135238", "0.49128217", "0.4912489", "0.49081227", "0.48995003", "0.48970005", "0.48934144", "0.48911014", "0.4884676", "0.48842728", "0.48694366", "0.48675197", "0.48662734", "0.48638362", "0.4844265", "0.48422745", "0.4840176", "0.4836618", "0.48347938", "0.48313218", "0.4824578", "0.48240763", "0.48231125", "0.48200184", "0.4815932", "0.48091215", "0.48043928", "0.4804073", "0.48029682", "0.48015174", "0.4794402", "0.47912306", "0.47875133", "0.47858217", "0.47852314", "0.47717047", "0.47683236", "0.47682622", "0.47637537", "0.4741274", "0.474106", "0.47282857", "0.47266468", "0.47256607", "0.47237524", "0.47159642", "0.47143897", "0.4707131", "0.46953467", "0.46900234", "0.4689557", "0.46865734", "0.4678208", "0.4677025", "0.4676196", "0.46716952" ]
0.7447854
0
GetOIDCRedirectURL returns the URL used for an additional round trip to ensure that cookies are written
func (a *loginAPI) GetOIDCRedirectURL() string { return oidcInitiateURL }
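The document above only exposes a constant; for illustration, here is a minimal sketch of how that URL drives the cookie-commit round trip. Everything in the sketch is an assumption rather than the author's code: the `/auth/flow/init` value for `oidcInitiateURL`, the `randToken` helper, and the handler wiring (modeled loosely on the `HandleOIDCRedirect` negative below).

```go
package main

import (
	"crypto/rand"
	"encoding/base64"
	"net/http"
)

// Assumed value; the real constant is defined elsewhere in the source package.
const oidcInitiateURL = "/auth/flow/init"

// randToken is a hypothetical stand-in for the random-state helper used by the callers.
func randToken() string {
	b := make([]byte, 16)
	_, _ = rand.Read(b)
	return base64.RawURLEncoding.EncodeToString(b)
}

// initiate sets the state cookie, then redirects to the URL that
// GetOIDCRedirectURL would return, so the browser makes one extra round trip
// and the cookie is committed before the real redirect to the identity provider.
func initiate(w http.ResponseWriter, r *http.Request) {
	http.SetCookie(w, &http.Cookie{Name: "state", Value: randToken(), Path: "/", HttpOnly: true})
	http.Redirect(w, r, oidcInitiateURL, http.StatusTemporaryRedirect)
}

func main() {
	http.HandleFunc("/signin", initiate)
	_ = http.ListenAndServe(":8080", nil)
}
```

The extra hop matters because the Set-Cookie header has to reach the browser before the flow continues; bouncing through an internal URL first guarantees the state cookie is stored and can be verified on the final redirect.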
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (a *loginAPI) HandleOIDCRedirect(w http.ResponseWriter, r *http.Request) error {\n\tstate := randToken()\n\ta.appCookie.Set(stateParam, state, cookieExpiry, w)\n\tlog.WithField(\"func\", \"server.HandleOIDCRedirect\").Debugf(\"GetRedirect: initiate using state '%s'\", state)\n\thttp.Redirect(w, r, a.GetOIDCRedirectURL(), http.StatusTemporaryRedirect)\n\treturn nil\n}", "func GetAuthURL(res http.ResponseWriter, req *http.Request) (string, error) {\n\tif !keySet && defaultStore == Store {\n\t\tfmt.Println(\"goth/gothic: no SESSION_SECRET environment variable is set. The default cookie store is not available and any calls will fail. Ignore this warning if you are using a different store.\")\n\t}\n\n\tproviderName, err := GetProviderName(req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tprovider, err := goth.GetProvider(providerName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tsess, err := provider.BeginAuth(SetState(req))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\turl, err := sess.GetAuthURL()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr = StoreInSession(providerName, sess.Marshal(), req, res)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn url, err\n}", "func (r *oauthProxy) getRedirectionURL(w http.ResponseWriter, req *http.Request) string {\n\tctx, span, logger := r.traceSpan(req.Context(), \"getRedirectionURL\")\n\tif span != nil {\n\t\tdefer span.End()\n\t}\n\n\tvar redirect string\n\tswitch r.config.RedirectionURL {\n\tcase \"\":\n\t\t// need to determine the scheme, cx.Request.URL.Scheme doesn't have it, best way is to default\n\t\t// and then check for TLS\n\t\tscheme := unsecureScheme\n\t\tif req.TLS != nil {\n\t\t\tscheme = secureScheme\n\t\t}\n\t\t// @QUESTION: should I use the X-Forwarded-<header>?? 
..\n\t\tredirect = fmt.Sprintf(\"%s://%s\",\n\t\t\tdefaultTo(req.Header.Get(\"X-Forwarded-Proto\"), scheme),\n\t\t\tdefaultTo(req.Header.Get(\"X-Forwarded-Host\"), req.Host))\n\tdefault:\n\t\tredirect = r.config.RedirectionURL\n\t}\n\n\tstate, _ := req.Cookie(requestStateCookie)\n\tif state != nil && req.URL.Query().Get(\"state\") != state.Value {\n\t\tlogger.Error(\"state in cookie and url query parameter do not match\", zap.String(\"cookie-state\", state.Value),\n\t\t\tzap.String(\"url-state\", req.URL.Query().Get(\"state\")))\n\t\t// clear all cookies in response\n\t\tr.clearAllCookies(req, w)\n\t\tr.errorResponse(w, req.WithContext(ctx), \"state parameter mismatch\", http.StatusForbidden, nil)\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\"%s%s\", redirect, r.config.WithOAuthURI(\"callback\"))\n}", "func (a *loginAPI) HandleOIDCRedirectFinal(w http.ResponseWriter, r *http.Request) error {\n\tstate := a.appCookie.Get(stateParam, r)\n\tif state == \"\" {\n\t\tlog.WithField(\"func\", \"server.HandleOIDCRedirectFinal\").Debugf(\"empty state from cookie, referrer: '%s'\", r.Referer())\n\t\treturn errors.BadRequestError{Err: fmt.Errorf(\"missing state, cannot initiate OIDC\"), Request: r}\n\t}\n\tlog.WithField(\"func\", \"server.HandleOIDCRedirectFinal\").Debugf(\"initiate OIDC redirect using state: '%s'\", state)\n\thttp.Redirect(w, r, a.oauthConfig.AuthCodeURL(state), http.StatusFound)\n\treturn nil\n}", "func ConsentURL(applicationID, redirectURI, state string) (*url.URL, error) {\n\tconsentURL, err := url.Parse(\"https://login.microsoftonline.com/common/adminconsent\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tquery := url.Values{\n\t\t\"client_id\": []string{applicationID},\n\t}\n\n\tif redirectURI != \"\" {\n\t\tquery.Set(\"redirect_uri\", redirectURI)\n\t}\n\n\tif state != \"\" {\n\t\tquery.Set(\"state\", state)\n\t}\n\n\tconsentURL.RawQuery = query.Encode()\n\n\treturn consentURL, nil\n}", "func GenerateAuthURL(state, redirectURL string) (authUrl string) {\n\tbase, _ := url.Parse(discoveryCache.AuthEndpoint)\n\t// construct query params\n\tparams := url.Values{}\n\tparams.Set(\"response_type\", \"code\")\n\tparams.Set(\"scope\", \"openid foundation_profile\")\n\tparams.Set(\"client_id\", viper.GetString(\"oidc.client_id\"))\n\tparams.Set(\"state\", state)\n\tparams.Set(\"request\", createRequestJWT(state, redirectURL))\n\tbase.RawQuery = params.Encode()\n\n\tlog.Debugf(\"auth URL was constructed with state: %s\", state)\n\treturn base.String()\n}", "func (authnreq *AuthnRequest) RedirectURL() string {\n\treturn authnreq.outMessage.RedirectURL(\n\t\tauthnreq.IDP.SSOURLs[HTTPRedirect],\n\t\tauthnreq.XML(HTTPRedirect),\n\t\t\"SAMLRequest\",\n\t)\n}", "func GetLoginURL(state string) string {\n\treturn conf.AuthCodeURL(state)\n}", "func GetRedirectURL(w http.ResponseWriter, r *http.Request) {\n\tredirectResponse, err := server.GetRedirectURL()\n\tif err == nil {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tjson.NewEncoder(w).Encode(redirectResponse)\n\t} else {\n\t\t//failed to get the redirectURL\n\t\tlog.Debugf(\"GetRedirectUrl failed with error %v\", err)\n\t\tReturnHTTPError(w, r, http.StatusInternalServerError, \"Failed to get the redirect URL\")\n\t\treturn\n\t}\n}", "func GetRedirectURL(w http.ResponseWriter, r *http.Request) {\n\tredirectResponse, err := server.GetRedirectURL()\n\tif err == nil {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tjson.NewEncoder(w).Encode(redirectResponse)\n\t} else {\n\t\t//failed to get the
redirectURL\n\t\tlog.Debugf(\"GetRedirectUrl failed with error %v\", err)\n\t\tReturnHTTPError(w, r, http.StatusInternalServerError, \"Failed to get the redirect URL\")\n\t}\n}", "func GetRedirectURL() (map[string]string, error) {\n\tresponse := make(map[string]string)\n\tif provider != nil {\n\t\tredirect := provider.GetRedirectURL()\n\t\tresponse[\"redirectUrl\"] = URLEncoded(redirect)\n\t\tresponse[\"provider\"] = provider.GetName()\n\t\tlog.Debugf(\"GetRedirectURL: returning response %v\", response)\n\t\treturn response, nil\n\t}\n\treturn response, fmt.Errorf(\"No auth provider configured\")\n}", "func OIDCGetLink(discoveryURL, clientID, clientSecret, redirectURL string) (string, error) {\n\tctx := context.Background()\n\tprovider, err := oidc.NewProvider(ctx, discoveryURL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\toauth2Config := oauth2.Config{\n\t\tClientID: clientID,\n\t\tClientSecret: clientSecret,\n\t\tRedirectURL: redirectURL,\n\t\tEndpoint: provider.Endpoint(),\n\t\tScopes: []string{oidc.ScopeOpenID},\n\t}\n\n\treturn fmt.Sprintf(\"{\\\"url\\\": \\\"%s\\\"}\", oauth2Config.AuthCodeURL(\"\", oauth2.AccessTypeOffline, oauth2.ApprovalForce)), nil\n}", "func GetRedirectURL(dto dto.PersistenceDTO) (url string) {\n\tif dto.PDS == model.EnvVariables.Google_Drive_PDS && dto.GoogleAccessCreds.AccessToken == \"\" {\n\t\turl = getGoogleRedirectURL(dto.ID)\n\t} else if dto.PDS == model.EnvVariables.One_Drive_PDS && dto.OneDriveToken.AccessToken == \"\" {\n\t\turl = getOneDriveRedirectURL(dto.ID)\n\t}\n\n\treturn\n}", "func GetRedirectURL(url string) (string, error) {\n\tdir, err := ioutil.TempDir(\"\", \"chromedp-example\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer os.RemoveAll(dir)\n\n\topts := append(chromedp.DefaultExecAllocatorOptions[:],\n\t\tchromedp.DisableGPU,\n\t\tchromedp.UserDataDir(dir),\n\t\tchromedp.Flag(\"headless\", !config.Config.Debug),\n\t)\n\n\tallocCtx, cancel := chromedp.NewExecAllocator(context.Background(), opts...)\n\tdefer cancel()\n\n\tctx, cancel := chromedp.NewContext(\n\t\tallocCtx,\n\t\tchromedp.WithLogf(log.Logf),\n\t)\n\tdefer cancel()\n\n\tctx, cancel = context.WithTimeout(ctx, time.Duration(config.Config.Deadline)*time.Second)\n\tdefer cancel()\n\n\tvar jsOutput string\n\tif err := chromedp.Run(ctx,\n\t\t// Navigate to user's page\n\t\tchromedp.Navigate(url),\n\t\t// Wait until page loads\n\t\tchromedp.WaitReady(`div`),\n\t\t// Grab url links from our element\n\t\tchromedp.EvaluateAsDevTools(`window.location.href`, &jsOutput),\n\t); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn jsOutput, err\n}", "func (p *OktaProvider) GetSignInURL(redirectURI, state string) string {\n\t// https://developer.okta.com/docs/api/resources/oidc/#authorize\n\tvar a url.URL\n\ta = *p.SignInURL\n\tparams, _ := url.ParseQuery(a.RawQuery)\n\tparams.Set(\"redirect_uri\", redirectURI)\n\tparams.Add(\"scope\", p.Scope)\n\tparams.Set(\"client_id\", p.ClientID)\n\tparams.Add(\"response_mode\", \"query\")\n\tparams.Set(\"response_type\", \"code\")\n\tparams.Add(\"state\", state)\n\ta.RawQuery = params.Encode()\n\treturn a.String()\n}", "func (manager *OpenIdManager) GenerateLoginUrl(providerId string, state string) (string, error) {\n\tclient, err := manager.GetOIdClient(providerId)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn client.GenerateLoginURL(state), nil\n}", "func GetURL(authUser AuthUser) (string, string) {\n\tstate := RandomString(30)\n\n\t//.AuthCodeURL generates the url the user visits to authorize access to our app:\n\treturn 
authUser.config.AuthCodeURL(state), state\n}", "func SignInURL(\n\tsenderPrivateKey *hpke.PrivateKey,\n\tauthenticatePublicKey *hpke.PublicKey,\n\tauthenticateURL *url.URL,\n\tredirectURL *url.URL,\n\tidpID string,\n) (string, error) {\n\tsignInURL := *authenticateURL\n\tsignInURL.Path = \"/.pomerium/sign_in\"\n\n\tq := signInURL.Query()\n\tq.Set(QueryRedirectURI, redirectURL.String())\n\tq.Set(QueryIdentityProviderID, idpID)\n\tq.Set(QueryVersion, versionStr())\n\tq.Set(QueryRequestUUID, uuid.NewString())\n\tBuildTimeParameters(q, signInExpiry)\n\tq, err := hpke.EncryptURLValuesV2(senderPrivateKey, authenticatePublicKey, q)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tsignInURL.RawQuery = q.Encode()\n\n\treturn signInURL.String(), nil\n}", "func WebAuthnURL(_ *http.Request, authenticateURL *url.URL, key []byte, values url.Values) string {\n\tu := authenticateURL.ResolveReference(&url.URL{\n\t\tPath: WebAuthnURLPath,\n\t\tRawQuery: buildURLValues(values, url.Values{\n\t\t\tQueryDeviceType: {DefaultDeviceType},\n\t\t\tQueryEnrollmentToken: nil,\n\t\t\tQueryRedirectURI: {authenticateURL.ResolveReference(&url.URL{\n\t\t\t\tPath: DeviceEnrolledPath,\n\t\t\t}).String()},\n\t\t}).Encode(),\n\t})\n\treturn NewSignedURL(key, u).Sign().String()\n}", "func (a API) AuthUrl(state string, redirect_url string) (url string) {\r\n\treturn \"https://www.linkedin.com/uas/oauth2/authorization?response_type=code&client_id=\" + a.oauth_key +\r\n\t\t\"&state=\" + state + \"&redirect_uri=\" + redirect_url\r\n}", "func (r MockedGoogleAuth) GetLoginURL(state string) string {\n\tif !r.initialized {\n\t\tr.InitGoogleAuthCredentials()\n\t}\n\treturn \"http://loginURLWithState:\" + state\n}", "func GetLoginURL(state string) (int, string, error) {\n\t_, conf, e := cred()\n\tif e != nil {\n\t\treturn 500, \"\", e\n\t}\n\treturn 200, conf.AuthCodeURL(state), nil\n}", "func (h *Handler) getLoginURL(state string) string {\n\t// State can be some kind of random generated hash string.\n\t// See relevant RFC: http://tools.ietf.org/html/rfc6749#section-10.12\n\treturn h.OAuthConf.AuthCodeURL(state)\n}", "func AuthResponseURL(redirectURI string, responseType oidc.ResponseType, responseMode oidc.ResponseMode, response interface{}, encoder httphelper.Encoder) (string, error) {\n\tparams, err := httphelper.URLEncodeResponse(response, encoder)\n\tif err != nil {\n\t\treturn \"\", oidc.ErrServerError().WithParent(err)\n\t}\n\tif responseMode == oidc.ResponseModeQuery {\n\t\treturn redirectURI + \"?\" + params, nil\n\t}\n\tif responseMode == oidc.ResponseModeFragment {\n\t\treturn redirectURI + \"#\" + params, nil\n\t}\n\tif responseType == \"\" || responseType == oidc.ResponseTypeCode {\n\t\treturn redirectURI + \"?\" + params, nil\n\t}\n\treturn redirectURI + \"#\" + params, nil\n}", "func (i *Identity) Redirect(clientID, clientSecret, redirectURL string) (string, error) {\n\t// check if driver is valid\n\tif !isDriverValid(i.driver) {\n\t\treturn \"\", fmt.Errorf(\"Driver not valid: %s\", i.driver)\n\t}\n\t// check if redirectURL is valid\n\t_, err := url.ParseRequestURI(redirectURL)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Redirect URL <%s> not valid: %s\", redirectURL, err.Error())\n\t}\n\t// check if redirectURL has valid scheme\n\tif !strings.HasPrefix(redirectURL, \"https://\") && !strings.HasPrefix(redirectURL, \"http://\") {\n\t\treturn \"\", fmt.Errorf(\"Redirect URL <%s> not valid: protocol not valid\", redirectURL)\n\t}\n\n\ti.cfg = &oauth2.Config{\n\t\tClientID: clientID,\n\t\tClientSecret:
clientSecret,\n\t\tRedirectURL: redirectURL,\n\t\tScopes: i.scopes,\n\t\tEndpoint: endpointMap[i.driver],\n\t}\n\treturn i.cfg.AuthCodeURL(i.state), nil\n}", "func (api *API) getLoginURL(state string) string {\n\treturn api.oauthConfig.AuthCodeURL(state)\n}", "func (p *Proxy) GetSignInURL(authenticateURL, redirectURL *url.URL, state string) *url.URL {\n\ta := authenticateURL.ResolveReference(&url.URL{Path: \"/sign_in\"})\n\tnow := time.Now()\n\trawRedirect := redirectURL.String()\n\tparams, _ := url.ParseQuery(a.RawQuery)\n\tparams.Set(\"redirect_uri\", rawRedirect)\n\tparams.Set(\"shared_secret\", p.SharedKey)\n\tparams.Set(\"response_type\", \"code\")\n\tparams.Add(\"state\", state)\n\tparams.Set(\"ts\", fmt.Sprint(now.Unix()))\n\tparams.Set(\"sig\", p.signRedirectURL(rawRedirect, now))\n\ta.RawQuery = params.Encode()\n\treturn a\n}", "func (p *AzureProvider) GetLoginURL(redirectURI, state string) string {\n\tvar a url.URL\n\ta = *p.LoginURL\n\tparams, _ := url.ParseQuery(a.RawQuery)\n\tparams.Set(\"redirect_uri\", redirectURI)\n\tparams.Add(\"scope\", p.Scope)\n\tparams.Set(\"client_id\", p.ClientID)\n\tparams.Set(\"response_type\", \"code\")\n\tparams.Add(\"state\", state)\n\tif p.ApprovalPrompt != \"\" {\n\t\tparams.Set(\"prompt\", p.ApprovalPrompt) // Azure uses \"prompt\" instead of \"approval_prompt\"\n\t}\n\ta.RawQuery = params.Encode()\n\treturn a.String()\n}", "func (o *LDAPIdentityProvider) URL() string {\n\tif o != nil && o.bitmap_&2 != 0 {\n\t\treturn o.url\n\t}\n\treturn \"\"\n}", "func oauth2Url(incName string) string {\n\tincInfo := oauth2Infos[incName]\n\treturn fmt.Sprintf(\"%s?client_id=%s&redirect_uri=%s&state=%s\", incInfo.codeUrl, incInfo.clientId, incInfo.redirectUrl, incName)\n}", "func (p *Proxy) signRedirectURL(rawRedirect string, timestamp time.Time) string {\n\tdata := []byte(fmt.Sprint(rawRedirect, timestamp.Unix()))\n\th := cryptutil.Hash(p.SharedKey, data)\n\treturn base64.URLEncoding.EncodeToString(h)\n}", "func (auth *oAuth) GetAuthURI() (string, error) { // Github Autorizate\n\tswitch {\n\tcase auth.ClientID == \"\":\n\t\treturn \"\", fmt.Errorf(\"GetAuth Error: oAuth ClientID undefined, you need to define it before use oAuth requests\")\n\tcase auth.Path == \"\":\n\t\treturn \"\", fmt.Errorf(\"GetAuth Error: oAuth STATE undefined, you need to define it before use oAuth requests\")\n\tcase auth.State == \"\":\n\t\treturn \"\", fmt.Errorf(\"GetAuth Error: oAuth STATE undefined, you need to define it before use oAuth requests\")\n\t}\n\n\tvalues := make(url.Values)\n\tvalues.Add(\"client_id\", auth.ClientID)\n\tvalues.Add(\"redirect_uri\", auth.Path)\n\tvalues.Add(\"scope\", \"user\")\n\tvalues.Add(\"state\", auth.State)\n\n\treturn fmt.Sprintf(\"%s?%s\", auth.AuthURI, values.Encode()), nil\n}", "func (m *InternalDomainFederation) GetSignOutUri()(*string) {\n val, err := m.GetBackingStore().Get(\"signOutUri\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func GoogleOauth2RedirectLink(w http.ResponseWriter, r *http.Request) {\n\thttp.Redirect(w, r, googleOauthConfig.AuthCodeURL(os.Getenv((\"RANDOM_STATE\"))), http.StatusTemporaryRedirect)\n}", "func GetLoginRedirect(ctx *context.Context) string {\n\tloginRedirect := strings.TrimSpace(ctx.GetCookie(\"login_to\"))\n\tif utils.IsMatchHost(loginRedirect) == false {\n\t\tloginRedirect = \"/\"\n\t} else {\n\t\tctx.SetCookie(\"login_to\", \"\", -1, \"/\")\n\t}\n\treturn loginRedirect\n}", "func (a *Authenticator) LoginURL(target string, state interface{}) 
(string, []byte, error) {\n\tsecret := make([]byte, 16)\n\t_, err := a.rng.Read(secret)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\t// This is not necessary. We could just pass the secret to the AuthCodeURL function.\n\t// But it needs to be escaped. AuthoCookie.Encode will sign it, as well as Encode it. Cannot hurt.\n\tesecret, err := a.authEncoder.Encode(LoginState{Secret: secret, Target: target, State: state})\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\turl := a.conf.AuthCodeURL(string(esecret))\n\t///* oauth2.AccessTypeOffline, oauth2.SetAuthURLParam(\"prompt\", \"login\"), oauth2.SetAuthURLParam(\"approval_prompt\", \"force\"), oauth2.SetAuthURLParam(\"max_age\", \"0\") */)\n\treturn url, secret, nil\n}", "func (o AppSharedCredentialsOutput) RedirectUrl() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *AppSharedCredentials) pulumi.StringPtrOutput { return v.RedirectUrl }).(pulumi.StringPtrOutput)\n}", "func (c Client) AuthURL(state string) string {\n\tvalues := url.Values{\"client_id\": {c.ISS}, \"state\": {\"state\"}, \"response_type\": {\"code\"}}\n\treturn fmt.Sprintf(\"%s?%s\", c.Endpoint.AuthURL, values.Encode())\n}", "func (a Auth) URL() string {\n\tu := url.URL{}\n\tu.Host = oauthHost\n\tu.Scheme = oauthScheme\n\tu.Path = oauthPath\n\n\tif len(a.RedirectURI) == 0 {\n\t\ta.RedirectURI = oauthRedirectURI\n\t}\n\tif len(a.ResponseType) == 0 {\n\t\ta.ResponseType = oauthResponseType\n\t}\n\tif len(a.Display) == 0 {\n\t\ta.Display = oauthDisplay\n\t}\n\n\tvalues := u.Query()\n\tvalues.Add(paramResponseType, a.ResponseType)\n\tvalues.Add(paramScope, a.Scope.String())\n\tvalues.Add(paramAppID, int64s(a.ID))\n\tvalues.Add(paramRedirectURI, a.RedirectURI)\n\tvalues.Add(paramVersion, defaultVersion)\n\tvalues.Add(paramDisplay, a.Display)\n\tu.RawQuery = values.Encode()\n\n\treturn u.String()\n}", "func (c *Client) AuthenticateURL(state string, scopes ...string) string {\n\tu := url.Values{\n\t\t\"response_type\": {\"code\"},\n\t\t\"redirect_uri\": {c.callbackURL},\n\t\t\"client_id\": {c.clientID},\n\t\t\"scope\": {strings.Join(scopes, \" \")},\n\t\t\"state\": {state},\n\t}\n\n\treturn fmt.Sprintf(\"%s?%s\", c.discovery.AuthorizationEndpoint, u.Encode())\n}", "func loginURL(ctx context.Context, dest string) string {\n\turl := domainPrefix + appengine.AppID(ctx) + appspotDomainSuffix + loginRouteSuffix\n\turl = \"/login\"\n\tif len(dest) > 0 {\n\t\turl = url + \"?redirect=\" + html.EscapeString(dest)\n\t}\n\treturn url\n}", "func (o AppSharedCredentialsOutput) AccessibilityLoginRedirectUrl() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *AppSharedCredentials) pulumi.StringPtrOutput { return v.AccessibilityLoginRedirectUrl }).(pulumi.StringPtrOutput)\n}", "func (mc *client) GetAuthRequestURL(state string) (string, error) {\n\treturn getAuthRequestURL(state, mc.oauth)\n}", "func (m *InternalDomainFederation) GetActiveSignInUri()(*string) {\n val, err := m.GetBackingStore().Get(\"activeSignInUri\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func (p *Proxy) GetSignOutURL(authenticateURL, redirectURL *url.URL) *url.URL {\n\ta := authenticateURL.ResolveReference(&url.URL{Path: \"/sign_out\"})\n\tnow := time.Now()\n\trawRedirect := redirectURL.String()\n\tparams, _ := url.ParseQuery(a.RawQuery)\n\tparams.Add(\"redirect_uri\", rawRedirect)\n\tparams.Set(\"ts\", fmt.Sprint(now.Unix()))\n\tparams.Set(\"sig\", p.signRedirectURL(rawRedirect, now))\n\ta.RawQuery = params.Encode()\n\treturn a\n}", "func (it 
IssueTracker) MakeAuthRequestURL() string {\n\t// NOTE: Need to add XSRF protection if we ever want to run this on a public\n\t// server.\n\treturn it.OAuthConfig.AuthCodeURL(it.OAuthConfig.RedirectURL)\n}", "func (client *Provider) GenerateLoginURL() string {\n\t// generates random state and sign it using HMAC256\n\tstate := GetRandomStateWithHMAC(25)\n\tloginURL := client.oauth2Config.AuthCodeURL(state)\n\treturn strings.TrimSpace(loginURL)\n}", "func (r *AccessTokenResponse) AsRedirectURL(redirectURL string, extraParams url.Values) string {\n\textraParams.Set(\"access_token\", r.Token)\n\textraParams.Set(\"token_type\", r.TokenType)\n\textraParams.Set(\"expires_in\", strconv.Itoa(r.ExpiresIn))\n\textraParams.Set(\"refresh_token\", r.RefreshToken)\n\n\treturn redirectURL + \"#\" + extraParams.Encode()\n}", "func (c *ConfigurationData) GetEmailVerifiedRedirectURL() string {\n\treturn c.v.GetString(varEmailVerifiedRedirectURL)\n}", "func (g *GoogleHandler) AuthCodeURL() string {\n\treturn g.oauthConfig.AuthCodeURL(uuid.NewV4().String(), oauth2.AccessTypeOffline)\n}", "func (c Configuration) PrintAuthUrl() {\n\tlog.Printf(\"Visit \\n%s/authorize?%s\",\n\t\tc.BaseUrl,\n\t\turl.Values{\"response_type\": {\"code\"},\n\t\t\t\"client_id\": {c.ClientId},\n\t\t\t\"redirect_uri\": {c.RedirectUri},\n\t\t\t\"scope\": {c.Scope}}.Encode(),\n\t)\n\n}", "func (p *OAuthProxy) GetRedirectURL(host string) *url.URL {\n\t// TODO: Ensure that we only allow valid upstream hosts in redirect URIs\n\tvar u url.URL\n\tu = *p.redirectURL\n\n\t// Build redirect URI from request host\n\tif u.Scheme == \"\" {\n\t\tif p.cookieSecure {\n\t\t\tu.Scheme = \"https\"\n\t\t} else {\n\t\t\tu.Scheme = \"http\"\n\t\t}\n\t}\n\tu.Host = host\n\treturn &u\n}", "func LoginUrl() (string, error) {\n\tclientId := getClientID()\n\tscopes := os.Getenv(\"GITHUB_SCOPES\")\n\tif scopes == \"\" {\n\t\tscopes = \"user:email repo\"\n\t}\n\tloginUrl := os.Getenv(\"GITHUB_URL_LOGIN\")\n\n\tif clientId == \"\" || loginUrl == \"\" {\n\t\treturn \"\", errors.New(\"oops something went wrong\")\n\t}\n\n\t// Build the final URL\n\turl := loginUrl + \"?client_id=\" + clientId + \"&scope=\" + scopes\n\treturn url, nil\n}", "func GetAuthorizationURL(id string) string {\n\tv := url.Values{}\n\tv.Set(\"client_id\", id)\n\tv.Set(\"response_type\", \"code\")\n\tv.Set(\"redirect_uri\", \"http://localhost:15298/callback\")\n\tv.Set(\"scope\", \"playlist-read-private user-top-read user-library-read user-library-modify user-read-currently-playing user-read-recently-played user-modify-playback-state user-read-playback-state user-follow-read playlist-read-collaborative\")\n\n\tr := buildRequest(\"GET\", accountsURLBase+\"authorize\", v, nil)\n\treturn r.URL.String()\n}", "func (o *LDAPIdentityProvider) GetURL() (value string, ok bool) {\n\tok = o != nil && o.bitmap_&2 != 0\n\tif ok {\n\t\tvalue = o.url\n\t}\n\treturn\n}", "func OAUTHDisconnect(c *fiber.Ctx) error {\n\tmodels.SYSLOG.Tracef(\"entering OAUTHDisconnect; original URL: %v\", c.OriginalURL())\n\tdefer models.SYSLOG.Trace(\"exiting OAUTHDisconnect\")\n\tsessData, err := models.MySessionStore.Get(c)\n\tif err != nil {\n\t\tmodels.SYSLOG.Errorf(\"session exception %v\", err)\n\t\tpanic(err)\n\t}\n\n\t// for debug purposes - inspect the session variables\n\tmodels.SYSLOG.Tracef(\"session id fresh ? 
%v\", sessData.Fresh())\n\n\tmodels.SYSLOG.Trace(\"trying to get 'oauth-scope' value\")\n\ttk := sessData.Get(\"oauth-scope\")\n\tmodels.SYSLOG.Tracef(\"session stored 'oauth-scope' is %v\", tk)\n\n\tmodels.SYSLOG.Trace(\"trying to get 'oauth-token-type' value\")\n\ttk = sessData.Get(\"oauth-token-type\")\n\tmodels.SYSLOG.Tracef(\"session stored 'oauth-token-type' is %v\", tk)\n\n\ttk = sessData.Get(\"oauth-token\")\n\tmodels.SYSLOG.Tracef(\"session stored 'oauth-token' is %v\", tk)\n\n\tsessData.Destroy()\n\n\treturn c.Redirect(\"/index.html\", fiber.StatusTemporaryRedirect)\n}", "func (p *AzureProvider) GetSignInURL(state string) string {\n\treturn p.oauth.AuthCodeURL(state, oauth2.AccessTypeOffline, oauth2.SetAuthURLParam(\"prompt\", \"select_account\"))\n}", "func redirectToCognitoLogin(ctx context.Context, d *aegis.HandlerDependencies, req *aegis.APIGatewayProxyRequest, res *aegis.APIGatewayProxyResponse, params url.Values) error {\n\tres.Redirect(301, d.Services.Cognito.HostedLoginURL)\n\treturn nil\n}", "func (session *Session) getRequestUrl(path string, params Params) string {\n\tif params == nil {\n\t\tparams = Params{}\n\t}\n\n\tparams[\"company_id\"] = session.companyId\n\tparams[\"company_token\"] = session.companyToken\n\tparams[\"app_id\"] = session.app.AppId\n\tparams[\"oauth_version\"] = 2\n\n\tif _, exist := params[\"open_id\"]; !exist && session.OpenId != \"\" {\n\t\tparams[\"open_id\"] = session.OpenId\n\t}\n\n\tif _, exist := params[\"client_ip\"]; !exist {\n\t\tif session.ClientIP != \"\" {\n\t\t\tparams[\"client_ip\"] = session.ClientIP\n\t\t} else {\n\t\t\tparams[\"client_ip\"] = session.app.ClientIP\n\t\t}\n\t}\n\n\treturn session.getUrl(path, params)\n}", "func (pca Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string, opts ...AuthCodeURLOption) (string, error) {\n\to := authCodeURLOptions{}\n\tif err := options.ApplyOptions(&o, opts); err != nil {\n\t\treturn \"\", err\n\t}\n\tap, err := pca.base.AuthParams.WithTenant(o.tenantID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tap.Claims = o.claims\n\tap.LoginHint = o.loginHint\n\tap.DomainHint = o.domainHint\n\treturn pca.base.AuthCodeURL(ctx, clientID, redirectURI, scopes, ap)\n}", "func (c *auth) GetAuthURL(params *AuthURLParams) (string, error) {\n\tforceApproval, state, singleSelect, vehicleInfo, flags := params.ForceApproval, params.State, params.SingleSelect, params.MakeBypass, params.Flags\n\n\tif c.clientID == \"\" {\n\t\treturn \"\", errors.New(\"AuthClient.ClientID missing\")\n\t}\n\n\tif c.redirectURI == \"\" {\n\t\treturn \"\", errors.New(\"AuthClient.RedirectURI missing\")\n\t}\n\n\t// Build Connect URL from go\n\tbaseURL, _ := url.Parse(connectURL)\n\tquery := baseURL.Query()\n\tquery.Set(\"response_type\", \"code\")\n\tquery.Set(\"client_id\", c.clientID)\n\tquery.Set(\"redirect_uri\", c.redirectURI)\n\tif len(flags) > 0 {\n\t\tquery.Set(\"flags\", strings.Join(flags, \" \"))\n\t}\n\n\tif c.scope != nil {\n\t\tquery.Set(\"scope\", strings.Join(c.scope, \" \"))\n\t}\n\n\tif c.testMode {\n\t\tquery.Set(\"mode\", \"test\")\n\t}\n\n\tapprovalPrompt := \"auto\"\n\tif forceApproval {\n\t\tapprovalPrompt = \"force\"\n\t}\n\tquery.Set(\"approval_prompt\", approvalPrompt)\n\n\tif state != \"\" {\n\t\tquery.Set(\"state\", state)\n\t}\n\n\tif vehicleInfo != (MakeBypass{}) {\n\t\tif vehicleInfo.Make != \"\" {\n\t\t\tquery.Set(\"make\", string(vehicleInfo.Make))\n\t\t}\n\t}\n\n\tif singleSelect != (SingleSelect{}) {\n\t\tsingleSelectEnabled := singleSelect.Enabled\n\t\tif 
singleSelect.VIN != \"\" {\n\t\t\tquery.Set(\"single_select_vin\", singleSelect.VIN)\n\t\t\tsingleSelectEnabled = true\n\t\t}\n\t\tquery.Set(\"single_select\", strconv.FormatBool(singleSelectEnabled))\n\t}\n\n\tbaseURL.RawQuery = query.Encode()\n\n\treturn baseURL.String(), nil\n}", "func redirectToCognitoLogout(ctx context.Context, d *aegis.HandlerDependencies, req *aegis.APIGatewayProxyRequest, res *aegis.APIGatewayProxyResponse, params url.Values) error {\n\thost := req.GetHeader(\"Host\")\n\tres.SetHeader(\"Set-Cookie\", \"access_token=; Domain=\"+host+\"; Secure; HttpOnly\")\n\tres.Redirect(301, d.Services.Cognito.HostedLogoutURL)\n\treturn nil\n}", "func (c *Config) GetURL() (string, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), c.Timeout.Duration)\n\tdefer cancel()\n\n\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, c.URL, nil)\n\tif err != nil {\n\t\treturn c.URL, fmt.Errorf(\"creating request: %w\", err)\n\t}\n\n\tclient := &http.Client{\n\t\tCheckRedirect: func(r *http.Request, via []*http.Request) error {\n\t\t\treturn http.ErrUseLastResponse\n\t\t},\n\t\tTransport: &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: !c.ValidSSL}, // nolint:gosec\n\t\t},\n\t}\n\n\treq.Header.Add(\"X-API-Key\", c.APIKey)\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn c.URL, fmt.Errorf(\"making request: %w\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\t_, _ = io.Copy(io.Discard, resp.Body) // read the whole body to avoid memory leaks.\n\n\tlocation, err := resp.Location()\n\tif err != nil {\n\t\treturn c.URL, nil //nolint:nilerr // no location header, no error returned.\n\t}\n\n\tif strings.Contains(location.String(), \"/login\") {\n\t\treturn c.URL, fmt.Errorf(\"redirected to login page while checking URL %s: %w\", c.URL, ErrInvalidAPIKey)\n\t}\n\n\treturn location.String(), nil\n}", "func GetAuthCallBackURL(r *http.Request) string {\n\tauthCallBackURL := url.URL{}\n\tauthCallBackURL.Scheme = r.URL.Scheme\n\tauthCallBackURL.Host = r.Host\n\tauthCallBackURL.Path = \"/oauth2/callback\"\n\tif authCallBackURL.Scheme == \"\" {\n\t\tif config.Config.IsSecure() {\n\t\t\tauthCallBackURL.Scheme = \"https\"\n\t\t} else {\n\t\t\tauthCallBackURL.Scheme = \"http\"\n\t\t}\n\t}\n\treturn authCallBackURL.String()\n}", "func (c *Config) GetReturnURL(saleID string) string { return fmt.Sprintf(\"%s/%s\", c.returnURL, saleID) }", "func AuthURL(urlStr string, sid string, tokenP1 string, tokenP2 string) (string, error) {\n\turlObj, err := url.ParseRequestURI(urlStr)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ttokenP3 := GenRandStr(16, \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\")\n\ttoken := tokenP1 + tokenP2 + tokenP3\n\tts := time.Now().Unix() + 4200\n\tpath := urlObj.RequestURI()\n\tsep := map[bool]string{true: \"&\", false: \"?\"}[strings.Contains(urlStr, \"?\")]\n\n\tplaintext := fmt.Sprintf(\"%s%ssid=%s%d%s\", path, sep, sid, ts, tokenP3)\n\tplaintextBytes := padPkcs7([]byte(plaintext))\n\tkey := hashStr(token)\n\tblock, err := aes.NewCipher(key)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tmode := cipher.NewCBCEncrypter(block, make([]byte, aes.BlockSize))\n\tciphertext := make([]byte, len(plaintextBytes))\n\tmode.CryptBlocks(ciphertext, plaintextBytes)\n\n\tciphertextEncoded := base64.StdEncoding.EncodeToString([]byte(ciphertext))\n\taccessKey := fmt.Sprintf(\"%d_%s_%s\", ts, tokenP3, ciphertextEncoded)\n\taccessKeyEncoded := url.QueryEscape(accessKey)\n\tfinal := fmt.Sprintf(`%s%ssid=%s&accessKey=%s`, 
urlStr, sep, sid, accessKeyEncoded)\n\n\treturn final, nil\n}", "func (keys *GlobalOptionsKey) OIDCIssuerURL() string {\n\treturn viper.GetString(keys.oidcIssuerURL)\n}", "func RedirectURL(r *http.Request) (string, bool) {\n\tif v := r.FormValue(QueryRedirectURI); v != \"\" {\n\t\treturn v, true\n\t}\n\n\tif c, err := r.Cookie(QueryRedirectURI); err == nil {\n\t\treturn c.Value, true\n\t}\n\n\treturn \"\", false\n}", "func (a *IAMApiService) OpenIDConnectCallbackURL(ctx context.Context) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = http.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/acs/api/v1/auth/oidc/callback\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func SignOutURL(r *http.Request, authenticateURL *url.URL, key []byte) string {\n\tu := authenticateURL.ResolveReference(&url.URL{\n\t\tPath: \"/.pomerium/sign_out\",\n\t})\n\tq := u.Query()\n\tif redirectURI, ok := RedirectURL(r); ok {\n\t\tq.Set(QueryRedirectURI, redirectURI)\n\t}\n\tq.Set(QueryVersion, versionStr())\n\tu.RawQuery = q.Encode()\n\treturn NewSignedURL(key, u).Sign().String()\n}", "func getDiscordWebHookUrl(hookId string, hookToken string) string {\n\treturn strings.Join([]string{DiscordWebHookUrl, hookId, hookToken}, \"/\")\n}", "func (h *Handler) oidcCallback(w http.ResponseWriter, r *http.Request, p httprouter.Params) (interface{}, error) {\n\tresult, err := h.GetConfig().Auth.ValidateOIDCAuthCallback(r.URL.Query())\n\tif err != nil {\n\t\th.Warnf(\"Error validating callback: %v.\", err)\n\t\thttp.Redirect(w, r, \"/web/msg/error/login_failed\", http.StatusFound)\n\t\treturn nil, nil\n\t}\n\th.Infof(\"Callback: %v %v %v.\", result.Username, result.Identity, result.Req.Type)\n\treturn nil, h.CallbackHandler(w, r, webapi.CallbackParams{\n\t\tUsername: result.Username,\n\t\tIdentity: result.Identity,\n\t\tSession: 
result.Session,\n\t\tCert: result.Cert,\n\t\tTLSCert: result.TLSCert,\n\t\tHostSigners: result.HostSigners,\n\t\tType: result.Req.Type,\n\t\tCreateWebSession: result.Req.CreateWebSession,\n\t\tCSRFToken: result.Req.CSRFToken,\n\t\tPublicKey: result.Req.PublicKey,\n\t\tClientRedirectURL: result.Req.ClientRedirectURL,\n\t})\n}", "func (m *Application) GetDefaultRedirectUri()(*string) {\n return m.defaultRedirectUri\n}", "func (c *Client) LoginURL(id string) string {\n\treturn c.loginURL(id)\n}", "func (s *StashConsumer) AuthorizeRedirect() (string, string, error) {\n\trequestToken, err := s.requestToken()\n\tif err != nil {\n\t\tlog.Warning(\"requestToken>%s\\n\", err)\n\t\treturn \"\", \"\", err\n\t}\n\turl, err := s.consumer.AuthorizeRedirect(requestToken)\n\treturn requestToken.Token(), url, err\n}", "func OAUTHRedirect(ctx *fiber.Ctx) error {\n\n\tmodels.SYSLOG.Tracef(\"entering OAUTHRedirect; original URL: %v\", ctx.OriginalURL())\n\tdefer models.SYSLOG.Trace(\"exiting OAUTHRedirect\")\n\n\t// First, we need to get the value of the `code` query param\n\tcode := ctx.Query(\"code\", \"\")\n\tif len(code) < 1 {\n\t\treturn ctx.SendStatus(fiber.StatusBadRequest)\n\t}\n\n\t// Next, lets for the HTTP request to call the github oauth enpoint\tto get our access token\n\n\ta := fiber.AcquireAgent()\n\treq := a.Request()\n\treq.Header.SetMethod(fiber.MethodPost)\n\treq.Header.Set(\"accept\", \"application/json\")\n\treq.SetRequestURI(fmt.Sprintf(\"https://github.com/login/oauth/access_token?client_id=%s&client_secret=%s&code=%s\", models.ClientID, models.ClientSecret, code))\n\tif err := a.Parse(); err != nil {\n\t\tmodels.SYSLOG.Errorf(\"could not create HTTP request: %v\", err)\n\t}\n\n\tvar retCode int\n\tvar retBody []byte\n\tvar errs []error\n\t// Send out the HTTP request\n\tvar t *models.OAuthAccessResponse\n\n\tif retCode, retBody, errs = a.Struct(&t); len(errs) > 0 {\n\t\tmodels.SYSLOG.Tracef(\"received: %v\", string(retBody))\n\t\tmodels.SYSLOG.Errorf(\"could not send HTTP request: %v\", errs)\n\t\treturn ctx.SendStatus(fiber.StatusInternalServerError)\n\t}\n\tmodels.SYSLOG.Tracef(\"received : %v %v %v\", retCode, string(retBody), errs)\n\n\tvar sess *session.Session\n\tvar err error\n\t// Finally, send a response to redirect the user to the \"welcome\" page with the access token\n\tif sess, err = models.MySessionStore.Get(ctx); err == nil {\n\t\tsess.Set(\"token\", t.AccessToken)\n\t\tmodels.SYSLOG.Tracef(\"setting session token %v\", t.AccessToken)\n\t\tsessData, _ := models.MySessionStore.Get(ctx)\n\t\tdefer sessData.Save()\n\t\t//models.MySessionStore.RegisterType(models.OAuthAccessResponse)\n\t\tsessData.Set(\"oauth-scope\", t.Scope)\n\t\tsessData.Set(\"oauth-token-type\", t.TokenType)\n\t\tsessData.Set(\"oauth-token\", t.AccessToken)\n\n\t\tif err != nil {\n\t\t\tmodels.SYSLOG.Errorf(\"session saving exception %v\", err)\n\t\t}\n\t\tmodels.SYSLOG.Tracef(\"redirecting to /welcome.html?access_token=%v\", t.AccessToken)\n\t\t//\t\treturn ctx.Redirect(\"/welcome.html?access_token=\"+t.AccessToken, fiber.StatusFound)\n\t\treturn ctx.Redirect(\"/welcome.html\", fiber.StatusFound)\n\t}\n\n\tmodels.SYSLOG.Tracef(\"redirecting to /\")\n\treturn ctx.Redirect(\"/\", fiber.StatusTemporaryRedirect)\n}", "func (c *Ctx) OriginalURL() string {\n\treturn BytesToString(c.Request.Header.RequestURI())\n}", "func (s *MinioStore) GetPresignedURL(bucketName string, objectName string) (string, error) {\n\turl, err := s.clnt.PresignedGetObject(bucketName, objectName, getExpires, url.Values{})\n\tif err != 
nil {\n\t\treturn \"\", fmt.Errorf(\"error creating get url: %v\", err)\n\t}\n\treturn url.String(), err\n}", "func Request(wellKnownConfig oidc.WellKnownConfiguration, client OidcClient) error {\n\t// from original code\n\tcodeVerifier := \"\"\n\tcodeChallenge := \"\"\n\n\tstate, stateErr := oidc.GenerateRandomStringURLSafe(24)\n\tif stateErr != nil {\n\t\treturn fmt.Errorf(\"failed to generate random state. Check that your OS has a crypto implementation available\")\n\t}\n\n\tauthorisationURL, err := oidc.BuildCodeAuthorisationRequest(\n\t\twellKnownConfig,\n\t\tclient.ClientID,\n\t\tclient.RedirectURL.String(),\n\t\tclient.Scopes,\n\t\tstate,\n\t\tcodeChallenge,\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to build authorisation request %w\", err)\n\t}\n\n\tm := http.NewServeMux()\n\ts := http.Server{\n\t\tAddr: fmt.Sprintf(\":%s\", client.RedirectURL.Port()),\n\t\tHandler: m,\n\t}\n\tctx, cancel := context.WithCancel(context.Background())\n\n\tdefer cancel()\n\n\t// Open a web server to receive the redirect\n\tm.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\thandleOidcCallback(w, r,\n\t\t\tclient.Alias,\n\t\t\tclient.ClientID,\n\t\t\tclient.ClientSecret,\n\t\t\tclient.RedirectURL.String(),\n\t\t\twellKnownConfig,\n\t\t\tstate,\n\t\t\tcodeVerifier,\n\t\t\tcancel,\n\t\t)\n\t})\n\n\tfmt.Println(\"Open browser to\", authorisationURL)\n\n\tgo func() {\n\t\tif err := s.ListenAndServe(); err != nil && err != http.ErrServerClosed {\n\t\t\tlog.Println(err)\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\t// Shutdown the server when the context is canceled\n\t\terr := s.Shutdown(ctx)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}\n\n\treturn nil\n}", "func ValidateAuthReqRedirectURI(client Client, uri string, responseType oidc.ResponseType) error {\n\tif uri == \"\" {\n\t\treturn oidc.ErrInvalidRequestRedirectURI().WithDescription(\"The redirect_uri is missing in the request. \" +\n\t\t\t\"Please ensure it is added to the request. If you have any questions, you may contact the administrator of the application.\")\n\t}\n\tif strings.HasPrefix(uri, \"https://\") {\n\t\tif !str.Contains(client.RedirectURIs(), uri) {\n\t\t\treturn oidc.ErrInvalidRequestRedirectURI().\n\t\t\t\tWithDescription(\"The requested redirect_uri is missing in the client configuration. \" +\n\t\t\t\t\t\"If you have any questions, you may contact the administrator of the application.\")\n\t\t}\n\t\treturn nil\n\t}\n\tif client.ApplicationType() == ApplicationTypeNative {\n\t\treturn validateAuthReqRedirectURINative(client, uri, responseType)\n\t}\n\tif !str.Contains(client.RedirectURIs(), uri) {\n\t\treturn oidc.ErrInvalidRequestRedirectURI().WithDescription(\"The requested redirect_uri is missing in the client configuration. \" +\n\t\t\t\"If you have any questions, you may contact the administrator of the application.\")\n\t}\n\tif strings.HasPrefix(uri, \"http://\") {\n\t\tif client.DevMode() {\n\t\t\treturn nil\n\t\t}\n\t\tif responseType == oidc.ResponseTypeCode && IsConfidentialType(client) {\n\t\t\treturn nil\n\t\t}\n\t\treturn oidc.ErrInvalidRequestRedirectURI().WithDescription(\"This client's redirect_uri is http and is not allowed. \" +\n\t\t\t\"If you have any questions, you may contact the administrator of the application.\")\n\t}\n\treturn oidc.ErrInvalidRequestRedirectURI().WithDescription(\"This client's redirect_uri is using a custom schema and is not allowed.
\" +\n\t\t\"If you have any questions, you may contact the administrator of the application.\")\n}", "func (sp *SessionProxy) URL() string { return sp.GetSession().URL() }", "func (g *GitHub) GetTokenQuestURL(userID string) (string, error) {\n\t// Get a object to request token.\n\tconf, err := g.getConf()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t// Get the request url and send to the github or other.\n\turl := conf.AuthCodeURL(userID) // Use userid as state.\n\tlog.InfoWithFields(\"cyclone receives creating token request\",\n\t\tlog.Fields{\"request url\": url})\n\n\tif !strings.Contains(url, \"github\") {\n\t\tlog.ErrorWithFields(\"Unable to get the url\", log.Fields{\"user_id\": userID})\n\t\treturn \"\", fmt.Errorf(\"Unable to get the url\")\n\t}\n\treturn url, nil\n}", "func OauthGoogleLogin(w http.ResponseWriter, r *http.Request) {\n\tutils.EnableCors(&w)\n\toauthState := generateStateOauthCookie(w)\n\tu := googleOauthConfig.AuthCodeURL(oauthState)\n\thttp.Redirect(w, r, u, http.StatusTemporaryRedirect)\n}", "func (a *AuthorizationApp) GetURL() string {\n\tif a == nil || a.URL == nil {\n\t\treturn \"\"\n\t}\n\treturn *a.URL\n}", "func (a *auth) CdnUrl() string { // nolint\n\tif a.parentAuth == nil {\n\t\treturn \"\"\n\t}\n\treturn a.parentAuth.CdnUrl()\n}", "func (c *Config) GetRedirectURI(values url.Values) (string, error) {\n\t// rfc6749 3.1. Authorization Endpoint\n\t// The endpoint URI MAY include an \"application/x-www-form-urlencoded\" formatted (per Appendix B) query component\n\tredirectURI, err := url.QueryUnescape(values.Get(\"redirect_uri\"))\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(ErrInvalidRequest, 0)\n\t}\n\n\t// rfc6749 3.1.2. Redirection Endpoint\n\t// \"The redirection endpoint URI MUST be an absolute URI as defined by [RFC3986] Section 4.3\"\n\tif rp, err := url.Parse(redirectURI); err != nil {\n\t\treturn \"\", errors.Wrap(ErrInvalidRequest, 0)\n\t} else if rp.Host == \"\" {\n\t\treturn \"\", errors.Wrap(ErrInvalidRequest, 0)\n\t} else if rp.Fragment != \"\" {\n\t\t// \"The endpoint URI MUST NOT include a fragment component.\"\n\t\treturn \"\", errors.Wrap(ErrInvalidRequest, 0)\n\t}\n\n\treturn redirectURI, nil\n}", "func getRedirectHandler (w http.ResponseWriter, r *http.Request) {\n responseCode := 200\n\n r.ParseForm()\n cookieName := \"\"\n cookieUUID := r.FormValue(\"cookie\")\n if cookieUUID == \"\" { \n\tresponseCode = 400 // set response code to 400, malformed request\n } else {\n\tresponseCode = 200 // set response code to 200, request processed\n }\n \n //Attempt to retrieve user name from cookie map based on UUID\n foundCookie := false\n\n mutex.Lock()\n cookieLookup := cookieMap[cookieUUID]\n mutex.Unlock()\n\n if cookieLookup.Name != \"\" {\n\tfoundCookie = true\n\tcookieName = cookieLookup.Value\n }\n\n if !foundCookie {\n\tresponseCode = 400 // set response code to 400, malformed request\n }\n \n w.WriteHeader(responseCode)\n w.Write([]byte(cookieName))\n // timeserver will need to use r.ParseForm() and http.get(URL (i.e. 
authhost:9090/get) to retrieve data\n}", "func CallbackURL(\n\tauthenticatePrivateKey *hpke.PrivateKey,\n\tproxyPublicKey *hpke.PublicKey,\n\trequestParams url.Values,\n\tprofile *identity.Profile,\n\tencryptURLValues hpke.EncryptURLValuesFunc,\n) (string, error) {\n\tredirectURL, err := ParseAndValidateURL(requestParams.Get(QueryRedirectURI))\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"invalid %s: %w\", QueryRedirectURI, err)\n\t}\n\n\tvar callbackURL *url.URL\n\tif requestParams.Has(QueryCallbackURI) {\n\t\tcallbackURL, err = ParseAndValidateURL(requestParams.Get(QueryCallbackURI))\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"invalid %s: %w\", QueryCallbackURI, err)\n\t\t}\n\t} else {\n\t\tcallbackURL, err = DeepCopy(redirectURL)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"error copying %s: %w\", QueryRedirectURI, err)\n\t\t}\n\t\tcallbackURL.Path = \"/.pomerium/callback/\"\n\t\tcallbackURL.RawQuery = \"\"\n\t}\n\n\tcallbackParams := callbackURL.Query()\n\tif requestParams.Has(QueryIsProgrammatic) {\n\t\tcallbackParams.Set(QueryIsProgrammatic, \"true\")\n\t}\n\tcallbackParams.Set(QueryRedirectURI, redirectURL.String())\n\n\trawProfile, err := protojson.Marshal(profile)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error marshaling identity profile: %w\", err)\n\t}\n\tcallbackParams.Set(QueryIdentityProfile, string(rawProfile))\n\tcallbackParams.Set(QueryVersion, versionStr())\n\n\tBuildTimeParameters(callbackParams, signInExpiry)\n\n\tcallbackParams, err = encryptURLValues(authenticatePrivateKey, proxyPublicKey, callbackParams)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error encrypting callback params: %w\", err)\n\t}\n\tcallbackURL.RawQuery = callbackParams.Encode()\n\n\treturn callbackURL.String(), nil\n}", "func AuthURLForSession(config *oauth2.Config, session *Session, scope string) (string, error) {\n\turl, token, err := GenerateAuthURL(config)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tsession.SetAuthCodeToken(token, scope)\n\treturn url, nil\n}", "func AuthorizeURL(config provider.ProviderConfig, r *http.Request) (dest string, err error) {\n\t// subdomain support\n\tbaseUrl := config.Provider.AuthorizeURL\n\tif config.Provider.Subdomain == true {\n\t\tif config.Subdomain == \"\" {\n\t\t\terr = errors.New(fmt.Sprintf(\"provider %s expects the config to contain your subdomain\", config.Provider.Name))\n\t\t\treturn\n\t\t}\n\t\tbaseUrl = strings.Replace(baseUrl, \"[subdomain]\", config.Subdomain, -1)\n\t}\n\n\tauthUrl, err := url.Parse(baseUrl)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvalues, err := query.Values(authorizationRequest{\n\t\tClientId: config.Key,\n\t\tResponseType: \"code\",\n\t\tRedirectURI: genCallbackURL(config, r),\n\t\tScope: strings.Join(config.Scope, config.Provider.ScopeDelimiter),\n\t\tState: config.State,\n\t})\n\n\t// custom parameters\n\tif len(config.CustomParameters) > 0 {\n\t\tfor _, name := range config.Provider.CustomParameters {\n\t\t\tif value, ok := config.CustomParameters[name]; ok == true {\n\t\t\t\tvalues.Set(name, value)\n\t\t\t}\n\t\t}\n\t}\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tauthUrl.RawQuery = values.Encode()\n\tdest = authUrl.String()\n\treturn\n}", "func (p *Proxy) GetRedirectURL(host string) *url.URL {\n\tu := p.redirectURL\n\tu.Scheme = \"https\"\n\tu.Host = host\n\treturn u\n}", "func (q *QRCode) GetURL() string {\n\treturn os.Getenv(\"QRCODE_BASE_URL\") + q.ID\n}", "func getFinalUrl(rawurl string, client *http.Client) (string, error) {\n\turl, err := parseUrl(rawurl)\n\tif err != nil 
{\n\t\treturn \"\", err\n\t}\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treq.Header.Set(\"User-Agent\", \"Mozilla\")\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\treturn fmt.Sprint(resp.Request.URL), nil\n}", "func (provider *GoogleOAuthProvider) GetAuthorizationRedirect(redirect_uri string) (string, error) {\n\treturn ConstructSafeURL(\"https\", \"accounts.google.com\", \"o/oauth2/v2/auth\",\n\t\tmap[string]string{\n\t\t\t\"client_id\": config.GOOGLE_CLIENT_ID,\n\t\t\t\"scope\": \"profile email\",\n\t\t\t\"response_type\": \"code\",\n\t\t\t\"redirect_uri\": redirect_uri,\n\t\t},\n\t)\n}", "func GenerateUrl() string {\n\treturn FbConfig.AuthCodeURL(\"state\")\n}", "func (p *Provider) GetURL(URI string) (string, error) {\n\ttoken, err := p.ui.authenticator.GetToken()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn fmt.Sprintf(\"%s%s?token=%s\", p.Config.URL, URI, token), nil\n}", "func getCMPURL(d *common.Domain, r *http.Request, p []*swan.Pair) string {\n\tvar u url.URL\n\tu.Scheme = d.Config.Scheme\n\tu.Host = d.CMP\n\tu.Path = \"/preferences/\"\n\tq := u.Query()\n\tq.Set(\"returnUrl\", common.GetCleanURL(d.Config, r).String())\n\tq.Set(\"accessNode\", d.SWANAccessNode)\n\tif d.CmpNodeCount > 0 {\n\t\tq.Set(\"nodeCount\", fmt.Sprintf(\"%d\", d.CmpNodeCount))\n\t}\n\taddSWANParams(r, &q, p)\n\tsetFlags(d, &q)\n\n\t// The CMP URL will never use JavaScript.\n\tq.Set(\"javaScript\", \"false\")\n\n\tu.RawQuery = q.Encode()\n\treturn u.String()\n}", "func (r *oauthProxy) redirectToURL(url string, w http.ResponseWriter, req *http.Request, statusCode int) context.Context {\n\tr.log.Debug(\"redirecting to\", zap.String(\"location\", url))\n\tw.Header().Add(\"Cache-Control\", \"no-cache, no-store, must-revalidate, max-age=0\")\n\thttp.Redirect(w, req, url, statusCode)\n\n\treturn r.revokeProxy(w, req)\n}", "func (a *Authorization) GetURL() string {\n\tif a == nil || a.URL == nil {\n\t\treturn \"\"\n\t}\n\treturn *a.URL\n}", "func (i service) GetClientP2PURL(did id.DID) (string, error) {\n\tp2pID, err := i.CurrentP2PKey(did)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn fmt.Sprintf(\"/ipfs/%s\", p2pID), nil\n}" ]
[ "0.6242145", "0.6230739", "0.6214571", "0.57907677", "0.57011986", "0.5696707", "0.5691591", "0.56642246", "0.5631055", "0.5625567", "0.55932415", "0.5592131", "0.55743754", "0.55552524", "0.5475372", "0.54684097", "0.54682803", "0.5451187", "0.5420473", "0.5381438", "0.535675", "0.53240067", "0.529397", "0.5287465", "0.5285216", "0.52599627", "0.5217045", "0.5196804", "0.51934147", "0.5168124", "0.51648897", "0.5126543", "0.5118541", "0.51125854", "0.5107585", "0.51065576", "0.5085149", "0.5078565", "0.5038619", "0.5008041", "0.50068766", "0.49991333", "0.49863124", "0.4986246", "0.495864", "0.49458113", "0.49342012", "0.49299008", "0.49127612", "0.49050048", "0.4890419", "0.48677224", "0.4862527", "0.485037", "0.48378515", "0.48373687", "0.4837346", "0.4832895", "0.48313186", "0.48222739", "0.48166722", "0.48043108", "0.48033217", "0.478048", "0.4771237", "0.47649318", "0.4761937", "0.4760892", "0.4753853", "0.47479224", "0.47409126", "0.4728778", "0.4723807", "0.47136575", "0.47063902", "0.46995726", "0.46941102", "0.46861255", "0.46825042", "0.46764272", "0.46668905", "0.46651924", "0.4663954", "0.46587703", "0.4657695", "0.46562174", "0.46421602", "0.4629263", "0.46277657", "0.46253207", "0.46209016", "0.46167785", "0.46116838", "0.46040353", "0.4588974", "0.45864618", "0.4584509", "0.45843247", "0.45778227", "0.4537795" ]
0.8135182
0
HandleOIDCRedirect initiates the OAuth flow by redirecting to the authentication system
func (a *loginAPI) HandleOIDCRedirect(w http.ResponseWriter, r *http.Request) error {
	state := randToken()
	a.appCookie.Set(stateParam, state, cookieExpiry, w)
	log.WithField("func", "server.HandleOIDCRedirect").Debugf("GetRedirect: initiate using state '%s'", state)
	http.Redirect(w, r, a.GetOIDCRedirectURL(), http.StatusTemporaryRedirect)
	return nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (a *loginAPI) HandleOIDCRedirectFinal(w http.ResponseWriter, r *http.Request) error {\n\tstate := a.appCookie.Get(stateParam, r)\n\tif state == \"\" {\n\t\tlog.WithField(\"func\", \"server.HandleOIDCRedirectFinal\").Debugf(\"emptiy state from cookie, referrer: '%s'\", r.Referer())\n\t\treturn errors.BadRequestError{Err: fmt.Errorf(\"missing state, cannot initiate OIDC\"), Request: r}\n\t}\n\tlog.WithField(\"func\", \"server.HandleOIDCRedirectFinal\").Debugf(\"initiate OIDC redirect using state: '%s'\", state)\n\thttp.Redirect(w, r, a.oauthConfig.AuthCodeURL(state), http.StatusFound)\n\treturn nil\n}", "func handleAuthorize(rw http.ResponseWriter, req *http.Request) {\n\n\t// Get the Google URL which shows the Authentication page to the user.\n\turl := oauthCfg.AuthCodeURL(\"\")\n\n\t// Redirect user to that page.\n\thttp.Redirect(rw, req, url, http.StatusFound)\n}", "func HandleRedirect(w http.ResponseWriter, r *http.Request) {\n\tstate := r.URL.Query().Get(\"state\")\n\tcode := r.URL.Query().Get(\"code\")\n\trequest, response, err := ia.HandleCallbackCode(code, state)\n\tif err != nil {\n\t\tlog.Debugln(err)\n\t\tmsg := `Unable to complete authentication. <a href=\"/\">Go back.</a><hr/>`\n\t\t_ = controllers.WriteString(w, msg, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Check if a user with this auth already exists, if so, log them in.\n\tif u := auth.GetUserByAuth(response.Me, auth.IndieAuth); u != nil {\n\t\t// Handle existing auth.\n\t\tlog.Debugln(\"user with provided indieauth already exists, logging them in\")\n\n\t\t// Update the current user's access token to point to the existing user id.\n\t\taccessToken := request.CurrentAccessToken\n\t\tuserID := u.ID\n\t\tif err := user.SetAccessTokenToOwner(accessToken, userID); err != nil {\n\t\t\tcontrollers.WriteSimpleResponse(w, false, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tif request.DisplayName != u.DisplayName {\n\t\t\tloginMessage := fmt.Sprintf(\"**%s** is now authenticated as **%s**\", request.DisplayName, u.DisplayName)\n\t\t\tif err := chat.SendSystemAction(loginMessage, true); err != nil {\n\t\t\t\tlog.Errorln(err)\n\t\t\t}\n\t\t}\n\n\t\thttp.Redirect(w, r, \"/\", http.StatusTemporaryRedirect)\n\n\t\treturn\n\t}\n\n\t// Otherwise, save this as new auth.\n\tlog.Debug(\"indieauth token does not already exist, saving it as a new one for the current user\")\n\tif err := auth.AddAuth(request.UserID, response.Me, auth.IndieAuth); err != nil {\n\t\tcontrollers.WriteSimpleResponse(w, false, err.Error())\n\t\treturn\n\t}\n\n\t// Update the current user's authenticated flag so we can show it in\n\t// the chat UI.\n\tif err := user.SetUserAsAuthenticated(request.UserID); err != nil {\n\t\tlog.Errorln(err)\n\t}\n\n\thttp.Redirect(w, r, \"/\", http.StatusTemporaryRedirect)\n}", "func (a *Auth) Authenticate(handler http.Handler) http.Handler {\n\tif handler == nil {\n\t\tpanic(\"auth: nil handler\")\n\t}\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif a.cfg.Disable {\n\t\t\thandler.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\ttoken, err := a.getCookie(r)\n\t\tif token == nil && err == nil {\n\t\t\t// Cookie is missing, invalid. 
Fetch new token from OAuth2 provider.\n\t\t\t// Redirect user to the OAuth2 consent page to ask for permission for the scopes specified\n\t\t\t// above.\n\t\t\t// Set the scope to the current request URL, it will be used by the redirect handler to\n\t\t\t// redirect back to the url that requested the authentication.\n\t\t\turl := a.cfg.AuthCodeURL(r.RequestURI)\n\t\t\thttp.Redirect(w, r, url, http.StatusTemporaryRedirect)\n\t\t\treturn\n\t\t}\n\t\tif err != nil {\n\t\t\ta.clearCookie(w)\n\t\t\thttp.Error(w, \"Unauthorized\", http.StatusUnauthorized)\n\t\t\ta.logf(\"Get cookie error: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\t// Source token, in case the token needs a renewal.\n\t\tnewOauth2Token, err := a.cfg.TokenSource(r.Context(), token.toOauth2()).Token()\n\t\tif err != nil {\n\t\t\ta.clearCookie(w)\n\t\t\thttp.Error(w, \"Internal error\", http.StatusInternalServerError)\n\t\t\ta.logf(\"Failed token source: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tnewToken := fromOauth2(newOauth2Token)\n\n\t\tif newToken.IDToken != token.IDToken {\n\t\t\ta.logf(\"Refreshed token\")\n\t\t\ttoken = newToken\n\t\t\ta.setCookie(w, token)\n\t\t}\n\n\t\t// Validate the id_token.\n\t\tpayload, err := a.validator.Validate(r.Context(), token.IDToken, a.cfg.ClientID)\n\t\tif err != nil {\n\t\t\ta.clearCookie(w)\n\t\t\thttp.Error(w, \"Invalid auth.\", http.StatusUnauthorized)\n\t\t\ta.logf(\"Invalid token, reset cookie: %s\", err)\n\t\t\treturn\n\t\t}\n\t\t// User is authenticated.\n\t\t// Store email and name in context, and call the inner handler.\n\t\tcreds := &Creds{\n\t\t\tEmail: payload.Claims[\"email\"].(string),\n\t\t\tName: payload.Claims[\"name\"].(string),\n\t\t}\n\t\tr = r.WithContext(context.WithValue(r.Context(), credsKey, creds))\n\t\thandler.ServeHTTP(w, r)\n\t})\n}", "func (o *oidcServer) Authenticate(w http.ResponseWriter, r *http.Request) {\n\n\to.Lock()\n\tdefer o.Unlock()\n\n\tzap.L().Debug(\"Authenticating\")\n\n\tif o.serverFlow == ServerFlowTypeAuthFailure {\n\t\thttp.Error(w, \"Authentication failure\", http.StatusUnauthorized)\n\t\tzap.L().Warn(\"Authentication failure\", zap.Reflect(\"type\", o.serverFlow))\n\t\treturn\n\t}\n\n\tstate := r.URL.Query().Get(\"state\")\n\tredURI := r.URL.Query().Get(\"redirect_uri\")\n\n\treqURI, err := url.ParseRequestURI(redURI)\n\tif err != nil {\n\t\tzap.L().Error(\"Unable to parse redirect uri\", zap.Error(err))\n\t\treturn\n\t}\n\n\tq := reqURI.Query()\n\tq.Add(\"state\", state)\n\tq.Add(\"redirect_uri\", redURI)\n\treqURI.RawQuery = q.Encode()\n\n\thttp.Redirect(w, r, reqURI.String(), http.StatusTemporaryRedirect)\n}", "func OAUTHRedirect(ctx *fiber.Ctx) error {\n\n\tmodels.SYSLOG.Tracef(\"entering OAUTHRedirect; original URL: %v\", ctx.OriginalURL())\n\tdefer models.SYSLOG.Trace(\"exiting OAUTHRedirect\")\n\n\t// First, we need to get the value of the `code` query param\n\tcode := ctx.Query(\"code\", \"\")\n\tif len(code) < 1 {\n\t\treturn ctx.SendStatus(fiber.StatusBadRequest)\n\t}\n\n\t// Next, lets for the HTTP request to call the github oauth enpoint\tto get our access token\n\n\ta := fiber.AcquireAgent()\n\treq := a.Request()\n\treq.Header.SetMethod(fiber.MethodPost)\n\treq.Header.Set(\"accept\", \"application/json\")\n\treq.SetRequestURI(fmt.Sprintf(\"https://github.com/login/oauth/access_token?client_id=%s&client_secret=%s&code=%s\", models.ClientID, models.ClientSecret, code))\n\tif err := a.Parse(); err != nil {\n\t\tmodels.SYSLOG.Errorf(\"could not create HTTP request: %v\", err)\n\t}\n\n\tvar retCode int\n\tvar retBody []byte\n\tvar errs 
[]error\n\t// Send out the HTTP request\n\tvar t *models.OAuthAccessResponse\n\n\tif retCode, retBody, errs = a.Struct(&t); len(errs) > 0 {\n\t\tmodels.SYSLOG.Tracef(\"received: %v\", string(retBody))\n\t\tmodels.SYSLOG.Errorf(\"could not send HTTP request: %v\", errs)\n\t\treturn ctx.SendStatus(fiber.StatusInternalServerError)\n\t}\n\tmodels.SYSLOG.Tracef(\"received : %v %v %v\", retCode, string(retBody), errs)\n\n\tvar sess *session.Session\n\tvar err error\n\t// Finally, send a response to redirect the user to the \"welcome\" page with the access token\n\tif sess, err = models.MySessionStore.Get(ctx); err == nil {\n\t\tsess.Set(\"token\", t.AccessToken)\n\t\tmodels.SYSLOG.Tracef(\"setting session token %v\", t.AccessToken)\n\t\tsessData, _ := models.MySessionStore.Get(ctx)\n\t\tdefer sessData.Save()\n\t\t//models.MySessionStore.RegisterType(models.OAuthAccessResponse)\n\t\tsessData.Set(\"oauth-scope\", t.Scope)\n\t\tsessData.Set(\"oauth-token-type\", t.TokenType)\n\t\tsessData.Set(\"oauth-token\", t.AccessToken)\n\n\t\tif err != nil {\n\t\t\tmodels.SYSLOG.Errorf(\"session saving exception %v\", err)\n\t\t}\n\t\tmodels.SYSLOG.Tracef(\"redirecting to /welcome.html?access_token=%v\", t.AccessToken)\n\t\t//\t\treturn ctx.Redirect(\"/welcome.html?access_token=\"+t.AccessToken, fiber.StatusFound)\n\t\treturn ctx.Redirect(\"/welcome.html\", fiber.StatusFound)\n\t}\n\n\tmodels.SYSLOG.Tracef(\"redirecting to /\")\n\treturn ctx.Redirect(\"/\", fiber.StatusTemporaryRedirect)\n}", "func handleNaturalistLogin(w http.ResponseWriter, r *http.Request) {\n\turl := authenticator.AuthUrl()\n\n\tlog.Printf(\"Redirecting: %s\", url)\n\n\thttp.Redirect(w, r, url, http.StatusTemporaryRedirect)\n}", "func OAUTHGETHandler(c *fiber.Ctx) error {\n\tmodels.SYSLOG.Trace(\"entering OAUTHGETHandler\")\n\tdefer models.SYSLOG.Trace(\"exiting OAUTHGETHandler\")\n\treturn c.Render(\"protected\", fiber.Map{})\n}", "func authHandler(w http.ResponseWriter, r *http.Request) {\n\turl := config(r.Host).AuthCodeURL(r.URL.RawQuery)\n\thttp.Redirect(w, r, url, http.StatusFound)\n}", "func (a *loginAPI) HandleAuthFlow(w http.ResponseWriter, r *http.Request) error {\n\tstate := randToken()\n\ta.appCookie.Set(stateParam, state, cookieExpiry, w)\n\tlog.WithField(\"func\", \"server.HandleAuthFlow\").Debugf(\"initiate using state '%s'\", state)\n\n\tsite, redirect := query(r, siteParam), query(r, redirectParam)\n\tif site == \"\" || redirect == \"\" {\n\t\treturn errors.BadRequestError{Err: fmt.Errorf(\"missing or invalid parameters supplied\"), Request: r}\n\t}\n\ta.appCookie.Set(authFlowCookie, fmt.Sprintf(\"%s%s%s\", site, authFlowSep, redirect), cookieExpiry, w)\n\thttp.Redirect(w, r, a.GetOIDCRedirectURL(), http.StatusTemporaryRedirect)\n\treturn nil\n}", "func (a *loginAPI) HandleOIDCLogin(w http.ResponseWriter, r *http.Request) error {\n\tctx := context.Background()\n\n\t// read the stateParam again\n\tstate := a.appCookie.Get(stateParam, r)\n\tlog.WithField(\"func\", \"server.HandleOIDCLogin\").Debugf(\"got state param: %s\", state)\n\n\tif query(r, stateParam) != state {\n\t\treturn errors.BadRequestError{Err: fmt.Errorf(\"state did not match\"), Request: r}\n\t}\n\ta.appCookie.Del(stateParam, w)\n\n\t// is this an auth/flow request\n\tvar (\n\t\tauthFlow bool\n\t\tsite, redirect string\n\t)\n\tauthFlowParams := a.appCookie.Get(authFlowCookie, r)\n\tif authFlowParams != \"\" {\n\t\tlog.WithField(\"func\", \"server.HandleOIDCLogin\").Debugf(\"auth/flow login-mode\")\n\t\tparts := strings.Split(authFlowParams, \"|\")\n\t\tsite = 
parts[0]\n\t\tredirect = parts[1]\n\t\tauthFlow = true\n\t}\n\ta.appCookie.Del(authFlowCookie, w)\n\n\toauth2Token, err := a.oauthConfig.Exchange(ctx, query(r, codeParam))\n\tif err != nil {\n\t\treturn errors.ServerError{Err: fmt.Errorf(\"failed to exchange token: %v\", err), Request: r}\n\t}\n\trawIDToken, ok := oauth2Token.Extra(idTokenParam).(string)\n\tif !ok {\n\t\treturn errors.ServerError{Err: fmt.Errorf(\"no id_token field in oauth2 token\"), Request: r}\n\t}\n\tidToken, err := a.oauthVerifier.VerifyToken(ctx, rawIDToken)\n\tif err != nil {\n\t\treturn errors.ServerError{Err: fmt.Errorf(\"failed to verify ID Token: %v\", err), Request: r}\n\t}\n\n\tvar oidcClaims struct {\n\t\tEmail string `json:\"email\"`\n\t\tEmailVerified bool `json:\"email_verified\"`\n\t\tDisplayName string `json:\"name\"`\n\t\tPicURL string `json:\"picture\"`\n\t\tGivenName string `json:\"given_name\"`\n\t\tFamilyName string `json:\"family_name\"`\n\t\tLocale string `json:\"locale\"`\n\t\tUserID string `json:\"sub\"`\n\t}\n\n\tif err := idToken.GetClaims(&oidcClaims); err != nil {\n\t\treturn errors.ServerError{Err: fmt.Errorf(\"claims error: %v\", err), Request: r}\n\t}\n\n\t// the user was authenticated successfully, check if sites are available for the given user!\n\tsuccess := true\n\tsites, err := a.repo.GetSitesByUser(oidcClaims.Email)\n\tif err != nil {\n\t\tlog.WithField(\"func\", \"server.HandleOIDCLogin\").Warnf(\"successfull login by '%s' but error fetching sites! %v\", oidcClaims.Email, err)\n\t\tsuccess = false\n\t}\n\n\tif len(sites) == 0 {\n\t\tlog.WithField(\"func\", \"server.HandleOIDCLogin\").Warnf(\"successfull login by '%s' but no sites availabel!\", oidcClaims.Email)\n\t\tsuccess = false\n\t}\n\n\tif authFlow {\n\t\tlog.WithField(\"func\", \"server.HandleOIDCLogin\").Debugf(\"auth/flow - check for specific site '%s'\", site)\n\t\tsuccess = false\n\t\t// check specific site\n\t\tfor _, e := range sites {\n\t\t\tif e.Name == site {\n\t\t\t\tsuccess = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tif !success {\n\t\ta.appCookie.Set(errors.FlashKeyError, fmt.Sprintf(\"User '%s' is not allowed to login!\", oidcClaims.Email), cookieExpiry, w)\n\t\thttp.Redirect(w, r, \"/error\", http.StatusTemporaryRedirect)\n\t\treturn nil\n\t}\n\n\t// create the token using the claims of the database\n\tvar siteClaims []string\n\tfor _, s := range sites {\n\t\tsiteClaims = append(siteClaims, fmt.Sprintf(\"%s|%s|%s\", s.Name, s.URL, s.PermList))\n\t}\n\tclaims := security.Claims{\n\t\tType: \"login.User\",\n\t\tDisplayName: oidcClaims.DisplayName,\n\t\tEmail: oidcClaims.Email,\n\t\tUserID: oidcClaims.UserID,\n\t\tUserName: oidcClaims.Email,\n\t\tGivenName: oidcClaims.GivenName,\n\t\tSurname: oidcClaims.FamilyName,\n\t\tClaims: siteClaims,\n\t}\n\ttoken, err := security.CreateToken(a.jwt.JwtIssuer, []byte(a.jwt.JwtSecret), a.jwt.Expiry, claims)\n\tif err != nil {\n\t\tlog.WithField(\"func\", \"server.HandleOIDCLogin\").Errorf(\"could not create a JWT token: %v\", err)\n\t\treturn errors.ServerError{Err: fmt.Errorf(\"error creating JWT: %v\", err), Request: r}\n\t}\n\n\tlogin := persistence.Login{\n\t\tUser: oidcClaims.Email,\n\t\tCreated: time.Now().UTC(),\n\t\tType: persistence.DIRECT,\n\t}\n\n\tif authFlow {\n\t\tlogin.Type = persistence.FLOW\n\t}\n\n\terr = a.repo.StoreLogin(login, per.Atomic{})\n\tif err != nil {\n\t\tlog.WithField(\"func\", \"server.HandleOIDCLogin\").Errorf(\"the login could not be saved: %v\", err)\n\t\treturn errors.ServerError{Err: fmt.Errorf(\"error storing the login: %v\", err), 
Request: r}\n\t}\n\n\t// set the cookie\n\texp := a.jwt.Expiry * 24 * 3600\n\ta.setJWTCookie(a.jwt.CookieName, token, exp, w)\n\n\tredirectURL := a.jwt.LoginRedirect\n\tif authFlow {\n\t\tlog.WithField(\"func\", \"server.HandleOIDCLogin\").Debugf(\"auth/flow - redirect to specific URL: '%s'\", redirect)\n\t\tredirectURL = redirect\n\t}\n\n\t// redirect to provided URL\n\thttp.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)\n\treturn nil\n}", "func handleOidcCallback(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tclientName string,\n\tclientID string,\n\tclientSecret string,\n\tredirectURI string,\n\twellKnownConfig oidc.WellKnownConfiguration,\n\tstate string,\n\tcodeVerifier string,\n\tcancel context.CancelFunc,\n) {\n\tvar authorisationResponse, err = oidc.ValidateAuthorisationResponse(r.URL, state)\n\tif err != nil {\n\t\trenderAndLogError(w, cancel, fmt.Sprintf(\"%v\", err))\n\t\treturn\n\t}\n\n\tviewModel, err := VerifyCode(clientID, clientSecret, redirectURI, wellKnownConfig, codeVerifier, authorisationResponse.Code)\n\tif err != nil {\n\t\trenderAndLogError(w, cancel, fmt.Sprintf(\"%v\", err))\n\t\treturn\n\t}\n\n\t// show webpage\n\tt := template.New(\"credentials\")\n\t_, parseErr := t.Parse(TokenResultView())\n\tif parseErr != nil {\n\t\trenderAndLogError(w, cancel, fmt.Sprintf(\"%v\", parseErr))\n\t\treturn\n\t}\n\ttplErr := t.Execute(w, viewModel)\n\tif tplErr != nil {\n\t\trenderAndLogError(w, cancel, fmt.Sprintf(\"%v\", tplErr))\n\t\treturn\n\t}\n\n\tcancel()\n}", "func rootHandler(w http.ResponseWriter, r *http.Request) {\n\n\tif !verifyLogin(r) {\n\t\turl := LoginCfg.AuthCodeURL(\"\")\n\t\turl = url + OauthURLParams\n\t\t// this will preseve the casenumber in the URI path during Oauth2 redirect\n\t\tparams := r.URL.Query()\n\t\tparamkeys := make([]string, 0)\n\t\tfor k := range params {\n\t\t\tfor i := range params[k] {\n\t\t\t\tparamkeys = append(paramkeys, k+\"=\"+params[k][i])\n\t\t\t}\n\t\t}\n\t\tif len(paramkeys) > 0 {\n\t\t\turl = url + \"&state=\" + base64.StdEncoding.EncodeToString([]byte(strings.Join(paramkeys, \"?\")))\n\t\t}\n\n\t\thttp.Redirect(w, r, url, http.StatusFound)\n\t\treturn\n\t}\n\n\t// if user is not using https then redirect them\n\tif ( r.Header.Get(\"x-forwarded-proto\") != \"https\" && BASEURL != LOCALBASEURL) {\n\t\tfmt.Printf(\"TLS handshake is https=false x-forwarded-proto=%s\\n\", r.Header.Get(\"x-forwarded-proto\"))\n\t\thttp.Redirect(w, r, BASEURL, http.StatusFound)\n\t\treturn\n\t}\n\n startPageTemplate.Execute(w, \"\")\n}", "func (uh *UserHandler) HandleGoogleLogin(w http.ResponseWriter, r *http.Request) {\n\tOauthStateString = stringTools.RandomStringGN(20)\n\turl := googleOauthConfig.AuthCodeURL(OauthStateString)\n\thttp.Redirect(w, r, url, http.StatusSeeOther)\n}", "func loginRedirectHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tprovider, err := strconv.Atoi(vars[\"provider\"])\n\tif err != nil {\n\t\thttp.Error(w, \"Invalid identity provider\", http.StatusInternalServerError)\n\t\treturn\n\t} else {\n\t\tidp.Authenticate(provider, w, r)\n\t}\n}", "func BeginAuthHandler(res http.ResponseWriter, req *http.Request) {\n\turl, err := GetAuthURL(res, req)\n\tif err != nil {\n\t\tres.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintln(res, err)\n\t\treturn\n\t}\n\n\thttp.Redirect(res, req, url, http.StatusTemporaryRedirect)\n}", "func HandleLogin(w http.ResponseWriter, r *http.Request) (err error) {\n\tsession, err := cookieStore.Get(r, oauthSessionName)\n\tif err != nil 
{\n\t\tlog.Printf(\"corrupted session %s -- generated new\", err)\n\t\terr = nil\n\t}\n\n\tvar tokenBytes [255]byte\n\tif _, err := rand.Read(tokenBytes[:]); err != nil {\n\t\treturn AnnotateError(err, \"Couldn't generate a session!\", http.StatusInternalServerError)\n\t}\n\n\tstate := hex.EncodeToString(tokenBytes[:])\n\n\tsession.AddFlash(state, stateCallbackKey)\n\n\tif err = session.Save(r, w); err != nil {\n\t\treturn\n\t}\n\n\thttp.Redirect(w, r, oauth2Config.AuthCodeURL(state, claims), http.StatusTemporaryRedirect)\n\n\treturn\n}", "func (app *appVars) oauthRedirect(w http.ResponseWriter, r *http.Request) {\n\n\t// get and compare state to prevent Cross-Site Request Forgery\n\tstate := r.FormValue(\"state\")\n\tif state != app.state {\n\t\tlog.Fatalln(\"state is not the same (CSRF?)\")\n\t}\n\n\t// get authorization code\n\tcode := r.FormValue(\"code\")\n\n\t// exchange authorization code for token\n\ttoken, err := app.conf.Exchange(app.ctx, code)\n\tif err != nil {\n\t\tlog.Println(\"conf.Exchange\", err)\n\t\t// signal that authorization was not successful\n\t\tapp.authChan <- false\n\t\treturn\n\t}\n\n\t// update HTTP client with token\n\tapp.client = app.conf.Client(app.ctx, token)\n\n\t// TODO\n\tapp.token = token\n\n\tconst tpl = `\n<!DOCTYPE html>\n<html>\n\t<head>\n\t\t<meta charset=\"UTF-8\">\n\t\t<title>{{.Title}}</title>\n\t</head>\n\t<body>\n\t<p>Authorization successful\n\t<p><a href=\"{{.BaseUrl}}/listNotebooks\">List Notebooks</a> \n\t<p><a href=\"{{.BaseUrl}}/listPages\">List Pages</a> \n\t</body>\n</html>`\n\n\tt, err := template.New(\"authorized\").Parse(tpl)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdata := struct {\n\t\tTitle string\n\t\tBaseUrl string\n\t}{}\n\n\tdata.Title = \"Authorized\"\n\tdata.BaseUrl = \"http://localhost:9999\"\n\n\terr = t.Execute(w, data)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn\n}", "func (a *loginAPI) GetOIDCRedirectURL() string {\n\treturn oidcInitiateURL\n}", "func (a *Auth) RedirectHandler() http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tcode := r.URL.Query().Get(\"code\")\n\t\ttoken, err := a.cfg.Exchange(r.Context(), code)\n\t\tif err != nil {\n\t\t\ta.logf(\"Authentication failure for code %s: %s\", code, err)\n\t\t\thttp.Error(w, \"Authorization failure\", http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\n\t\t_, ok := token.Extra(\"id_token\").(string)\n\t\tif !ok {\n\t\t\ta.logf(\"Invalid ID token %v (%T)\", token.Extra(\"id_token\"), token.Extra(\"id_token\"))\n\t\t\thttp.Error(w, \"Internal error\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\terr = a.setCookie(w, fromOauth2(token))\n\t\tif err != nil {\n\t\t\ta.logf(\"Failed setting cookie: %v\", err)\n\t\t\thttp.Error(w, \"Internal error\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tredirectPath := r.URL.Query().Get(\"state\")\n\t\tif redirectPath == \"\" {\n\t\t\tredirectPath = \"/\"\n\t\t}\n\t\thttp.Redirect(w, r, redirectPath, http.StatusTemporaryRedirect)\n\t})\n}", "func (r *oauthProxy) oauthAuthorizationHandler(w http.ResponseWriter, req *http.Request) {\n\tctx, span, logger := r.traceSpan(req.Context(), \"authorization handler\")\n\tif span != nil {\n\t\tdefer span.End()\n\t}\n\n\tif r.config.SkipTokenVerification {\n\t\tr.errorResponse(w, req.WithContext(ctx), \"\", http.StatusNotAcceptable, nil)\n\t\treturn\n\t}\n\n\tclient, err := r.getOAuthClient(r.getRedirectionURL(w, req.WithContext(ctx)))\n\tif err != nil {\n\t\tr.errorResponse(w, req.WithContext(ctx), \"failed 
to retrieve the oauth client for authorization\", http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\t// step: set the access type of the session\n\tvar accessType string\n\tif containedIn(\"offline\", r.config.Scopes, false) {\n\t\taccessType = \"offline\"\n\t}\n\n\tauthURL := client.AuthCodeURL(req.URL.Query().Get(\"state\"), accessType, \"\")\n\tlogger.Debug(\"incoming authorization request from client address\",\n\t\tzap.String(\"access_type\", accessType),\n\t\tzap.String(\"auth_url\", authURL),\n\t\tzap.String(\"client_ip\", req.RemoteAddr))\n\n\t// step: if we have a custom sign in page, lets display that\n\tif r.config.hasCustomSignInPage() {\n\t\tmodel := make(map[string]string)\n\t\tmodel[\"redirect\"] = authURL\n\t\tw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_ = r.Render(w, path.Base(r.config.SignInPage), mergeMaps(model, r.config.Tags))\n\n\t\treturn\n\t}\n\n\tr.redirectToURL(authURL, w, req.WithContext(ctx), http.StatusTemporaryRedirect)\n}", "func AuthHandler(c *gin.Context) {\r\n\tvar state utils.State\r\n\tdecoded, err := utils.B64Decode(c.Query(\"state\"))\r\n\terr = json.Unmarshal([]byte(decoded), &state)\r\n\tif err != nil {\r\n\t\tc.JSON(http.StatusConflict, gin.H{\"code\": http.StatusConflict, \"message\": err})\r\n\t\treturn\r\n\t}\r\n\r\n\tAccessKey := state.AccessKey\r\n\tif AccessKey == \"\" {\r\n\t\tAccessKey = state.Token\r\n\t}\r\n\r\n\tAPPUserID, _, err := utils.LoadAccessKey(AccessKey)\r\n\r\n\tif err != nil || APPUserID == \"\" {\r\n\t\tc.JSON(http.StatusNonAuthoritativeInfo, gin.H{\"code\": http.StatusNonAuthoritativeInfo, \"message\": err})\r\n\t\treturn\r\n\t}\r\n\r\n\tfmt.Println(\"redirURL\", state.URL)\r\n\r\n\tcode := c.Query(\"code\")\r\n\tuserID, _ := utils.VerifyCode(code)\r\n\tuserInfo, _ := utils.GetUserInfo(userID)\r\n\r\n\tu := url.Values{}\r\n\tdata, _ := json.Marshal(userInfo)\r\n\tu.Set(\"state\", utils.B64Encode(string(data)))\r\n\tu.Set(\"timestamp\", fmt.Sprintln(time.Now().Unix()))\r\n\tc.Redirect(http.StatusFound, state.URL+\"?\"+u.Encode())\r\n}", "func RedirectHandler(c *gin.Context) {\n\tauthURL, err := gocialite.NewDispatcher().New().\n\t\tDriver(\"asana\").\n\t\tScopes([]string{}).\n\t\tRedirect(\n\t\t\tclientID, // Client ID\n\t\t\tclientSecret, // Client Secret\n\t\t\tredirectURL, // Redirect URL\n\t\t)\n\n\t// Check for errors (usually driver not valid)\n\tif err != nil {\n\t\tc.Writer.Write([]byte(\"Error: \" + err.Error()))\n\t\treturn\n\t}\n\n\t// Redirect with authURL\n\tc.Redirect(http.StatusFound, authURL) // Redirect with 302 HTTP code\n}", "func callbackHandler(w http.ResponseWriter, r *http.Request) {\n\n\t// Process identity provider callback, checking tokens, etc.\n\tauth, err := idp.Callback(w, r)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n\n\t// Store session authentication information in cookie\n\tsetCookie(w, r, auth, auth.ExpiresIn)\n\t\n\t// Redirect to original page\n\thttp.Redirect(w, r, auth.URL, http.StatusFound)\t\n}", "func Authentication(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Printf(\"Authentication: checking for existing authenticated session\\n\")\n\t\tauthenticated, ok := r.Context().Value(session.AuthenticatedKey).(bool)\n\t\tlog.Printf(\"Authentication: authenticated?: %b\\n\", authenticated)\n\t\tif (ok == false || authenticated == false) {\n\t\t\tstate := 
r.Context().Value(session.StateKey).(string)\n\t\t\tlog.Printf(\"Authentication: using state: %v\\n\", state)\n\t\t\tauthorizeURL := oauth2Config.AuthCodeURL(state, oauth2.AccessTypeOnline)\n\t\t\tlog.Printf(\"Authentication: redirecting to %s\\n\", authorizeURL)\n\t\t\thttp.Redirect(w, r, authorizeURL, http.StatusFound)\n\t\t\treturn\n\t\t} else { // authenticated == true\n log.Printf(\"Authentication: user is authenticated, done\\n\")\n next.ServeHTTP(w, r)\n }\n\t})\n}", "func RedirectHandler(c *gin.Context) {\n\t// Retrieve provider from route\n\tprovider := c.Param(\"provider\")\n\n\t// In this case we use a map to store our secrets, but you can use dotenv or your framework configuration\n\t// for example, in revel you could use revel.Config.StringDefault(provider + \"_clientID\", \"\") etc.\n\tproviderSecrets := map[string]map[string]string{\n\t\t\"github\": {\n\t\t\t\"clientID\": os.Getenv(\"CLIENT_ID_GH\"),\n\t\t\t\"clientSecret\": os.Getenv(\"CLIENT_SECRET_GH\"),\n\t\t\t\"redirectURL\": os.Getenv(\"AUTH_REDIRECT_URL\") + \"/github/callback\",\n\t\t},\n\t\t\"google\": {\n\t\t\t\"clientID\": os.Getenv(\"CLIENT_ID_G\"),\n\t\t\t\"clientSecret\": os.Getenv(\"CLIENT_SECRET_G\"),\n\t\t\t\"redirectURL\": os.Getenv(\"AUTH_REDIRECT_URL\") + \"/google/callback\",\n\t\t},\n\t}\n\n\tproviderScopes := map[string][]string{\n\t\t\"github\": []string{\"public_repo\"},\n\t\t\"google\": []string{},\n\t}\n\n\tproviderData := providerSecrets[provider]\n\tactualScopes := providerScopes[provider]\n\tauthURL, err := config.Gocial.New().\n\t\tDriver(provider).\n\t\tScopes(actualScopes).\n\t\tRedirect(\n\t\t\tproviderData[\"clientID\"],\n\t\t\tproviderData[\"clientSecret\"],\n\t\t\tproviderData[\"redirectURL\"],\n\t\t)\n\n\t// Check for errors (usually driver not valid)\n\tif err != nil {\n\t\tc.Writer.Write([]byte(\"Error: \" + err.Error()))\n\t\treturn\n\t}\n\n\t// Redirect with authURL\n\tc.Redirect(http.StatusFound, authURL)\n}", "func HandleAuthenticateDispenser(w http.ResponseWriter, r *http.Request) {\n\t// Read auth token from request\n\tvar auth DispenserAuth\n\n\terr := utils.ReadJSONFromRequest(r, &auth)\n\n\tif err != nil {\n\t\tutils.WriteError(w, utils.BadRequestError(err))\n\t\treturn\n\t}\n\n\t// Try to authenticate the dispenser\n\ttoken, err := AuthenticateDispenser(auth)\n\n\tif err != nil {\n\t\tutils.WriteError(w, err)\n\t\treturn\n\t}\n\n\t// Return session token to user\n\tutils.WriteJSON(w, token)\n}", "func (p *OIDCProvider) LoginHandler() http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\turl := p.oauth2Config.AuthCodeURL(state)\n\t\thttp.Redirect(w, r, url, http.StatusSeeOther)\n\t})\n}", "func (s *Server) HandlerInitiate(w http.ResponseWriter, r *http.Request) {\n\t// ignore error because we don't need previous session values.\n\tsession, _ := s.SessionStore.Get(r, s.Config.SessionName)\n\n\tconf := s.ProviderConfigs[s.DefaultProvider].Config()\n\tcallback := r.Header.Get(\"x-ngx-omniauth-initiate-callback\")\n\tnext := r.Header.Get(\"x-ngx-omniauth-initiate-back-to\")\n\tstate := generateNewState()\n\n\tconf.RedirectURL = callback\n\tsession.Values = map[interface{}]interface{}{}\n\tsession.Values[\"provider\"] = s.DefaultProvider\n\tsession.Values[\"callback\"] = callback\n\tsession.Values[\"next\"] = next\n\tsession.Values[\"state\"] = state\n\tif err := session.Save(r, w); err != nil {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"err\": err.Error(),\n\t\t}).Error(\"failed to save session\")\n\t\thttp.Error(w, 
http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t}\n\n\thttp.Redirect(w, r, conf.AuthCodeURL(state), http.StatusFound)\n}", "func (uh *UserHandler) HandleLinkedInLogin(w http.ResponseWriter, r *http.Request) {\n\tOauthStateString = stringTools.RandomStringGN(20)\n\turl := linkedinOauthConfig.AuthCodeURL(OauthStateString)\n\thttp.Redirect(w, r, url, http.StatusSeeOther)\n}", "func askForLogin(w http.ResponseWriter, r *http.Request) {\n\thttp.Redirect(w, r, oauthProviderConfig.oauthConfig.AuthCodeURL(\"\"), http.StatusFound)\n}", "func RedirectHandler(c *fiber.Ctx) {\n\tprovider := c.Params(\"provider\")\n\tif provider != \"google\" && provider != \"facebook\" {\n\t\terrors.SendErrors(c, http.StatusUnprocessableEntity, &[]string{\"unsupported provider\"})\n\t}\n\tcfg := config.GetInstance()\n\tproviderSecrets := map[string]map[string]string{\n\t\t\"facebook\": {\n\t\t\t\"clientID\": cfg.Facebook.ClientID,\n\t\t\t\"clientSecret\": cfg.Facebook.ClientSecret,\n\t\t\t\"redirectURL\": cfg.Domain+\"/api/auth/facebook/callback\",\n\t\t},\n\t\t\"google\": {\n\t\t\t\"clientID\": cfg.Google.ClientID,\n\t\t\t\"clientSecret\": cfg.Google.ClientSecret,\n\t\t\t\"redirectURL\": cfg.Domain + \"/api/auth/google/callback\",\n\t\t},\n\t}\n\tproviderData := providerSecrets[provider]\n\tauthURL, err := getGocialInstance().New().\n\t\tDriver(provider).\n\t\tRedirect(\n\t\t\tproviderData[\"clientID\"],\n\t\t\tproviderData[\"clientSecret\"],\n\t\t\tproviderData[\"redirectURL\"],\n\t\t)\n\n\t// Check for errors (usually driver not valid)\n\tif err != nil {\n\t\terrors.SendErrors(c, http.StatusInternalServerError, &[]string{err.Error()})\n\t\treturn\n\t}\n\t// Redirect with authURL\n\tc.Status(http.StatusOK).JSON(map[string]string{\"url\" : authURL})\n}", "func redirectHandler(c *gin.Context) {\n\t// Retrieve provider from route\n\tprovider := c.Param(\"provider\")\n\n\t//datos que provienen de github\n\tproviderSecrets := map[string]map[string]string{\n\t\t\"github\": {\n\t\t\t\"clientID\": \"b9563aec19bb264601a1\",\n\t\t\t\"clientSecret\": \"6c5cd9388386a6461a007576f4bfba1a7d144408\",\n\t\t\t\"redirectURL\": \"http://localhost:8090/api/socialLogin/auth/github/callback\",\n\t\t},\n\t}\n\n\tproviderScopes := map[string][]string{\n\t\t\"github\": []string{\"public_repo\"},\n\t}\n\n\tproviderData := providerSecrets[provider]\n\tactualScopes := providerScopes[provider]\n\tauthURL, err := gocial.New().\n\t\tDriver(provider).\n\t\tScopes(actualScopes).\n\t\tRedirect(\n\t\t\tproviderData[\"clientID\"],\n\t\t\tproviderData[\"clientSecret\"],\n\t\t\tproviderData[\"redirectURL\"],\n\t\t)\n\n\t// Check for errors (usually driver not valid)\n\tif err != nil {\n\t\tc.Writer.Write([]byte(\"Error: \" + err.Error()))\n\t\treturn\n\t}\n\t// Redirect with authURL\n\tc.Redirect(http.StatusFound, authURL)\n}", "func HandleLoginRedirect(r *http.Request, w http.ResponseWriter, cfg *setting.Cfg, identity *Identity, validator RedirectValidator) {\n\tredirectURL := handleLogin(r, w, cfg, identity, validator)\n\thttp.Redirect(w, r, redirectURL, http.StatusFound)\n}", "func (a *GoogleAuth) GoogleLoginHandler(w http.ResponseWriter, r *http.Request) {\n\tstate := a.NewAuthState(r)\n\turl := a.config.AuthCodeURL(state)\n\thttp.Redirect(w, r, url, http.StatusTemporaryRedirect)\n}", "func oauthCallbackHandler(w http.ResponseWriter, r *http.Request) {\n\ttransport := &oauth.Transport{Config: &oauthProviderConfig.oauthConfig}\n\ttransport.Exchange(r.FormValue(\"code\"))\n\tclient := transport.Client()\n\tresponse, 
err := client.Get(oauthProviderConfig.UserInfoAPI)\n\tif err != nil {\n\t\tlog.Printf(\"Error while contacting '%s': %s\\n\", oauthProviderConfig.UserInfoAPI, err)\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\tbody, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Error while parsing response from '%s': %s\\n\", oauthProviderConfig.UserInfoAPI, err)\n\t\thttp.Error(w, http.StatusText(http.StatusBadGateway), http.StatusBadGateway)\n\t\treturn\n\t}\n\tresponse.Body.Close()\n\tauthorized, email := isAuthorized(body)\n\tif authorized {\n\t\tauthorizeEmail(email, w, r)\n\t\tlog.Println(\"User\", email, \"logged in\")\n\t\tsession, _ := store.Get(r, serverConfig.CookieName)\n\t\tif next, ok := session.Values[\"next\"]; ok {\n\t\t\thttp.Redirect(w, r, next.(string), http.StatusFound)\n\t\t}\n\t} else {\n\t\tlog.Println(\"Access Denied: Couldn't match an email address in the server response.\")\n\t\thttp.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)\n\t}\n}", "func authLoginHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {\n\tgithub.RedirectToLogin(w, r)\n}", "func (h *Handler) oidcCallback(w http.ResponseWriter, r *http.Request, p httprouter.Params) (interface{}, error) {\n\tresult, err := h.GetConfig().Auth.ValidateOIDCAuthCallback(r.URL.Query())\n\tif err != nil {\n\t\th.Warnf(\"Error validating callback: %v.\", err)\n\t\thttp.Redirect(w, r, \"/web/msg/error/login_failed\", http.StatusFound)\n\t\treturn nil, nil\n\t}\n\th.Infof(\"Callback: %v %v %v.\", result.Username, result.Identity, result.Req.Type)\n\treturn nil, h.CallbackHandler(w, r, webapi.CallbackParams{\n\t\tUsername: result.Username,\n\t\tIdentity: result.Identity,\n\t\tSession: result.Session,\n\t\tCert: result.Cert,\n\t\tTLSCert: result.TLSCert,\n\t\tHostSigners: result.HostSigners,\n\t\tType: result.Req.Type,\n\t\tCreateWebSession: result.Req.CreateWebSession,\n\t\tCSRFToken: result.Req.CSRFToken,\n\t\tPublicKey: result.Req.PublicKey,\n\t\tClientRedirectURL: result.Req.ClientRedirectURL,\n\t})\n}", "func RedirectHandler(w http.ResponseWriter, r *http.Request) {\n\tlog.Print(\"I AM HERE REDIRECTED\")\n\terr := r.ParseForm()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stdout, \"could not parse query: %s\", err.Error())\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t}\n\tcode := r.FormValue(\"code\")\n\n\treqURL := fmt.Sprintf(\"https://github.com/login/oauth/access_token?client_id=%s&client_secret=%s&code=%s\", ClientID, ClientSecret, code)\n\treq, err := http.NewRequest(http.MethodPost, reqURL, nil)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stdout, \"could not retrieve http request: %s\", err.Error())\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t}\n\n\treq.Header.Set(http.CanonicalHeaderKey(\"accept\"), \"application/json\")\n\treq.Header.Set(\"X-OAuth-Scopes\", \"gists\")\n\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stdout, \"could not send HTTP request: %s\", err.Error())\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n\n\tdefer res.Body.Close()\n\t// Parse the request body into the `OAuthAccessResponse` struct\n\tvar t OAuthAccessResponse\n\tif err := json.NewDecoder(res.Body).Decode(&t); err != nil {\n\t\tfmt.Fprintf(os.Stdout, \"could not parse JSON response: %s\", err.Error())\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t}\n\tSession.AccessToken = t.AccessToken\n\n\tw.WriteHeader(http.StatusFound)\n\tw.Write([]byte(\"OK\"))\n}", "func (rh *RealmRedirect) Handle(w 
http.ResponseWriter, req *http.Request) {\n\tsegments := strings.Split(req.URL.Path, \"/\")\n\t// last path segment is the base64d realm ID which we will pass the incoming request to\n\tbase64realmID := segments[len(segments)-1]\n\tbytesRealmID, err := base64.RawURLEncoding.DecodeString(base64realmID)\n\trealmID := string(bytesRealmID)\n\tif err != nil {\n\t\tlog.WithError(err).WithField(\"base64_realm_id\", base64realmID).Print(\n\t\t\t\"Not a b64 encoded string\",\n\t\t)\n\t\tw.WriteHeader(400)\n\t\treturn\n\t}\n\n\trealm, err := rh.DB.LoadAuthRealm(realmID)\n\tif err != nil {\n\t\tlog.WithError(err).WithField(\"realm_id\", realmID).Print(\"Failed to load realm\")\n\t\tw.WriteHeader(404)\n\t\treturn\n\t}\n\tlog.WithFields(log.Fields{\n\t\t\"realm_id\": realmID,\n\t}).Print(\"Incoming realm redirect request\")\n\trealm.OnReceiveRedirect(w, req)\n}", "func (g *GoogleHandler) OauthHandler(w http.ResponseWriter, r *http.Request) {\n\tcode := r.FormValue(\"code\")\n\tif code == \"\" {\n\t\tlog.Info(\"code is empty\")\n\t\thttp.Error(w, \"code is empty\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\ttok, err := g.oauthConfig.Exchange(oauth2.NoContext, code)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\thttp.Error(w, \"token exchange failed\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tclient := g.oauthConfig.Client(oauth2.NoContext, &oauth2.Token{AccessToken: tok.AccessToken})\n\tservice, _ := oauth2api.New(client)\n\n\ttokenCall := service.Tokeninfo()\n\ttokenCall.AccessToken(tok.AccessToken)\n\ttokenInfo, _ := tokenCall.Do()\n\n\tg.db.SaveToken(\"google\", tokenInfo.UserId, code, tok)\n\n\t// Session\n\tsession, err := g.store.Get(r, \"orgo-session\")\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t}\n\n\tsessionID, _ := g.db.SaveSession(tokenInfo.UserId)\n\tsession.Values[\"session_id\"] = sessionID\n\tsession.Save(r, w)\n\thttp.Error(w, \"ok\", http.StatusOK)\n}", "func OAUTHProtected(c *fiber.Ctx) error {\n\tmodels.SYSLOG.Tracef(\"entering OAUTHProtected; original URL: %v\", c.OriginalURL())\n\tdefer models.SYSLOG.Trace(\"exiting OAUTHProtected\")\n\n\tsessData, err := models.MySessionStore.Get(c)\n\tif err != nil {\n\t\tmodels.SYSLOG.Errorf(\"session exception %v\", err)\n\t\tpanic(err)\n\t}\n\n\t// for debug purposes - inspect the session variables\n\tmodels.SYSLOG.Tracef(\"session id fresh ? 
%v\", sessData.Fresh())\n\n\tmodels.SYSLOG.Trace(\"trying to get 'oauth-scope' value\")\n\ttk := sessData.Get(\"oauth-scope\")\n\tmodels.SYSLOG.Tracef(\"session stored 'oauth-scope' is %v\", tk)\n\n\tmodels.SYSLOG.Trace(\"trying to get 'oauth-token-type' value\")\n\ttk = sessData.Get(\"oauth-token-type\")\n\tmodels.SYSLOG.Tracef(\"session stored 'oauth-token-type' is %v\", tk)\n\n\ttk = sessData.Get(\"oauth-token\")\n\tmodels.SYSLOG.Tracef(\"session stored 'oauth-token' is %v\", tk)\n\n\tif tk == nil {\n\t\tsessData.Destroy()\n\t\tmodels.SYSLOG.Tracef(\"token is NULL\")\n\t\treturn c.Redirect(\"/index.html\", fiber.StatusTemporaryRedirect)\n\t}\n\n\treturn c.Next()\n}", "func (handler *AuthHandler) Auth(c *gin.Context) {\n\tc.Redirect(http.StatusMovedPermanently, handler.Oauth2Conf.AuthCodeURL(handler.Oauth2StateString, oauth2.AccessTypeOffline))\n\tc.Abort()\n}", "func OAUTHDisconnect(c *fiber.Ctx) error {\n\tmodels.SYSLOG.Tracef(\"entering OAUTHDisconnect; original URL: %v\", c.OriginalURL())\n\tdefer models.SYSLOG.Trace(\"exiting OAUTHDisconnect\")\n\tsessData, err := models.MySessionStore.Get(c)\n\tif err != nil {\n\t\tmodels.SYSLOG.Errorf(\"session exception %v\", err)\n\t\tpanic(err)\n\t}\n\n\t// for debug purposes - inspect the session variables\n\tmodels.SYSLOG.Tracef(\"session id fresh ? %v\", sessData.Fresh())\n\n\tmodels.SYSLOG.Trace(\"trying to get 'oauth-scope' value\")\n\ttk := sessData.Get(\"oauth-scope\")\n\tmodels.SYSLOG.Tracef(\"session stored 'oauth-scope' is %v\", tk)\n\n\tmodels.SYSLOG.Trace(\"trying to get 'oauth-token-type' value\")\n\ttk = sessData.Get(\"oauth-token-type\")\n\tmodels.SYSLOG.Tracef(\"session stored 'oauth-token-type' is %v\", tk)\n\n\ttk = sessData.Get(\"oauth-token\")\n\tmodels.SYSLOG.Tracef(\"session stored 'oauth-token' is %v\", tk)\n\n\tsessData.Destroy()\n\n\treturn c.Redirect(\"/index.html\", fiber.StatusTemporaryRedirect)\n}", "func CanvasOAuth2RequestHandler(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tintent := r.URL.Query().Get(\"intent\")\n\tdest := r.URL.Query().Get(\"dest\")\n\tswitch intent {\n\tcase \"auth\":\n\t\tutil.SendRedirect(w, getCanvasOAuth2AuthURI(intent, dest))\n\tcase \"reauth\":\n\t\tutil.SendRedirect(w, getCanvasOAuth2AuthURI(intent, dest))\n\tdefault:\n\t\tutil.SendRedirect(w, getCanvasOAuth2AuthURI(\"auth\", dest))\n\t}\n}", "func (s *Server) handleLogin(w http.ResponseWriter, req *http.Request) error {\n\toauthState := uuid.New().String()\n\tloginSession, err := s.cookieStore.Get(req, LoginSessionName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tloginSession.Options = &sessions.Options{\n\t\tMaxAge: 600,\n\t\tHttpOnly: true,\n\t\tSecure: s.opts.SecureCookie,\n\t}\n\tloginSession.Values[\"oauth_state\"] = oauthState\n\terr = loginSession.Save(req, w)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error saving session: %s\", err)\n\t}\n\turl := s.oauthConfig.AuthCodeURL(oauthState)\n\thttp.Redirect(w, req, url, http.StatusTemporaryRedirect)\n\treturn nil\n}", "func redirectToCognitoLogin(ctx context.Context, d *aegis.HandlerDependencies, req *aegis.APIGatewayProxyRequest, res *aegis.APIGatewayProxyResponse, params url.Values) error {\n\tres.Redirect(301, d.Services.Cognito.HostedLoginURL)\n\treturn nil\n}", "func authenticated(f authenticatedHandler) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvar a idp.Auth\n\t\terr := getCookie(r, &a)\n\t\tif (err != nil) || (a.IDToken == \"\") {\n\t\t\turl := \"/login?page=\" + 
url.QueryEscape(r.URL.String())\n\t\t\thttp.Redirect(w, r, url, http.StatusFound)\n\t\t} else {\n\t\t\tf(w, r, &a)\n\t\t}\n\t}\n}", "func (h *Handler) OauthCallback(c *gin.Context) {\n\t// based off provider, use different redirect\n\tswitch c.Param(\"provider\") {\n\tcase \"google\":\n\t\th.GoogleOauthCallback(c)\n\tcase \"github\":\n\t\th.GithubOauthCallback(c)\n\tdefault:\n\t\tc.AbortWithStatus(http.StatusInternalServerError)\n\t}\n}", "func LoginHandler(c echo.Context) error {\n\tprovider, err := gomniauth.Provider(c.Param(\"provider\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tauthURL, err := provider.GetBeginAuthURL(nil, nil)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn c.Redirect(http.StatusTemporaryRedirect, authURL)\n}", "func OauthGoogleLogin(w http.ResponseWriter, r *http.Request) {\n\tutils.EnableCors(&w)\n\toauthState := generateStateOauthCookie(w)\n\tu := googleOauthConfig.AuthCodeURL(oauthState)\n\thttp.Redirect(w, r, u, http.StatusTemporaryRedirect)\n}", "func (m *Minion) defaultUnauthorizedHandler(w http.ResponseWriter, r *http.Request) {\n\tsession, _ := m.Sessions.Get(r, m.SessionName)\n\tsession.Values[RedirectKey] = r.URL.String()\n\tsession.Save(r, w)\n\n\thttp.Redirect(w, r, m.UnauthorizedURL, http.StatusSeeOther)\n}", "func (a *Authenticator) AuthHandler() khttp.FuncHandler {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t_, handled, err := a.PerformAuth(w, r)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"your lack of authentication cookie is impressive - something went wrong\", http.StatusInternalServerError)\n\t\t\tlog.Printf(\"ERROR - could not complete authentication - %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif !handled {\n\t\t\thttp.Redirect(w, r, \"/\", http.StatusTemporaryRedirect)\n\t\t}\n\t}\n}", "func (a *apiServer) handleSAMLResponse(w http.ResponseWriter, req *http.Request) {\n\tvar subject, authCode string\n\tvar err *errutil.HTTPError\n\n\tlogRequest := \"SAML login request\"\n\ta.LogReq(logRequest)\n\tdefer func(start time.Time) {\n\t\tif subject != \"\" {\n\t\t\tlogRequest = fmt.Sprintf(\"SAML login request for %s\", subject)\n\t\t}\n\t\ta.LogResp(logRequest, errutil.PrettyPrintCode(err), err, time.Since(start))\n\t}(time.Now())\n\n\tsubject, authCode, err = a.handleSAMLResponseInternal(req)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), err.Code())\n\t\treturn\n\t}\n\n\t// Redirect caller back to dash with auth code\n\tu := *defaultDashRedirectURL\n\tif a.redirectAddress != nil {\n\t\tu = *a.redirectAddress\n\t}\n\tu.RawQuery = url.Values{\"auth_code\": []string{authCode}}.Encode()\n\tw.Header().Set(\"Location\", u.String())\n\tw.WriteHeader(http.StatusFound) // Send redirect\n}", "func (j *AuthMux) Callback() http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlog := j.Logger.\n\t\t\tWithField(\"component\", \"auth\").\n\t\t\tWithField(\"remote_addr\", r.RemoteAddr).\n\t\t\tWithField(\"method\", r.Method).\n\t\t\tWithField(\"url\", r.URL)\n\n\t\tstate := r.FormValue(\"state\")\n\t\t// Check if the OAuth state token is valid to prevent CSRF\n\t\t// The state variable we set is actually a token. We'll check\n\t\t// if the token is valid. 
We don't need to know anything\n\t\t// about the contents of the principal only that it hasn't expired.\n\t\tif _, err := j.Tokens.ValidPrincipal(r.Context(), Token(state), TenMinutes); err != nil {\n\t\t\tlog.Error(\"Invalid OAuth state received: \", err.Error())\n\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\treturn\n\t\t}\n\n\t\t// Exchange the code back with the provider to the the token\n\t\tconf := j.Provider.Config()\n\t\tcode := r.FormValue(\"code\")\n\t\ttoken, err := conf.Exchange(r.Context(), code)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Unable to exchange code for token \", err.Error())\n\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\treturn\n\t\t}\n\n\t\tif token.Extra(\"id_token\") != nil && !j.UseIDToken {\n\t\t\tlog.Info(\"found an extra id_token, but option --useidtoken is not set\")\n\t\t}\n\n\t\t// if we received an extra id_token, inspect it\n\t\tvar id string\n\t\tvar group string\n\t\tif j.UseIDToken && token.Extra(\"id_token\") != nil && token.Extra(\"id_token\") != \"\" {\n\t\t\tlog.Debug(\"found an extra id_token\")\n\t\t\tif provider, ok := j.Provider.(ExtendedProvider); ok {\n\t\t\t\tlog.Debug(\"provider implements PrincipalIDFromClaims()\")\n\t\t\t\ttokenString, ok := token.Extra(\"id_token\").(string)\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.Error(\"cannot cast id_token as string\")\n\t\t\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tclaims, err := j.Tokens.GetClaims(tokenString)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(\"parsing extra id_token failed:\", err)\n\t\t\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tlog.Debug(\"found claims: \", claims)\n\t\t\t\tid, err = provider.PrincipalIDFromClaims(claims)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(\"requested claim not found in id_token:\", err)\n\t\t\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tgroup, err = provider.GroupFromClaims(claims)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(\"requested claim not found in id_token:\", err)\n\t\t\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Debug(\"provider does not implement PrincipalIDFromClaims()\")\n\t\t\t}\n\t\t} else {\n\t\t\t// otherwise perform an additional lookup\n\t\t\toauthClient := conf.Client(r.Context(), token)\n\t\t\t// Using the token get the principal identifier from the provider\n\t\t\tid, err = j.Provider.PrincipalID(oauthClient)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"Unable to get principal identifier \", err.Error())\n\t\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tgroup, err = j.Provider.Group(oauthClient)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"Unable to get OAuth Group\", err.Error())\n\t\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tp := Principal{\n\t\t\tSubject: id,\n\t\t\tIssuer: j.Provider.Name(),\n\t\t\tGroup: group,\n\t\t}\n\t\tctx := r.Context()\n\t\terr = j.Auth.Authorize(ctx, w, p)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Unable to get add session to response \", err.Error())\n\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\treturn\n\t\t}\n\t\tlog.Info(\"User \", id, \" is authenticated\")\n\t\thttp.Redirect(w, r, j.SuccessURL, 
http.StatusTemporaryRedirect)\n\t})\n}", "func (a API) Auth(w http.ResponseWriter, r *http.Request, state string, redirect_url string) {\r\n\thttp.Redirect(w, r, a.AuthUrl(state, redirect_url), http.StatusFound)\r\n}", "func handleOAuthCallback(w http.ResponseWriter, r *http.Request) {\n\tc := appengine.NewContext(r)\n\tif r.URL.Query().Get(\"error\") == \"access_denied\" {\n\t\tif err := oauthTmpl.Execute(w, &oauthPage{\"App installation cancelled.\"}); err != nil {\n\t\t\tlog.Errorf(c, \"Error executing oauthTmpl template: %s\", err)\n\t\t}\n\t\treturn\n\t}\n\n\tif _, err := sessionStore.Get(r, r.FormValue(\"state\")); err != nil {\n\t\tlog.Errorf(c, \"invalid state parameter: %s\", err)\n\t\thttp.Error(w, \"Invalid state parameter. Try 'Add to Slack' again.\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tcode := r.FormValue(\"code\")\n\n\t// We do not need the OAuth token to handle Slack commands, so we just\n\t// discard it.\n\t//\n\t// Best practice would be to save this to a database so that you can\n\t// associate the Slack user with your app's user and also ask for\n\t// additional permissions in the future.\n\tif _, err := oauthConfig.Exchange(c, code); err != nil {\n\t\tlog.Errorf(c, \"Error authorizing against Slack: %s\", err)\n\t\thttp.Error(w, \"Unexpected error authorizing against Slack.\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif err := oauthTmpl.Execute(w, &oauthPage{\"Welcome! You can now run the slash command.\"}); err != nil {\n\t\tlog.Errorf(c, \"Error executing oauthTmpl template: %s\", err)\n\t}\n}", "func (app *application) requireAuthentication(next http.Handler) http.Handler {\r\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\r\n\t\t// If the client is not authenticated, redirect\r\n\t\tif loggedin := app.isAuthenticated(r); !loggedin {\r\n\t\t\thttp.Redirect(w, r, \"/login\", http.StatusSeeOther)\r\n\t\t\treturn\r\n\t\t}\r\n\t\t// If the client is not a verified user, redirect\r\n\t\tif verified := app.isVerified(r); !verified {\r\n\t\t\thttp.Redirect(w, r, \"/verifyuser\", http.StatusSeeOther)\r\n\t\t\treturn\r\n\t\t}\r\n\r\n\t\t/* This section should be reviewed */\r\n\t\t// Else, set the \"Cache-Control: no-store\" header so pages\r\n\t\t// which require authentication are not stored in cache\r\n\t\tw.Header().Add(\"Cache-Control\", \"no-store\")\r\n\r\n\t\t// and call the next handler in the chain.\r\n\t\tnext.ServeHTTP(w, r)\r\n\t})\r\n}", "func (o *oauth) authorizeHandler(w http.ResponseWriter, r *http.Request) {\n\t// We aren't using HandleAuthorizeRequest here because that assumes redirect_uri\n\t// exists on the request. 
We're just checking for a valid token.\n\tti, err := o.server.ValidationBearerToken(r)\n\tif err != nil {\n\t\tauthFailures.With(\"method\", \"oauth2\").Add(1)\n\t\tencodeError(w, err)\n\t\treturn\n\t}\n\tif ti.GetClientID() == \"\" {\n\t\tauthFailures.With(\"method\", \"oauth2\").Add(1)\n\t\tencodeError(w, fmt.Errorf(\"missing client_id\"))\n\t\treturn\n\t}\n\n\t// Passed token check, return \"200 OK\"\n\tauthSuccesses.With(\"method\", \"oauth2\").Add(1)\n\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\tw.WriteHeader(http.StatusOK)\n}", "func Authenticate(h route.Handle) route.Handle {\n\treturn func(r *http.Request) (route.HandleObject, error) {\n\t\tv1reponse := new(route.V1)\n\n\t\tlog.Debugf(\"Authenticating %s\", r.URL.String())\n\t\tuser := User{}\n\t\tcookie, err := r.Cookie(\"_SID_TXNAPP_\")\n\t\tif err != nil {\n\t\t\treturn v1reponse, err\n\t\t}\n\t\terr = apicalls.Auth.Authenticate(cookie, &user)\n\t\tif err != nil {\n\t\t\treturn v1reponse, err\n\t\t}\n\n\t\tif user.UserID == 0 {\n\t\t\treturn v1reponse, errors.New(\"User is not authenticated\", http.StatusForbidden)\n\t\t}\n\t\tctx := context.WithValue(r.Context(), \"user\", user)\n\t\tr = r.WithContext(ctx)\n\t\treturn h(r)\n\t}\n}", "func OIDCAuth(optionSetters ...Option) func(next http.Handler) http.Handler {\n\toptions := newOptions(optionSetters...)\n\ttokenCache := sync.NewCache(options.UserinfoCacheSize)\n\n\th := oidcAuth{\n\t\tlogger: options.Logger,\n\t\tproviderFunc: options.OIDCProviderFunc,\n\t\thttpClient: options.HTTPClient,\n\t\toidcIss: options.OIDCIss,\n\t\tTokenManagerConfig: options.TokenManagerConfig,\n\t\ttokenCache: &tokenCache,\n\t\ttokenCacheTTL: options.UserinfoCacheTTL,\n\t}\n\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\t\t// there is no bearer token on the request,\n\t\t\tif !h.shouldServe(req) {\n\t\t\t\t// oidc supported but token not present, add header and handover to the next middleware.\n\t\t\t\tuserAgentAuthenticateLockIn(w, req, options.CredentialsByUserAgent, \"bearer\")\n\t\t\t\tnext.ServeHTTP(w, req)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif h.getProvider() == nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttoken := strings.TrimPrefix(req.Header.Get(\"Authorization\"), \"Bearer \")\n\n\t\t\tclaims, status := h.getClaims(token, req)\n\t\t\tif status != 0 {\n\t\t\t\tw.WriteHeader(status)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// inject claims to the request context for the account_uuid middleware.\n\t\t\treq = req.WithContext(oidc.NewContext(req.Context(), claims))\n\n\t\t\t// store claims in context\n\t\t\t// uses the original context, not the one with probably reduced security\n\t\t\tnext.ServeHTTP(w, req.WithContext(oidc.NewContext(req.Context(), claims)))\n\t\t})\n\t}\n}", "func HandleSamlLogin(w http.ResponseWriter, r *http.Request) {\n\tvar redirectBackBaseValue string\n\ts := server.SamlServiceProvider\n\n\ts.XForwardedProto = r.Header.Get(\"X-Forwarded-Proto\")\n\n\tif r.URL.Query() != nil {\n\t\tredirectBackBaseValue = r.URL.Query().Get(redirectBackBase)\n\t\tif redirectBackBaseValue == \"\" {\n\t\t\tredirectBackBaseValue = server.GetRancherAPIHost()\n\t\t}\n\t} else {\n\t\tredirectBackBaseValue = server.GetRancherAPIHost()\n\t}\n\n\tif !isWhitelisted(redirectBackBaseValue, s.RedirectWhitelist) {\n\t\tlog.Errorf(\"Cannot redirect to anything other than whitelisted domains and rancher api host\")\n\t\tredirectBackPathValue := 
r.URL.Query().Get(redirectBackPath)\n\t\tredirectURL := server.GetSamlRedirectURL(server.GetRancherAPIHost(), redirectBackPathValue)\n\t\tredirectURL = addErrorToRedirect(redirectURL, \"422\")\n\t\thttp.Redirect(w, r, redirectURL, http.StatusFound)\n\t\treturn\n\t}\n\n\tserviceProvider := s.ServiceProvider\n\tif r.URL.Path == serviceProvider.AcsURL.Path {\n\t\treturn\n\t}\n\n\tbinding := saml.HTTPRedirectBinding\n\tbindingLocation := serviceProvider.GetSSOBindingLocation(binding)\n\tif bindingLocation == \"\" {\n\t\tbinding = saml.HTTPPostBinding\n\t\tbindingLocation = serviceProvider.GetSSOBindingLocation(binding)\n\t}\n\n\treq, err := serviceProvider.MakeAuthenticationRequest(bindingLocation)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\t// relayState is limited to 80 bytes but also must be integrity protected.\n\t// this means that we cannot use a JWT because it is way too long. Instead\n\t// we set a cookie that corresponds to the state\n\trelayState := base64.URLEncoding.EncodeToString(randomBytes(42))\n\n\tsecretBlock := x509.MarshalPKCS1PrivateKey(serviceProvider.Key)\n\tstate := jwt.New(jwt.SigningMethodHS256)\n\tclaims := state.Claims.(jwt.MapClaims)\n\tclaims[\"id\"] = req.ID\n\tclaims[\"uri\"] = r.URL.String()\n\tsignedState, err := state.SignedString(secretBlock)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\ts.ClientState.SetState(w, r, relayState, signedState)\n\n\tif binding == saml.HTTPRedirectBinding {\n\t\tredirectURL := req.Redirect(relayState)\n\t\tw.Header().Add(\"Location\", redirectURL.String())\n\t\tw.WriteHeader(http.StatusFound)\n\t\treturn\n\t}\n\tif binding == saml.HTTPPostBinding {\n\t\tw.Header().Add(\"Content-Security-Policy\", \"\"+\n\t\t\t\"default-src; \"+\n\t\t\t\"script-src 'sha256-AjPdJSbZmeWHnEc5ykvJFay8FTWeTeRbs9dutfZ0HqE='; \"+\n\t\t\t\"reflected-xss block; referrer no-referrer;\")\n\t\tw.Header().Add(\"Content-type\", \"text/html\")\n\t\tw.Write([]byte(`<!DOCTYPE html><html><body>`))\n\t\tw.Write(req.Post(relayState))\n\t\tw.Write([]byte(`</body></html>`))\n\t\treturn\n\t}\n}", "func authCallbackHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {\n\tghu, err := authWithGithubCode(ctx, r.FormValue(\"code\"))\n\tif err != nil {\n\t\trenderError(w, err, http.StatusInternalServerError, \"GitHub login failed\")\n\t\treturn\n\t}\n\n\tu, err := findOrCreateUser(ghu)\n\tif err != nil {\n\t\trenderError(w, err, http.StatusInternalServerError, \"Failed to find a user using GitHub profile\")\n\t\treturn\n\t}\n\n\tsess := db.NewSession(u.ID)\n\tif err := sess.Create(); err != nil {\n\t\trenderError(w, err, http.StatusInternalServerError, \"Failed to create a session\")\n\t\treturn\n\t}\n\n\tctx = auth.ContextWithSession(ctx, sess)\n\tauth.AuthorizeResponse(ctx, w)\n\tauth.CacheSession(sess)\n\n\thttp.Redirect(w, r, rootPath, http.StatusTemporaryRedirect)\n}", "func completeAuth(w http.ResponseWriter, r *http.Request) {\n\tuser, err := gothic.CompleteUserAuth(w, r)\n\tif err != nil {\n\t\tfmt.Fprintln(w, err)\n\t\treturn\n\t}\n\tuserSession.Values[\"user\"] = user\n\thttp.Redirect(w, r, \"/\", 301)\n}", "func (p DirectHandler) AuthHandler(http.ResponseWriter, *http.Request) {}", "func (lh *AuthorizationCodeLocalhost) redirectUriHandler(w http.ResponseWriter, r *http.Request) {\n\tconst (\n\t\tcloseTab string = \". 
Please close this tab.\"\n\t)\n\n\trq := r.URL.RawQuery\n\turlValues, err := url.ParseQuery(rq)\n\tif err != nil {\n\t\terr := fmt.Sprintf(\"Unable to parse query: %v\", err)\n\n\t\tlh.AuthCodeReqStatus = AuthorizationCodeStatus{Status: FAILED, Details: err}\n\t\tlh.authCode = AuthorizationCode{}\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(lh.AuthCodeReqStatus.Details + closeTab))\n\t\treturn\n\t}\n\n\turlError := urlValues.Get(\"error\")\n\t// Authentication Code Error from consent page\n\tif urlError != \"\" {\n\t\terr := fmt.Sprintf(\"An error occurred when getting authorization code: %s\", urlError)\n\t\tlh.AuthCodeReqStatus = AuthorizationCodeStatus{Status: FAILED, Details: err}\n\t\tlh.authCode = AuthorizationCode{}\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(lh.AuthCodeReqStatus.Details + closeTab))\n\t\treturn\n\t}\n\n\turlCode := urlValues.Get(\"code\")\n\turlState := urlValues.Get(\"state\")\n\t// No Code, Status, or Error is treated as unknown error\n\tif urlCode == \"\" && urlState == \"\" {\n\t\terr := \"Unknown error when getting authorization code\"\n\t\tlh.AuthCodeReqStatus = AuthorizationCodeStatus{Status: FAILED, Details: err}\n\n\t\tlh.authCode = AuthorizationCode{}\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(lh.AuthCodeReqStatus.Details + closeTab))\n\t\treturn\n\t}\n\n\t// Authorization code returned\n\tif urlCode != \"\" && urlState != \"\" {\n\t\tlh.authCode = AuthorizationCode{\n\t\t\tCode: urlCode,\n\t\t\tState: urlState,\n\t\t}\n\n\t\tlh.AuthCodeReqStatus = AuthorizationCodeStatus{\n\t\t\tStatus: GRANTED, Details: \"Authorization code granted\"}\n\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(lh.AuthCodeReqStatus.Details + closeTab))\n\t\treturn\n\t}\n\n\terr = fmt.Errorf(\"Authorization code missing code or state.\")\n\tlh.AuthCodeReqStatus = AuthorizationCodeStatus{Status: FAILED, Details: err.Error()}\n\n\tlh.authCode = AuthorizationCode{}\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(lh.AuthCodeReqStatus.Details + closeTab))\n\treturn\n}", "func (s *Services) authorize(handler func(wr http.ResponseWriter, req *http.Request, uid uint64)) http.HandlerFunc {\n\treturn func(wr http.ResponseWriter, req *http.Request) {\n\t\t// TODO: Save the requested url in a cookie that can be redirected to after logging in successfully\n\t\tuid, err := s.auth.Authorize(wr, req)\n\t\tif err != nil {\n\t\t\tlog.Print(err)\n\t\t\thttp.Redirect(wr, req, \"/login\", 302)\n\t\t\treturn\n\t\t}\n\n\t\thandler(wr, req, uid)\n\t}\n}", "func (s *Provider) HandleSamlLogin(w http.ResponseWriter, r *http.Request) (string, error) {\n\tserviceProvider := s.serviceProvider\n\tif r.URL.Path == serviceProvider.AcsURL.Path {\n\t\treturn \"\", fmt.Errorf(\"don't wrap Middleware with RequireAccount\")\n\t}\n\tlog.Debugf(\"SAML [HandleSamlLogin]: Creating authentication request for %v\", s.name)\n\tbinding := saml.HTTPRedirectBinding\n\tbindingLocation := serviceProvider.GetSSOBindingLocation(binding)\n\n\treq, err := serviceProvider.MakeAuthenticationRequest(bindingLocation, binding, saml.HTTPPostBinding)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn \"\", err\n\t}\n\t// relayState is limited to 80 bytes but also must be integrity protected.\n\t// this means that we cannot use a JWT because it is way too long. 
Instead\n\t// we set a cookie that corresponds to the state\n\trelayState := base64.URLEncoding.EncodeToString(randomBytes(42))\n\n\tsecretBlock := x509.MarshalPKCS1PrivateKey(serviceProvider.Key)\n\tstate := jwt.New(jwt.SigningMethodHS256)\n\tclaims := state.Claims.(jwt.MapClaims)\n\tclaims[\"id\"] = req.ID\n\tclaims[\"uri\"] = r.URL.String()\n\tsignedState, err := state.SignedString(secretBlock)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn \"\", err\n\t}\n\n\ts.clientState.SetState(w, r, relayState, signedState)\n\n\tredirectURL, err := req.Redirect(relayState, serviceProvider)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn redirectURL.String(), nil\n}", "func (r *oauthProxy) oauthCallbackHandler(w http.ResponseWriter, req *http.Request) {\n\tctx, span, logger := r.traceSpan(req.Context(), \"oauthCallbackHandler\")\n\tif span != nil {\n\t\tdefer span.End()\n\t}\n\n\tif r.config.SkipTokenVerification {\n\t\tr.errorResponse(w, req.WithContext(ctx), \"\", http.StatusNotAcceptable, nil)\n\t\treturn\n\t}\n\t// step: ensure we have an authorization code\n\tcode := req.URL.Query().Get(\"code\")\n\tif code == \"\" {\n\t\tr.errorResponse(w, req.WithContext(ctx), \"no code in query\", http.StatusBadRequest, nil)\n\t\treturn\n\t}\n\n\tclient, err := r.getOAuthClient(r.getRedirectionURL(w, req.WithContext(ctx)))\n\tif err != nil {\n\t\tr.errorResponse(w, req.WithContext(ctx), \"unable to create an oauth2 client\", http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tresp, err := exchangeAuthenticationCode(client, code)\n\tif err != nil {\n\t\tr.accessForbidden(w, req.WithContext(ctx), \"unable to exchange code for access token\", err.Error())\n\t\treturn\n\t}\n\n\t// Flow: once we exchange the authorization code we parse the ID Token; we then check for an access token,\n\t// if an access token is present and we can decode it, we use that as the session token, otherwise we default\n\t// to the ID Token.\n\ttoken, identity, err := parseToken(resp.IDToken)\n\tif err != nil {\n\t\tr.accessForbidden(w, req.WithContext(ctx), \"unable to parse ID token for identity\", err.Error())\n\n\t\treturn\n\t}\n\taccess, id, err := parseToken(resp.AccessToken)\n\tif err == nil {\n\t\ttoken = access\n\t\tidentity = id\n\t} else {\n\t\tlogger.Warn(\"unable to parse the access token, using id token only\", zap.Error(err))\n\t}\n\n\t// step: check the access token is valid\n\tif err = r.verifyToken(r.client, token); err != nil {\n\t\t// if not, we may have a valid session but fail to match extra criteria: logout first so the user does not remain\n\t\t// stuck with a valid session, but no access\n\t\tvar sessionToken string\n\t\tif resp.RefreshToken != \"\" {\n\t\t\tsessionToken = resp.RefreshToken\n\t\t} else {\n\t\t\tsessionToken = resp.IDToken\n\t\t}\n\t\tr.commonLogout(ctx, w, req, sessionToken, func(ww http.ResponseWriter) {\n\t\t\t// always return an error after logout in this case\n\t\t\tr.accessForbidden(w, req.WithContext(ctx), \"unable to verify the ID token\", err.Error())\n\t\t}, logger.With(zap.String(\"email\", identity.Email)))\n\n\t\treturn\n\t}\n\taccessToken := token.Encode()\n\n\t// step: are we encrypting the access token?\n\tif r.config.EnableEncryptedToken || r.config.ForceEncryptedCookie {\n\t\tif accessToken, err = encodeText(accessToken, r.config.EncryptionKey); err != nil {\n\t\t\tr.errorResponse(w, req.WithContext(ctx), \"unable to encode the access token\", http.StatusInternalServerError, 
err)\n\n\t\t\treturn\n\t\t}\n\t}\n\n\tlogger.Info(\"issuing access token for user\",\n\t\tzap.String(\"email\", identity.Email),\n\t\tzap.String(\"expires\", identity.ExpiresAt.Format(time.RFC3339)),\n\t\tzap.String(\"duration\", time.Until(identity.ExpiresAt).String()))\n\n\t// @metric a token has been issued\n\toauthTokensMetric.WithLabelValues(\"issued\").Inc()\n\n\t// step: does the response have a refresh token and we do NOT ignore refresh tokens?\n\tif r.config.EnableRefreshTokens && resp.RefreshToken != \"\" {\n\t\tvar encrypted string\n\t\tencrypted, err = encodeText(resp.RefreshToken, r.config.EncryptionKey)\n\t\tif err != nil {\n\t\t\tr.errorResponse(w, req.WithContext(ctx), \"failed to encrypt the refresh token\", http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\t// drop in the access token - cookie expiration = access token\n\t\tr.dropAccessTokenCookie(req.WithContext(ctx), w, accessToken, r.getAccessCookieExpiration(token, resp.RefreshToken))\n\n\t\tswitch r.useStore() {\n\t\tcase true:\n\t\t\tif err = r.StoreRefreshToken(token, encrypted); err != nil {\n\t\t\t\tlogger.Warn(\"failed to save the refresh token in the store\", zap.Error(err))\n\t\t\t}\n\t\tdefault:\n\t\t\t// notes: not all idp refresh tokens are readable, google for example, so we attempt to decode into\n\t\t\t// a jwt and if possible extract the expiration, else we default to 10 days\n\t\t\tif _, ident, err := parseToken(resp.RefreshToken); err != nil {\n\t\t\t\tr.dropRefreshTokenCookie(req.WithContext(ctx), w, encrypted, 0)\n\t\t\t} else {\n\t\t\t\tr.dropRefreshTokenCookie(req.WithContext(ctx), w, encrypted, time.Until(ident.ExpiresAt))\n\t\t\t}\n\t\t}\n\t} else {\n\t\tr.dropAccessTokenCookie(req.WithContext(ctx), w, accessToken, time.Until(identity.ExpiresAt))\n\t}\n\n\t// step: decode the request variable\n\tredirectURI := \"/\"\n\tif req.URL.Query().Get(\"state\") != \"\" {\n\t\t// if the authorization has set a state, we now check if the calling client\n\t\t// requested a specific landing URL to end the authentication handshake\n\t\tif encodedRequestURI, _ := req.Cookie(requestURICookie); encodedRequestURI != nil {\n\t\t\t// some clients URL-escape padding characters\n\t\t\tunescapedValue, err := url.PathUnescape(encodedRequestURI.Value)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Warn(\"app did send a corrupted redirectURI in cookie: invalid url escaping\", zap.Error(err))\n\t\t\t}\n\t\t\t// Since the value is passed with a cookie, we do not expect the client to use base64url (but the\n\t\t\t// base64-encoded value may itself be url-encoded).\n\t\t\t// This is safe for browsers using atob() but needs to be treated with care for nodeJS clients,\n\t\t\t// which natively use base64url encoding, and url-escape padding '=' characters.\n\t\t\tdecoded, err := base64.StdEncoding.DecodeString(unescapedValue)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Warn(\"app did send a corrupted redirectURI in cookie: invalid base64url encoding\",\n\t\t\t\t\tzap.Error(err),\n\t\t\t\t\tzap.String(\"encoded_value\", unescapedValue))\n\t\t\t}\n\t\t\tredirectURI = string(decoded)\n\t\t}\n\t}\n\n\tif r.config.BaseURI != \"\" {\n\t\t// assuming state starts with slash\n\t\tredirectURI = r.config.BaseURI + redirectURI\n\t}\n\n\tr.redirectToURL(redirectURI, w, req.WithContext(ctx), http.StatusTemporaryRedirect)\n}", "func Authorize(s Server) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tswitch rtype := r.FormValue(\"response_type\"); rtype {\n\t\tcase \"code\":\n\t\t\turi, params := 
authorizeCode(s, w, r)\n\t\t\tif uri != \"\" || params != nil {\n\t\t\t\tRedirect(w, uri, params)\n\t\t\t}\n\t\tdefault:\n\t\t\tclient, uri := clientRedirectURI(s, w, r)\n\t\t\tif client != nil && uri != \"\" {\n\t\t\t\tRedirect(w, uri, Params{\n\t\t\t\t\t\"state\": r.FormValue(\"state\"),\n\t\t\t\t\t\"error\": \"unsupported_response_type\",\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t})\n}", "func HandleLoginRedirectResponse(r *http.Request, w http.ResponseWriter, cfg *setting.Cfg, identity *Identity, validator RedirectValidator) *response.RedirectResponse {\n\treturn response.Redirect(handleLogin(r, w, cfg, identity, validator))\n}", "func AuthorizeCallback(w http.ResponseWriter, r *http.Request, authorizer Authorizer) {\n\tparams := mux.Vars(r)\n\tid := params[\"id\"]\n\n\tauthReq, err := authorizer.Storage().AuthRequestByID(r.Context(), id)\n\tif err != nil {\n\t\tAuthRequestError(w, r, nil, err, authorizer.Encoder())\n\t\treturn\n\t}\n\tif !authReq.Done() {\n\t\tAuthRequestError(w, r, authReq,\n\t\t\toidc.ErrInteractionRequired().WithDescription(\"Unfortunately, the user may not be logged in and/or additional interaction is required.\"),\n\t\t\tauthorizer.Encoder())\n\t\treturn\n\t}\n\tAuthResponse(authReq, authorizer, w, r)\n}", "func CanvasOAuth2ResponseHandler(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tcode := r.URL.Query().Get(\"code\")\n\tstate := unmarshalCanvasState(r.URL.Query().Get(\"state\"))\n\n\tif len(code) < 1 || len(state.Intent) < 1 || len(state.State) < 1 {\n\t\tif len(state.Intent) < 1 || len(state.State) < 1 {\n\t\t\tr.URL.Query().Set(\"proxy_error\", \"malformed state\")\n\t\t}\n\n\t\t// an error occurred, just redirect\n\t\tr.URL.Query().Set(\"error_source\", \"canvas\")\n\t\tutil.SendRedirect(\n\t\t\tw,\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"%s?%s\",\n\t\t\t\tenv.CanvasOAuth2SuccessURI,\n\t\t\t\tr.URL.Query().Encode(),\n\t\t\t),\n\t\t)\n\t\treturn\n\t}\n\n\tgrantResp, err := getTokenFromAuthorizationCode(code)\n\tif err != nil {\n\t\thandleISE(w, fmt.Errorf(\"error getting token from authorization code: %w\", err))\n\t\treturn\n\t}\n\n\tif state.Intent == \"reauth\" {\n\t\tprofiles, err := users.List(db, &users.ListRequest{CanvasUserID: grantResp.User.ID})\n\t\tif err != nil {\n\t\t\thandleISE(w, fmt.Errorf(\"error listing users in canvas oauth2 response handler (reauth): %w\", err))\n\t\t\treturn\n\t\t}\n\n\t\tif len(*profiles) < 1 {\n\t\t\t// this user is trying to reauth without authing first?\n\t\t\tutil.SendRedirect(w, getCanvasOAuth2AuthURI(\"auth\", \"\"))\n\t\t\treturn\n\t\t}\n\n\t\t// this user just wants a new session\n\t\tss, err := sessions.Generate(db, &sessions.GenerateRequest{\n\t\t\tCanvasUserID: grantResp.User.ID,\n\t\t})\n\t\tif err != nil {\n\t\t\thandleISE(w, fmt.Errorf(\"error generating session in canvas oauth2 response handler (reauth): %w\", err))\n\t\t}\n\n\t\tutil.AddSessionToResponse(w, *ss)\n\n\t\tutil.SendRedirect(w, getCanvasOAuth2SuccessURI(grantResp.User.Name, state.Intent, state.Dest))\n\t\treturn\n\t}\n\n\trd := requestDetails{\n\t\tToken: grantResp.AccessToken,\n\t\tRefreshToken: grantResp.RefreshToken,\n\t}\n\n\tprofile, err := getCanvasProfile(rd, \"self\")\n\tif err != nil {\n\t\thandleISE(w, fmt.Errorf(\"error getting canvas self profile in canvas oauth2 response handler: %w\", err))\n\t\treturn\n\t}\n\n\t// doing this synchronously so that we can generate a session\n\tprofileResp, err := users.UpsertProfile(db, &users.UpsertRequest{\n\t\tName: profile.Name,\n\t\tEmail: profile.PrimaryEmail,\n\t\tLTIUserID: 
profile.LtiUserID,\n\t\tCanvasUserID: int64(profile.ID),\n\t}, true)\n\tif err != nil {\n\t\thandleISE(w, fmt.Errorf(\"error upserting profile in canvas oauth2 response handler: %w\", err))\n\t\treturn\n\t}\n\n\tif profileResp.InsertedAt.Add(time.Second * 30).After(time.Now()) {\n\t\tgo email.SendWelcome(profile.PrimaryEmail, profile.Name)\n\t}\n\n\t// this one can be done async\n\tgo saveCanvasOAuth2GrantToDB(grantResp)\n\n\tss, err := sessions.Generate(db, &sessions.GenerateRequest{CanvasUserID: profile.ID})\n\tif err != nil {\n\t\thandleISE(w, fmt.Errorf(\"error generating session in canvas oauth2 response handler: %w\", err))\n\t\treturn\n\t}\n\n\tutil.AddSessionToResponse(w, *ss)\n\n\tutil.SendRedirect(w, getCanvasOAuth2SuccessURI(profile.Name, state.Intent, state.Dest))\n\n\treturn\n}", "func (h *GitHubOAuth) Login(c *router.Control) {\n\turl := h.oAuthConf.AuthCodeURL(h.state, oauth2.AccessTypeOnline)\n\thttp.Redirect(c.Writer, c.Request, url, http.StatusTemporaryRedirect)\n}", "func (uh *UserHandler) HandleFacebookLogin(w http.ResponseWriter, r *http.Request) {\n\tOauthStateString = stringTools.RandomStringGN(20)\n\turl := facebookOauthConfig.AuthCodeURL(OauthStateString)\n\thttp.Redirect(w, r, url, http.StatusSeeOther)\n}", "func (h *Handler) serveAuthenticateDBUser(w http.ResponseWriter, r *http.Request) {}", "func (s *StashConsumer) AuthorizeRedirect() (string, string, error) {\n\trequestToken, err := s.requestToken()\n\tif err != nil {\n\t\tlog.Warning(\"requestToken>%s\\n\", err)\n\t\treturn \"\", \"\", err\n\t}\n\turl, err := s.consumer.AuthorizeRedirect(requestToken)\n\treturn requestToken.Token(), url, err\n}", "func (p *OIDCProvider) CallbackHandler() http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tctx := oidc.ClientContext(r.Context(), p.client)\n\n\t\tif errMsg := r.URL.Query().Get(\"error\"); errMsg != \"\" {\n\t\t\tdesc := r.URL.Query().Get(\"error_description\")\n\t\t\tmsg := fmt.Sprintf(\"%s: %s\", errMsg, desc)\n\t\t\tlevel.Debug(p.logger).Log(\"msg\", msg)\n\t\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tqueryCode := r.URL.Query().Get(\"code\")\n\t\tif queryCode == \"\" {\n\t\t\tconst msg = \"no code in request\"\n\t\t\tlevel.Debug(p.logger).Log(\"msg\", msg)\n\t\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tqueryState := r.URL.Query().Get(\"state\")\n\t\tif queryState != state {\n\t\t\tconst msg = \"incorrect state in request\"\n\t\t\tlevel.Debug(p.logger).Log(\"msg\", msg)\n\t\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\ttoken, err := p.oauth2Config.Exchange(ctx, queryCode)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\"failed to get token: %v\", err)\n\t\t\tlevel.Warn(p.logger).Log(\"msg\", msg, \"err\", err)\n\t\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\trawIDToken, ok := token.Extra(\"id_token\").(string)\n\t\tif !ok {\n\t\t\tconst msg = \"no id_token in token response\"\n\t\t\tlevel.Warn(p.logger).Log(\"msg\", msg)\n\t\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\t_, err = p.verifier.Verify(ctx, rawIDToken)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\"failed to verify ID token: %v\", err)\n\t\t\tlevel.Warn(p.logger).Log(\"msg\", msg)\n\t\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\thttp.SetCookie(w, &http.Cookie{\n\t\t\tName: p.cookieName,\n\t\t\tValue: rawIDToken,\n\t\t\tPath: \"/\",\n\t\t\tExpires: 
token.Expiry,\n\t\t})\n\n\t\thttp.Redirect(w, r, p.redirectURL, http.StatusFound)\n\t})\n}", "func AuthCallback(w http.ResponseWriter, r *http.Request) {\n\tcode := r.FormValue(\"code\")\n\tcallbackState := r.FormValue(\"state\")\n\n\tclientID, err := state.DecryptState(callbackState, os.Getenv(\"SECRET\"))\n\tif err != nil {\n\t\thttp.Error(w, \"State could not be verified\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tconfigValue, err := config.ReadConfigFromEnv(clientID)\n\tif err != nil {\n\t\tlog.Printf(\"Error while verifying state: %v\", err)\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\ttoken, err := github.Exchange(configValue.ClientID, configValue.ClientSecretID, code)\n\tif err != nil {\n\t\tlog.Printf(\"Error while exchanging code %s for client %s with Github: %v\", code, configValue.ClientID, err)\n\t\thttp.Error(w, fmt.Sprintf(\"Code %s for client %s was not accepted by the OAuth provider\", code, configValue.ClientID), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tredirectURLWithToken := fmt.Sprintf(\"%s?token=%s\", configValue.RedirectURL, token)\n\n\tw.Header().Set(\"Location\", redirectURLWithToken)\n\tw.WriteHeader(http.StatusTemporaryRedirect)\n}", "func authHandler(c *fb.Context, w http.ResponseWriter, r *http.Request) (int, error) {\n\tif c.Auth.Method == \"none\" {\n\t\t// NoAuth instances shouldn't call this method.\n\t\treturn 0, nil\n\t}\n\n\tif c.Auth.Method == \"proxy\" {\n\t\t// Receive the Username from the Header and check if it exists.\n\t\tu, err := c.Store.Users.GetByUsername(r.Header.Get(c.Auth.Header), c.NewFS)\n\t\tif err != nil {\n\t\t\treturn http.StatusForbidden, nil\n\t\t}\n\n\t\tc.User = u\n\t\treturn printToken(c, w)\n\t}\n\n\t// Receive the credentials from the request and unmarshal them.\n\tvar cred cred\n\n\tif r.Body == nil {\n\t\treturn http.StatusForbidden, nil\n\t}\n\n\terr := json.NewDecoder(r.Body).Decode(&cred)\n\tif err != nil {\n\t\treturn http.StatusForbidden, err\n\t}\n\n\t// Wenkun, Validate the token of user from cloud server and return JWT token.\n\tif c.Auth.Method != \"none\" {\n\t\tok, u := validateAuthByUserId(c, cred.Username)\n\t\tif !ok {\n\t\t\treturn http.StatusForbidden, nil\n\t\t}\n\n\t\tc.User = u\n\t\treturn printToken(c, w)\n\t}\n\n\t// If ReCaptcha is enabled, check the code.\n\tif len(c.ReCaptcha.Secret) > 0 {\n\t\tok, err := reCaptcha(c.ReCaptcha.Host, c.ReCaptcha.Secret, cred.ReCaptcha)\n\t\tif err != nil {\n\t\t\treturn http.StatusForbidden, err\n\t\t}\n\n\t\tif !ok {\n\t\t\treturn http.StatusForbidden, nil\n\t\t}\n\t}\n\n\t// Checks if the user exists.\n\tu, err := c.Store.Users.GetByUsername(cred.Username, c.NewFS)\n\tif err != nil {\n\t\treturn http.StatusForbidden, nil\n\t}\n\n\t// Checks if the password is correct.\n\tif !fb.CheckPasswordHash(cred.Password, u.Password) {\n\t\treturn http.StatusForbidden, nil\n\t}\n\n\tc.User = u\n\treturn printToken(c, w)\n}", "func unauthenticated(fn http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tif s, _ := Session.Get(r, \"s\"); s != nil && s.Values[\"Id\"] != nil {\n\t\t\thttp.Redirect(w, r, \"/main/\", 302)\n\t\t\treturn\n\t\t}\n\t\tfn(w, r)\n\t}\n}", "func (endpoints *endpointDetails) requestToken(w http.ResponseWriter, req *http.Request) {\n\tauthReq := endpoints.osinOAuthClient.NewAuthorizeRequest(osincli.CODE)\n\toauthURL := authReq.GetAuthorizeUrl()\n\n\thttp.Redirect(w, req, oauthURL.String(), http.StatusFound)\n}", "func defaultHandler(w http.ResponseWriter, r *http.Request) {\n // 
this is the default handler that the program defaults to\n http.Redirect(w, r, \"/login/\", http.StatusSeeOther)\n}", "func (endpoints *endpointDetails) requestToken(w http.ResponseWriter, req *http.Request) {\n\tauthReq := endpoints.originOAuthClient.NewAuthorizeRequest(osincli.CODE)\n\toauthURL := authReq.GetAuthorizeUrlWithParams(\"\")\n\n\thttp.Redirect(w, req, oauthURL.String(), http.StatusFound)\n}", "func (p *OAuthProxy) Authenticate(rw http.ResponseWriter, req *http.Request) (err error) {\n\tlogger := log.NewLogEntry().WithRemoteAddress(getRemoteAddr(req))\n\n\tremoteAddr := getRemoteAddr(req)\n\ttags := []string{\"action:authenticate\"}\n\n\tallowedGroups := p.upstreamConfig.AllowedGroups\n\n\t// Clear the session cookie if anything goes wrong.\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tp.sessionStore.ClearSession(rw, req)\n\t\t}\n\t}()\n\n\tsession, err := p.sessionStore.LoadSession(req)\n\tif err != nil {\n\t\t// We loaded a cookie but it wasn't valid, clear it, and reject the request\n\t\tlogger.Error(err, \"error authenticating user\")\n\t\treturn err\n\t}\n\n\t// check if this session belongs to the correct identity provider application.\n\t// this case exists primarily to allow us to gracefully manage a clean ux during\n\t// transitions from one provider to another by gracefully restarting the authentication process.\n\tif session.ProviderSlug != p.provider.Data().ProviderSlug {\n\t\tlogger.WithUser(session.Email).Info(\n\t\t\t\"authenticated with incorrect identity provider; restarting authentication\")\n\t\treturn ErrWrongIdentityProvider\n\t}\n\n\t// check that the user has been authorized against the requested upstream\n\t// this is primarily to combat a user authorizing with one upstream and attempting to use\n\t// the session cookie for a different upstream.\n\tif req.Host != session.AuthorizedUpstream {\n\t\tlogger.WithProxyHost(req.Host).WithAuthorizedUpstream(session.AuthorizedUpstream).WithUser(session.Email).Warn(\n\t\t\t\"session authorized against different upstream; restarting authentication\")\n\t\treturn ErrUnauthorizedUpstreamRequested\n\t}\n\n\t// Lifetime period is the entire duration in which the session is valid.\n\t// This should be set to something like 14 to 30 days.\n\tif session.LifetimePeriodExpired() {\n\t\t// session lifetime has expired, we reject the request and clear the cookie\n\t\tlogger.WithUser(session.Email).Info(\n\t\t\t\"lifetime has expired; restarting authentication\")\n\t\treturn ErrLifetimeExpired\n\t} else if session.RefreshPeriodExpired() {\n\t\t// Refresh period is the period in which the access token is valid. 
This is ultimately\n\t\t// controlled by the upstream provider and tends to be around 1 hour.\n\t\tok, err := p.provider.RefreshSession(session, allowedGroups)\n\t\t// We failed to refresh the session successfully\n\t\t// clear the cookie and reject the request\n\t\tif err != nil {\n\t\t\tlogger.WithUser(session.Email).Error(err, \"refreshing session failed\")\n\t\t\treturn err\n\t\t}\n\n\t\tif !ok {\n\t\t\t// User is not authorized after refresh\n\t\t\t// clear the cookie and reject the request\n\t\t\tlogger.WithUser(session.Email).Info(\n\t\t\t\t\"not authorized after refreshing session\")\n\t\t\treturn ErrUserNotAuthorized\n\t\t}\n\n\t\terr = p.sessionStore.SaveSession(rw, req, session)\n\t\tif err != nil {\n\t\t\t// We refreshed the session successfully, but failed to save it.\n\t\t\t//\n\t\t\t// This could be from failing to encode the session properly.\n\t\t\t// But, we clear the session cookie and reject the request!\n\t\t\tlogger.WithUser(session.Email).Error(\n\t\t\t\terr, \"could not save refreshed session\")\n\t\t\treturn err\n\t\t}\n\t} else if session.ValidationPeriodExpired() {\n\t\t// Validation period has expired, this is the shortest interval we use to\n\t\t// check for valid requests. This should be set to something like a minute.\n\t\t// This calls up the provider chain to validate this user is still active\n\t\t// and hasn't been de-authorized.\n\t\tok := p.provider.ValidateSessionState(session, allowedGroups)\n\t\tif !ok {\n\t\t\t// This user is now no longer authorized, or we failed to\n\t\t\t// validate the user.\n\t\t\t// Clear the cookie and reject the request\n\t\t\tlogger.WithUser(session.Email).Error(\n\t\t\t\terr, \"no longer authorized after validation period\")\n\t\t\treturn ErrUserNotAuthorized\n\t\t}\n\n\t\terr = p.sessionStore.SaveSession(rw, req, session)\n\t\tif err != nil {\n\t\t\t// We validated the session successfully, but failed to save it.\n\n\t\t\t// This could be from failing to encode the session properly.\n\t\t\t// But, we clear the session cookie and reject the request!\n\t\t\tlogger.WithUser(session.Email).Error(\n\t\t\t\terr, \"could not save validated session\")\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// We revalidate group membership whenever the session is refreshed or revalidated\n\t// just above in the call to ValidateSessionState and RefreshSession.\n\t// To reduce strain on upstream identity providers we only revalidate email domains and\n\t// addresses on each request here.\n\tfor _, v := range p.Validators {\n\t\t_, EmailGroupValidator := v.(validators.EmailGroupValidator)\n\n\t\tif !EmailGroupValidator {\n\t\t\terr := v.Validate(session)\n\t\t\tif err != nil {\n\t\t\t\ttags = append(tags, \"error:validation_failed\")\n\t\t\t\tp.StatsdClient.Incr(\"application_error\", tags, 1.0)\n\t\t\t\tlogger.WithRemoteAddress(remoteAddr).WithUser(session.Email).Info(\n\t\t\t\t\tfmt.Sprintf(\"permission denied: unauthorized: %q\", err))\n\t\t\t\treturn ErrUserNotAuthorized\n\t\t\t}\n\t\t}\n\t}\n\n\tlogger.WithRemoteAddress(remoteAddr).WithUser(session.Email).Info(\n\t\tfmt.Sprintf(\"authentication: user validated\"))\n\n\tfor key, val := range p.upstreamConfig.InjectRequestHeaders {\n\t\treq.Header.Set(key, val)\n\t}\n\n\treq.Header.Set(\"X-Forwarded-User\", session.User)\n\n\tif p.upstreamConfig.PassAccessToken && session.AccessToken != \"\" {\n\t\treq.Header.Set(\"X-Forwarded-Access-Token\", session.AccessToken)\n\t}\n\n\treq.Header.Set(\"X-Forwarded-Email\", session.Email)\n\treq.Header.Set(\"X-Forwarded-Groups\", strings.Join(session.Groups, 
\",\"))\n\n\t// stash authenticated user so that it can be logged later (see func logRequest)\n\trw.Header().Set(loggingUserHeader, session.Email)\n\n\t// This user has been OK'd. Allow the request!\n\treturn nil\n}", "func (g *gitlabConsumer) AuthorizeRedirect(ctx context.Context) (string, string, error) {\n\t_, end := telemetry.Span(ctx, \"gitlab.AuthorizeRedirect\")\n\tdefer end()\n\n\t// See https://docs.gitlab.com/ce/api/oauth2.html\n\n\trequestToken, err := sdk.GenerateHash()\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tval := url.Values{}\n\tval.Add(\"redirect_uri\", g.AuthorizationCallbackURL)\n\tval.Add(\"client_id\", g.appID)\n\tval.Add(\"response_type\", \"code\")\n\tval.Add(\"state\", requestToken)\n\n\turl := fmt.Sprintf(\"%s/oauth/authorize?%s\", g.URL, val.Encode())\n\treturn requestToken, url, nil\n}", "func HandleLoginResponse(r *http.Request, w http.ResponseWriter, cfg *setting.Cfg, identity *Identity, validator RedirectValidator) *response.NormalResponse {\n\tresult := map[string]interface{}{\"message\": \"Logged in\"}\n\tif redirectURL := handleLogin(r, w, cfg, identity, validator); redirectURL != cfg.AppSubURL+\"/\" {\n\t\tresult[\"redirectUrl\"] = redirectURL\n\t}\n\treturn response.JSON(http.StatusOK, result)\n}", "func (s *Controller) HandleGoogleLoginOrRegister(c *gin.Context) {\n\tgoogleOauthConfig = &oauth2.Config{\n\t\tRedirectURL: \"http://localhost:8080/callback\",\n\t\tClientID: os.Getenv(\"GOOGLE_CLIENT_ID\"),\n\t\tClientSecret: os.Getenv(\"GOOGLE_CLIENT_SECRET\"),\n\t\tScopes: []string{\n\t\t\t\"https://www.googleapis.com/auth/userinfo.email\",\n\t\t\t\"https://www.googleapis.com/auth/userinfo.profile\",\n\t\t},\n\t\tEndpoint: google.Endpoint,\n\t}\n\turl := googleOauthConfig.AuthCodeURL(oauthStateString)\n\tc.Redirect(http.StatusTemporaryRedirect, url)\n}", "func (c Client) Authorize(scope ...string) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tauthURI := c.getAuthorizeURI(scope)\n\n\t\thttp.Redirect(w, r, authURI.String(), 302)\n\t}\n}", "func tokenAuthRequired() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tif sessions.Default(c).Get(\"userid\") == nil {\n\t\t\tc.Redirect(302, \"/login\")\n\t\t} else {\n\t\t\tc.Next()\n\t\t}\n\t}\n}", "func Authorize(next buffalo.Handler) buffalo.Handler {\n\treturn func(c buffalo.Context) error {\n\t\tif uid := c.Session().Get(\"current_user_id\"); uid == nil {\n\t\t\tc.Flash().Add(\"danger\", \"You must be authorized to see that page\")\n\t\t\treturn c.Redirect(302, \"/\")\n\t\t}\n\t\treturn next(c)\n\t}\n}", "func getRedirectHandler (w http.ResponseWriter, r *http.Request) {\n responseCode := 200\n\n r.ParseForm()\n cookieName := \"\"\n cookieUUID := r.FormValue(\"cookie\")\n if cookieUUID == \"\" { \n\tresponseCode = 400 // set response code to 400, malformed request\n } else {\n\tresponseCode = 200 // set response code to 200, request processed\n }\n \n //Attempt to retrieve user name from cookie map based on UUID\n foundCookie := false\n\n mutex.Lock()\n cookieLookup := cookieMap[cookieUUID]\n mutex.Unlock()\n\n if cookieLookup.Name != \"\" {\n\tfoundCookie = true\n\tcookieName = cookieLookup.Value\n }\n\n if !foundCookie {\n\tresponseCode = 400 // set response code to 400, malformed request\n }\n \n w.WriteHeader(responseCode)\n w.Write([]byte(cookieName))\n // timeserver will need to use r.ParseForm() and http.get(URL (i.e. 
authhost:9090/get) to retrieve data\n}", "func WrapAuthorize(hfn http.Handler, routeName string) http.HandlerFunc {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\n\t\turlValues := r.URL.Query()\n\n\t\trefStr := gorillaContext.Get(r, \"str\").(stores.Store)\n\t\trefRoles := gorillaContext.Get(r, \"auth_roles\").([]string)\n\t\tserviceToken := gorillaContext.Get(r, \"auth_service_token\").(string)\n\n\t\t// Check first if service token is used\n\t\tif serviceToken != \"\" && serviceToken == urlValues.Get(\"key\") {\n\t\t\thfn.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\tif auth.Authorize(routeName, refRoles, refStr) {\n\t\t\thfn.ServeHTTP(w, r)\n\t\t} else {\n\t\t\terr := APIErrorForbidden()\n\t\t\trespondErr(w, err)\n\t\t}\n\t})\n}", "func authenticated(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\t// check if authenticated\n\t\t_, err := session(w, req)\n\t\tif err != nil {\n\t\t\t//http.Error(w, \"not logged in\", http.StatusUnauthorized)\n\t\t\tlogger.SetPrefix(\"WARNING \")\n\t\t\tlogger.Println(err, `Failed to get/verify cookie \"session\"`)\n\t\t\thttp.Redirect(w, req, \"/\", http.StatusSeeOther)\n\t\t\treturn // don't call original handler\n\t\t}\n\t\tnext.ServeHTTP(w, req)\n\t})\n}", "func HandleRedirection(app *iris.Application) {\n\tapp.Get(\"/ping\", middleware.ProxyMiddleware, proxyHandler)\n}", "func authenticated(fn AuthHandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\ts, err := Session.Get(r, \"s\")\n\t\tif s == nil || s.Values[\"Id\"] == nil || err != nil {\n\t\t\thttp.Redirect(w, r, \"/signin/?next=\"+r.URL.Path, 302)\n\t\t\treturn\n\t\t}\n\t\tfn(w, r, s)\n\t}\n}", "func (a *AuthMock) Authenticate(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif ic, ok := w.(injectContext); ok {\n\t\t\tnext.ServeHTTP(auth.ResponseContext{ResponseWriter: w, Auth: auth.Context(ic.claims)}, r)\n\t\t\treturn\n\t\t}\n\t\tnext.ServeHTTP(auth.ResponseContext{ResponseWriter: w, Auth: auth.Context{}}, r)\n\t})\n}", "func Request(wellKnownConfig oidc.WellKnownConfiguration, client OidcClient) error {\n\t// from original code\n\tcodeVerifier := \"\"\n\tcodeChallenge := \"\"\n\n\tstate, stateErr := oidc.GenerateRandomStringURLSafe(24)\n\tif stateErr != nil {\n\t\treturn fmt.Errorf(\"failed to generate random state. 
Check that your OS has a crypto implementation available\")\n\t}\n\n\tauthorisationURL, err := oidc.BuildCodeAuthorisationRequest(\n\t\twellKnownConfig,\n\t\tclient.ClientID,\n\t\tclient.RedirectURL.String(),\n\t\tclient.Scopes,\n\t\tstate,\n\t\tcodeChallenge,\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to build authorisation request %w\", err)\n\t}\n\n\tm := http.NewServeMux()\n\ts := http.Server{\n\t\tAddr: fmt.Sprintf(\":%s\", client.RedirectURL.Port()),\n\t\tHandler: m,\n\t}\n\tctx, cancel := context.WithCancel(context.Background())\n\n\tdefer cancel()\n\n\t// Open a web server to receive the redirect\n\tm.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\thandleOidcCallback(w, r,\n\t\t\tclient.Alias,\n\t\t\tclient.ClientID,\n\t\t\tclient.ClientSecret,\n\t\t\tclient.RedirectURL.String(),\n\t\t\twellKnownConfig,\n\t\t\tstate,\n\t\t\tcodeVerifier,\n\t\t\tcancel,\n\t\t)\n\t})\n\n\tfmt.Println(\"Open browser to\", authorisationURL)\n\n\tgo func() {\n\t\tif err := s.ListenAndServe(); err != nil && err != http.ErrServerClosed {\n\t\t\tlog.Println(err)\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\t// Shutdown the server when the context is canceled\n\t\terr := s.Shutdown(ctx)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}\n\n\treturn nil\n}", "func HandleOAuth2Callback(w http.ResponseWriter, r *http.Request) (err error) {\n\tsession, err := cookieStore.Get(r, oauthSessionName)\n\tif err != nil {\n\t\tlog.Printf(\"corrupted session %s -- generated new\", err)\n\t\terr = nil\n\t}\n\n\t// ensure we flush the csrf challenge even if the request is ultimately unsuccessful\n\tdefer func() {\n\t\tif err := session.Save(r, w); err != nil {\n\t\t\tlog.Printf(\"error saving session: %s\", err)\n\t\t}\n\t}()\n\n\tswitch stateChallenge, state := session.Flashes(stateCallbackKey), r.FormValue(\"state\"); {\n\tcase state == \"\", len(stateChallenge) < 1:\n\t\terr = errors.New(\"missing state challenge\")\n\tcase state != stateChallenge[0]:\n\t\terr = fmt.Errorf(\"invalid oauth state, expected '%s', got '%s'\", state, stateChallenge[0])\n\t}\n\n\tif err != nil {\n\t\treturn AnnotateError(\n\t\t\terr,\n\t\t\t\"couldn't verify your confirmation, please try again.\",\n\t\t\thttp.StatusBadRequest,\n\t\t)\n\t}\n\n\ttoken, err := oauth2Config.Exchange(context.Background(), r.FormValue(\"code\"))\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// add the oauth token to session\n\tsession.Values[oauthTokenKey] = token\n\n\tfmt.Printf(\"Access token: %s\\n\", token.AccessToken)\n\n\trawIDToken, ok := token.Extra(\"id_token\").(string)\n\tif !ok {\n\t\treturn AnnotateError(\n\t\t\tfmt.Errorf(\"can't extract id token from access token\"),\n\t\t\t\"Couldn't verify your confirmation, please try again.\",\n\t\t\thttp.StatusBadRequest,\n\t\t)\n\t}\n\n\tidToken, err := oidcVerifier.Verify(context.Background(), rawIDToken)\n\tif err != nil {\n\t\treturn AnnotateError(\n\t\t\terr,\n\t\t\t\"Couldn't verify your confirmation, please try again.\",\n\t\t\thttp.StatusBadRequest,\n\t\t)\n\t}\n\n\tvar claims struct {\n\t\tIss string `json:\"iss\"`\n\t\tSub string `json:\"sub\"`\n\t\tAud string `json:\"aud\"`\n\t\tExp int32 `json:\"exp\"`\n\t\tIat int32 `json:\"iat\"`\n\t\tNonce string `json:\"nonce\"`\n\t\tEmail string `json:\"email\"`\n\t}\n\n\tif err := idToken.Claims(&claims); err != nil {\n\t\treturn AnnotateError(\n\t\t\terr,\n\t\t\t\"Couldn't verify your confirmation, please try again.\",\n\t\t\thttp.StatusBadRequest,\n\t\t)\n\t}\n\n\tfmt.Printf(\"Email: %s\\n\", 
claims.Email)\n\n\thttp.Redirect(w, r, \"/\", http.StatusTemporaryRedirect)\n\n\treturn\n}", "func (g *Google) Authenticate(c *gin.Context) {\n\tvar appErr models.AppError\n\tvar err error\n\tvar matchUser models.User\n\tvar remoteOAuth models.OAuthAccount\n\n\tdestination := c.Query(\"destination\")\n\n\t// get user data from Google\n\tfstring, err := g.GetRemoteUserData(c.Request, c.Writer)\n\tif err != nil {\n\t\tc.Redirect(http.StatusTemporaryRedirect, destination)\n\t\treturn\n\t}\n\n\t// decode user data returned by Google oAuth\n\tremoteOAuth = models.OAuthAccount{\n\t\tType: constants.Google,\n\t\tAId: utils.ToNullString(gjson.Get(fstring, \"id\").Str),\n\t\tEmail: utils.ToNullString(gjson.Get(fstring, \"email\").Str),\n\t\tName: utils.ToNullString(gjson.Get(fstring, \"name\").Str),\n\t\tFirstName: utils.ToNullString(gjson.Get(fstring, \"given_name\").Str),\n\t\tLastName: utils.ToNullString(gjson.Get(fstring, \"family_name\").Str),\n\t\tGender: utils.GetGender(gjson.Get(fstring, \"gender\").Str),\n\t\tPicture: utils.ToNullString(gjson.Get(fstring, \"picture\").Str),\n\t}\n\n\t// get the record from o_auth_accounts table\n\t_, err = g.Storage.GetOAuthData(remoteOAuth.AId, remoteOAuth.Type)\n\n\t// oAuth account does not exist\n\t// sign in by oauth for the first time\n\tif err != nil {\n\t\tappErr = err.(models.AppError)\n\n\t\t// return internal server error\n\t\tif appErr.StatusCode != http.StatusNotFound {\n\t\t\tc.JSON(appErr.StatusCode, gin.H{\"status\": \"error\", \"error\": appErr.Error()})\n\n\t\t\treturn\n\t\t}\n\n\t\t// email is provided in oAuth response\n\t\tif remoteOAuth.Email.Valid {\n\t\t\t// get the record from users table\n\t\t\tmatchUser, err = g.Storage.GetUserByEmail(remoteOAuth.Email.String)\n\n\t\t\t// record does not exist in users table\n\t\t\tif err != nil {\n\t\t\t\tappErr = err.(models.AppError)\n\n\t\t\t\t// return internal server error\n\t\t\t\tif appErr.StatusCode != http.StatusNotFound {\n\t\t\t\t\tc.JSON(appErr.StatusCode, gin.H{\"status\": \"error\", \"error\": appErr.Error()})\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// no record in users table with this email\n\t\t\t\t// create a record in users table\n\t\t\t\t// and create a record in o_auth_accounts table\n\t\t\t\tmatchUser = g.Storage.InsertUserByOAuth(remoteOAuth)\n\t\t\t} else {\n\t\t\t\t// record exists in users table\n\t\t\t\t// create record in o_auth_accounts table\n\t\t\t\t// and connect it to the user record\n\t\t\t\tremoteOAuth.UserID = matchUser.ID\n\t\t\t\terr = g.Storage.InsertOAuthAccount(remoteOAuth)\n\n\t\t\t\t// return internal server error\n\t\t\t\tif err != nil {\n\t\t\t\t\tc.JSON(http.StatusInternalServerError, gin.H{\"status\": \"error\", \"error\": err.Error()})\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t// email is not provided in oAuth response\n\t\t\t// create a record in users table\n\t\t\t// and also create a record in o_auth_accounts table\n\t\t\tmatchUser = g.Storage.InsertUserByOAuth(remoteOAuth)\n\t\t}\n\t} else {\n\t\t// user signed in before\n\t\tmatchUser, err = g.Storage.GetUserDataByOAuth(remoteOAuth)\n\t\tif err != nil {\n\t\t\tc.JSON(http.StatusInternalServerError, gin.H{\"status\": \"error\", \"error\": err.Error()})\n\t\t\treturn\n\t\t}\n\n\t\t// update existing OAuth data\n\t\t_, err = g.Storage.UpdateOAuthData(remoteOAuth)\n\t\tif err != nil {\n\t\t\tc.JSON(http.StatusInternalServerError, gin.H{\"status\": \"error\", \"error\": err.Error()})\n\t\t\treturn\n\t\t}\n\t}\n\n\ttoken, err := utils.RetrieveToken(matchUser.ID, 
matchUser.Email.String)\n\tif err != nil {\n\t\tc.JSON(appErr.StatusCode, gin.H{\"status\": \"error\", \"error\": appErr.Error()})\n\t\treturn\n\t}\n\n\tvar u *url.URL\n\tvar secure = false\n\tu, err = url.Parse(destination)\n\n\tif u.Scheme == \"https\" {\n\t\tsecure = true\n\t}\n\n\tparameters := u.Query()\n\tparameters.Add(\"login\", \"google\")\n\n\tu.RawQuery = parameters.Encode()\n\tdestination = u.String()\n\n\tauthJSON := &models.AuthenticatedResponse{ID: matchUser.ID, Privilege: matchUser.Privilege, FirstName: matchUser.FirstName.String, LastName: matchUser.LastName.String, Email: matchUser.Email.String, Jwt: token}\n\tauthResp, _ := json.Marshal(authJSON)\n\n\tc.SetCookie(\"auth_info\", string(authResp), 100, u.Path, utils.Cfg.ConsumerSettings.Domain, secure, true)\n\tc.Redirect(http.StatusTemporaryRedirect, destination)\n}" ]
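The SAML login handlers in the negatives above (Rancher's HandleSamlLogin and the Provider variant) share one pattern worth isolating: because the SAML relay state is limited to 80 bytes, they send only a short random token on the wire and park the integrity-protected details (request ID and return URI) in a JWT-signed cookie keyed by that token. The sketch below restates the pattern on its own; the package name, HMAC secret, cookie naming, and expiry are assumptions for illustration and do not come from any of the snippets, which derive their signing key from the service provider's private key instead.

package relaystate

import (
	"crypto/rand"
	"encoding/base64"
	"net/http"
	"time"

	jwt "github.com/golang-jwt/jwt/v4"
)

// hmacSecret is an assumption for this sketch; a real deployment would load it
// from configuration rather than hard-coding it.
var hmacSecret = []byte("replace-with-a-real-secret")

// SetSignedState issues a short relay state (42 random bytes encode to 56
// characters, within SAML's 80-byte limit) and stores the signed request
// details in a cookie keyed by that state.
func SetSignedState(w http.ResponseWriter, requestID, returnURI string) (string, error) {
	buf := make([]byte, 42)
	if _, err := rand.Read(buf); err != nil {
		return "", err
	}
	relayState := base64.URLEncoding.EncodeToString(buf)

	tok := jwt.New(jwt.SigningMethodHS256)
	claims := tok.Claims.(jwt.MapClaims)
	claims["id"] = requestID
	claims["uri"] = returnURI
	signed, err := tok.SignedString(hmacSecret)
	if err != nil {
		return "", err
	}

	http.SetCookie(w, &http.Cookie{
		Name:     "saml_" + relayState, // assumed per-state cookie naming scheme
		Value:    signed,
		Path:     "/",
		Expires:  time.Now().Add(5 * time.Minute),
		HttpOnly: true,
	})
	return relayState, nil
}

On the callback leg, the handler would look up the cookie named by the returned relay state and verify the JWT before trusting the embedded return URI.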
[ "0.7668781", "0.7278873", "0.7195489", "0.68771523", "0.6792656", "0.675416", "0.6659408", "0.66327256", "0.6582657", "0.6579838", "0.6550146", "0.6537362", "0.6484869", "0.6481458", "0.64770114", "0.6313008", "0.62982273", "0.62486434", "0.62366086", "0.62057006", "0.6193944", "0.617074", "0.61279625", "0.6111006", "0.61102754", "0.610831", "0.61033946", "0.6103218", "0.60739315", "0.60445774", "0.6037267", "0.6014544", "0.59944737", "0.5991014", "0.5972795", "0.59428394", "0.5936968", "0.59330535", "0.5926426", "0.59076196", "0.5901329", "0.5884373", "0.58569324", "0.5852055", "0.58276224", "0.5803492", "0.579544", "0.5790239", "0.57892954", "0.57862645", "0.5779412", "0.5751209", "0.5751157", "0.5738125", "0.57288", "0.57270294", "0.57267743", "0.57040584", "0.5701948", "0.5700065", "0.56999594", "0.5690353", "0.568379", "0.5679887", "0.56782943", "0.5669159", "0.56555456", "0.56484777", "0.5643938", "0.5639731", "0.56077415", "0.5593753", "0.5592613", "0.5587393", "0.5582865", "0.55364347", "0.5517013", "0.55118644", "0.55107915", "0.5497143", "0.5496544", "0.549161", "0.5484209", "0.54839116", "0.5480116", "0.5478785", "0.54566664", "0.54491544", "0.5446663", "0.54402196", "0.5436025", "0.54312646", "0.5430289", "0.5424906", "0.5421562", "0.541025", "0.5403062", "0.5402817", "0.5397683", "0.5397446" ]
0.8068458
0
HandleAuthFlow initiates the authentication and redirects to a specific URL
func (a *loginAPI) HandleAuthFlow(w http.ResponseWriter, r *http.Request) error { state := randToken() a.appCookie.Set(stateParam, state, cookieExpiry, w) log.WithField("func", "server.HandleAuthFlow").Debugf("initiate using state '%s'", state) site, redirect := query(r, siteParam), query(r, redirectParam) if site == "" || redirect == "" { return errors.BadRequestError{Err: fmt.Errorf("missing or invalid parameters supplied"), Request: r} } a.appCookie.Set(authFlowCookie, fmt.Sprintf("%s%s%s", site, authFlowSep, redirect), cookieExpiry, w) http.Redirect(w, r, a.GetOIDCRedirectURL(), http.StatusTemporaryRedirect) return nil }
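A minimal sketch of how the handler above might be mounted and exercised; the mountAuthFlow helper, the /auth/flow route, and the error-to-status mapping are assumptions made for illustration, not part of the indexed document.

// Hypothetical wiring for HandleAuthFlow; everything named here except the
// handler itself is assumed for the example.
func mountAuthFlow(api *loginAPI) *http.ServeMux {
	mux := http.NewServeMux()
	mux.HandleFunc("/auth/flow", func(w http.ResponseWriter, r *http.Request) {
		// The handler requires non-empty ?site= and ?redirect= query
		// parameters; on success it answers with a 307 redirect to the
		// OIDC provider after storing the state and flow cookies.
		if err := api.HandleAuthFlow(w, r); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
		}
	})
	return mux
}

// A client would then start the flow with:
//   GET /auth/flow?site=mysite&redirect=%2Fdashboard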
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func handleAuthorize(rw http.ResponseWriter, req *http.Request) {\n\n\t// Get the Google URL which shows the Authentication page to the user.\n\turl := oauthCfg.AuthCodeURL(\"\")\n\n\t// Redirect user to that page.\n\thttp.Redirect(rw, req, url, http.StatusFound)\n}", "func authHandler(w http.ResponseWriter, r *http.Request) {\n\turl := config(r.Host).AuthCodeURL(r.URL.RawQuery)\n\thttp.Redirect(w, r, url, http.StatusFound)\n}", "func BeginAuthHandler(res http.ResponseWriter, req *http.Request) {\n\turl, err := GetAuthURL(res, req)\n\tif err != nil {\n\t\tres.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintln(res, err)\n\t\treturn\n\t}\n\n\thttp.Redirect(res, req, url, http.StatusTemporaryRedirect)\n}", "func StartAuthFlow(u user.User, w http.ResponseWriter, r *http.Request) {\n\ttype request struct {\n\t\tAuthHost string `json:\"authHost\"`\n\t}\n\n\ttype response struct {\n\t\tRedirect string `json:\"redirect\"`\n\t}\n\n\tvar authRequest request\n\tp, err := io.ReadAll(r.Body)\n\tif err != nil {\n\t\tcontrollers.WriteSimpleResponse(w, false, err.Error())\n\t\treturn\n\t}\n\n\tif err := json.Unmarshal(p, &authRequest); err != nil {\n\t\tcontrollers.WriteSimpleResponse(w, false, err.Error())\n\t\treturn\n\t}\n\n\taccessToken := r.URL.Query().Get(\"accessToken\")\n\n\tredirectURL, err := ia.StartAuthFlow(authRequest.AuthHost, u.ID, accessToken, u.DisplayName)\n\tif err != nil {\n\t\tcontrollers.WriteSimpleResponse(w, false, err.Error())\n\t\treturn\n\t}\n\n\tredirectResponse := response{\n\t\tRedirect: redirectURL.String(),\n\t}\n\tcontrollers.WriteResponse(w, redirectResponse)\n}", "func (a API) Auth(w http.ResponseWriter, r *http.Request, state string, redirect_url string) {\r\n\thttp.Redirect(w, r, a.AuthUrl(state, redirect_url), http.StatusFound)\r\n}", "func AuthHandler(c *gin.Context) {\r\n\tvar state utils.State\r\n\tdecoded, err := utils.B64Decode(c.Query(\"state\"))\r\n\terr = json.Unmarshal([]byte(decoded), &state)\r\n\tif err != nil {\r\n\t\tc.JSON(http.StatusConflict, gin.H{\"code\": http.StatusConflict, \"message\": err})\r\n\t\treturn\r\n\t}\r\n\r\n\tAccessKey := state.AccessKey\r\n\tif AccessKey == \"\" {\r\n\t\tAccessKey = state.Token\r\n\t}\r\n\r\n\tAPPUserID, _, err := utils.LoadAccessKey(AccessKey)\r\n\r\n\tif err != nil || APPUserID == \"\" {\r\n\t\tc.JSON(http.StatusNonAuthoritativeInfo, gin.H{\"code\": http.StatusNonAuthoritativeInfo, \"message\": err})\r\n\t\treturn\r\n\t}\r\n\r\n\tfmt.Println(\"redirURL\", state.URL)\r\n\r\n\tcode := c.Query(\"code\")\r\n\tuserID, _ := utils.VerifyCode(code)\r\n\tuserInfo, _ := utils.GetUserInfo(userID)\r\n\r\n\tu := url.Values{}\r\n\tdata, _ := json.Marshal(userInfo)\r\n\tu.Set(\"state\", utils.B64Encode(string(data)))\r\n\tu.Set(\"timestamp\", fmt.Sprintln(time.Now().Unix()))\r\n\tc.Redirect(http.StatusFound, state.URL+\"?\"+u.Encode())\r\n}", "func HandleRedirect(w http.ResponseWriter, r *http.Request) {\n\tstate := r.URL.Query().Get(\"state\")\n\tcode := r.URL.Query().Get(\"code\")\n\trequest, response, err := ia.HandleCallbackCode(code, state)\n\tif err != nil {\n\t\tlog.Debugln(err)\n\t\tmsg := `Unable to complete authentication. 
<a href=\"/\">Go back.</a><hr/>`\n\t\t_ = controllers.WriteString(w, msg, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Check if a user with this auth already exists, if so, log them in.\n\tif u := auth.GetUserByAuth(response.Me, auth.IndieAuth); u != nil {\n\t\t// Handle existing auth.\n\t\tlog.Debugln(\"user with provided indieauth already exists, logging them in\")\n\n\t\t// Update the current user's access token to point to the existing user id.\n\t\taccessToken := request.CurrentAccessToken\n\t\tuserID := u.ID\n\t\tif err := user.SetAccessTokenToOwner(accessToken, userID); err != nil {\n\t\t\tcontrollers.WriteSimpleResponse(w, false, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tif request.DisplayName != u.DisplayName {\n\t\t\tloginMessage := fmt.Sprintf(\"**%s** is now authenticated as **%s**\", request.DisplayName, u.DisplayName)\n\t\t\tif err := chat.SendSystemAction(loginMessage, true); err != nil {\n\t\t\t\tlog.Errorln(err)\n\t\t\t}\n\t\t}\n\n\t\thttp.Redirect(w, r, \"/\", http.StatusTemporaryRedirect)\n\n\t\treturn\n\t}\n\n\t// Otherwise, save this as new auth.\n\tlog.Debug(\"indieauth token does not already exist, saving it as a new one for the current user\")\n\tif err := auth.AddAuth(request.UserID, response.Me, auth.IndieAuth); err != nil {\n\t\tcontrollers.WriteSimpleResponse(w, false, err.Error())\n\t\treturn\n\t}\n\n\t// Update the current user's authenticated flag so we can show it in\n\t// the chat UI.\n\tif err := user.SetUserAsAuthenticated(request.UserID); err != nil {\n\t\tlog.Errorln(err)\n\t}\n\n\thttp.Redirect(w, r, \"/\", http.StatusTemporaryRedirect)\n}", "func (uh *UserHandler) HandleGoogleLogin(w http.ResponseWriter, r *http.Request) {\n\tOauthStateString = stringTools.RandomStringGN(20)\n\turl := googleOauthConfig.AuthCodeURL(OauthStateString)\n\thttp.Redirect(w, r, url, http.StatusSeeOther)\n}", "func authLoginHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {\n\tgithub.RedirectToLogin(w, r)\n}", "func (b *Browser) HandleAuth(username, password string) func() error {\n\tenable := b.DisableDomain(b.ctx, \"\", &proto.FetchEnable{})\n\tdisable := b.EnableDomain(b.ctx, \"\", &proto.FetchEnable{\n\t\tHandleAuthRequests: true,\n\t})\n\n\tpaused := &proto.FetchRequestPaused{}\n\tauth := &proto.FetchAuthRequired{}\n\n\twaitPaused := b.WaitEvent(paused)\n\twaitAuth := b.WaitEvent(auth)\n\n\treturn func() (err error) {\n\t\tdefer enable()\n\t\tdefer disable()\n\n\t\twaitPaused()\n\n\t\terr = proto.FetchContinueRequest{\n\t\t\tRequestID: paused.RequestID,\n\t\t}.Call(b)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\twaitAuth()\n\n\t\terr = proto.FetchContinueWithAuth{\n\t\t\tRequestID: auth.RequestID,\n\t\t\tAuthChallengeResponse: &proto.FetchAuthChallengeResponse{\n\t\t\t\tResponse: proto.FetchAuthChallengeResponseResponseProvideCredentials,\n\t\t\t\tUsername: username,\n\t\t\t\tPassword: password,\n\t\t\t},\n\t\t}.Call(b)\n\n\t\treturn\n\t}\n}", "func OAUTHRedirect(ctx *fiber.Ctx) error {\n\n\tmodels.SYSLOG.Tracef(\"entering OAUTHRedirect; original URL: %v\", ctx.OriginalURL())\n\tdefer models.SYSLOG.Trace(\"exiting OAUTHRedirect\")\n\n\t// First, we need to get the value of the `code` query param\n\tcode := ctx.Query(\"code\", \"\")\n\tif len(code) < 1 {\n\t\treturn ctx.SendStatus(fiber.StatusBadRequest)\n\t}\n\n\t// Next, lets for the HTTP request to call the github oauth enpoint\tto get our access token\n\n\ta := fiber.AcquireAgent()\n\treq := a.Request()\n\treq.Header.SetMethod(fiber.MethodPost)\n\treq.Header.Set(\"accept\", 
\"application/json\")\n\treq.SetRequestURI(fmt.Sprintf(\"https://github.com/login/oauth/access_token?client_id=%s&client_secret=%s&code=%s\", models.ClientID, models.ClientSecret, code))\n\tif err := a.Parse(); err != nil {\n\t\tmodels.SYSLOG.Errorf(\"could not create HTTP request: %v\", err)\n\t}\n\n\tvar retCode int\n\tvar retBody []byte\n\tvar errs []error\n\t// Send out the HTTP request\n\tvar t *models.OAuthAccessResponse\n\n\tif retCode, retBody, errs = a.Struct(&t); len(errs) > 0 {\n\t\tmodels.SYSLOG.Tracef(\"received: %v\", string(retBody))\n\t\tmodels.SYSLOG.Errorf(\"could not send HTTP request: %v\", errs)\n\t\treturn ctx.SendStatus(fiber.StatusInternalServerError)\n\t}\n\tmodels.SYSLOG.Tracef(\"received : %v %v %v\", retCode, string(retBody), errs)\n\n\tvar sess *session.Session\n\tvar err error\n\t// Finally, send a response to redirect the user to the \"welcome\" page with the access token\n\tif sess, err = models.MySessionStore.Get(ctx); err == nil {\n\t\tsess.Set(\"token\", t.AccessToken)\n\t\tmodels.SYSLOG.Tracef(\"setting session token %v\", t.AccessToken)\n\t\tsessData, _ := models.MySessionStore.Get(ctx)\n\t\tdefer sessData.Save()\n\t\t//models.MySessionStore.RegisterType(models.OAuthAccessResponse)\n\t\tsessData.Set(\"oauth-scope\", t.Scope)\n\t\tsessData.Set(\"oauth-token-type\", t.TokenType)\n\t\tsessData.Set(\"oauth-token\", t.AccessToken)\n\n\t\tif err != nil {\n\t\t\tmodels.SYSLOG.Errorf(\"session saving exception %v\", err)\n\t\t}\n\t\tmodels.SYSLOG.Tracef(\"redirecting to /welcome.html?access_token=%v\", t.AccessToken)\n\t\t//\t\treturn ctx.Redirect(\"/welcome.html?access_token=\"+t.AccessToken, fiber.StatusFound)\n\t\treturn ctx.Redirect(\"/welcome.html\", fiber.StatusFound)\n\t}\n\n\tmodels.SYSLOG.Tracef(\"redirecting to /\")\n\treturn ctx.Redirect(\"/\", fiber.StatusTemporaryRedirect)\n}", "func handleAuth(h http.Handler) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t_, pass, ok := r.BasicAuth()\n\t\tif !ok || pass != RequiredPassword {\n\t\t\thandlers.Error(w, http.StatusInternalServerError, ErrUnauthorized.Error())\n\t\t\treturn\n\t\t}\n\n\t\th.ServeHTTP(w, r)\n\t}\n}", "func (a *Auth) Authenticate(handler http.Handler) http.Handler {\n\tif handler == nil {\n\t\tpanic(\"auth: nil handler\")\n\t}\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif a.cfg.Disable {\n\t\t\thandler.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\ttoken, err := a.getCookie(r)\n\t\tif token == nil && err == nil {\n\t\t\t// Cookie is missing, invalid. 
Fetch new token from OAuth2 provider.\n\t\t\t// Redirect user to the OAuth2 consent page to ask for permission for the scopes specified\n\t\t\t// above.\n\t\t\t// Set the scope to the current request URL, it will be used by the redirect handler to\n\t\t\t// redirect back to the url that requested the authentication.\n\t\t\turl := a.cfg.AuthCodeURL(r.RequestURI)\n\t\t\thttp.Redirect(w, r, url, http.StatusTemporaryRedirect)\n\t\t\treturn\n\t\t}\n\t\tif err != nil {\n\t\t\ta.clearCookie(w)\n\t\t\thttp.Error(w, \"Unauthorized\", http.StatusUnauthorized)\n\t\t\ta.logf(\"Get cookie error: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\t// Source token, in case the token needs a renewal.\n\t\tnewOauth2Token, err := a.cfg.TokenSource(r.Context(), token.toOauth2()).Token()\n\t\tif err != nil {\n\t\t\ta.clearCookie(w)\n\t\t\thttp.Error(w, \"Internal error\", http.StatusInternalServerError)\n\t\t\ta.logf(\"Failed token source: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tnewToken := fromOauth2(newOauth2Token)\n\n\t\tif newToken.IDToken != token.IDToken {\n\t\t\ta.logf(\"Refreshed token\")\n\t\t\ttoken = newToken\n\t\t\ta.setCookie(w, token)\n\t\t}\n\n\t\t// Validate the id_token.\n\t\tpayload, err := a.validator.Validate(r.Context(), token.IDToken, a.cfg.ClientID)\n\t\tif err != nil {\n\t\t\ta.clearCookie(w)\n\t\t\thttp.Error(w, \"Invalid auth.\", http.StatusUnauthorized)\n\t\t\ta.logf(\"Invalid token, reset cookie: %s\", err)\n\t\t\treturn\n\t\t}\n\t\t// User is authenticated.\n\t\t// Store email and name in context, and call the inner handler.\n\t\tcreds := &Creds{\n\t\t\tEmail: payload.Claims[\"email\"].(string),\n\t\t\tName: payload.Claims[\"name\"].(string),\n\t\t}\n\t\tr = r.WithContext(context.WithValue(r.Context(), credsKey, creds))\n\t\thandler.ServeHTTP(w, r)\n\t})\n}", "func (handler *AuthHandler) Auth(c *gin.Context) {\n\tc.Redirect(http.StatusMovedPermanently, handler.Oauth2Conf.AuthCodeURL(handler.Oauth2StateString, oauth2.AccessTypeOffline))\n\tc.Abort()\n}", "func authHandler(c *fb.Context, w http.ResponseWriter, r *http.Request) (int, error) {\n\tif c.Auth.Method == \"none\" {\n\t\t// NoAuth instances shouldn't call this method.\n\t\treturn 0, nil\n\t}\n\n\tif c.Auth.Method == \"proxy\" {\n\t\t// Receive the Username from the Header and check if it exists.\n\t\tu, err := c.Store.Users.GetByUsername(r.Header.Get(c.Auth.Header), c.NewFS)\n\t\tif err != nil {\n\t\t\treturn http.StatusForbidden, nil\n\t\t}\n\n\t\tc.User = u\n\t\treturn printToken(c, w)\n\t}\n\n\t// Receive the credentials from the request and unmarshal them.\n\tvar cred cred\n\n\tif r.Body == nil {\n\t\treturn http.StatusForbidden, nil\n\t}\n\n\terr := json.NewDecoder(r.Body).Decode(&cred)\n\tif err != nil {\n\t\treturn http.StatusForbidden, err\n\t}\n\n\t// Wenkun, Validate the token of user from cloud server and return JWT token.\n\tif c.Auth.Method != \"none\" {\n\t\tok, u := validateAuthByUserId(c, cred.Username)\n\t\tif !ok {\n\t\t\treturn http.StatusForbidden, nil\n\t\t}\n\n\t\tc.User = u\n\t\treturn printToken(c, w)\n\t}\n\n\t// If ReCaptcha is enabled, check the code.\n\tif len(c.ReCaptcha.Secret) > 0 {\n\t\tok, err := reCaptcha(c.ReCaptcha.Host, c.ReCaptcha.Secret, cred.ReCaptcha)\n\t\tif err != nil {\n\t\t\treturn http.StatusForbidden, err\n\t\t}\n\n\t\tif !ok {\n\t\t\treturn http.StatusForbidden, nil\n\t\t}\n\t}\n\n\t// Checks if the user exists.\n\tu, err := c.Store.Users.GetByUsername(cred.Username, c.NewFS)\n\tif err != nil {\n\t\treturn http.StatusForbidden, nil\n\t}\n\n\t// Checks if the password is correct.\n\tif 
!fb.CheckPasswordHash(cred.Password, u.Password) {\n\t\treturn http.StatusForbidden, nil\n\t}\n\n\tc.User = u\n\treturn printToken(c, w)\n}", "func (a *App) AuthWorkflow() {\n\twebapp := &Webapp{}\n\tbackend := NewBackend()\n\n\topen.Start(backend.AuthURL())\n\tfmt.Println(\"Head to your browser to complete authorization steps.\")\n\tfmt.Println(\"Listening for response...\")\n\twebapp.Run()\n}", "func (e VerifyHandler) AuthHandler(http.ResponseWriter, *http.Request) {}", "func (a *Authenticator) AuthHandler() khttp.FuncHandler {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t_, handled, err := a.PerformAuth(w, r)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"your lack of authentication cookie is impressive - something went wrong\", http.StatusInternalServerError)\n\t\t\tlog.Printf(\"ERROR - could not complete authentication - %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif !handled {\n\t\t\thttp.Redirect(w, r, \"/\", http.StatusTemporaryRedirect)\n\t\t}\n\t}\n}", "func (p DirectHandler) AuthHandler(http.ResponseWriter, *http.Request) {}", "func (a *Auth) PerformAuth(next http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tuserPayload := user.PayloadFromTokenCookie(r)\n\n\t\tif userPayload == nil {\n\t\t\tnext(w, r)\n\n\t\t\treturn\n\t\t}\n\n\t\texist, err := a.userExist(userPayload.ID)\n\n\t\tif err != nil {\n\t\t\tflash.Add(\"/login\", w, r, dbErrrorMessage)\n\n\t\t\treturn\n\t\t}\n\n\t\tif !exist {\n\t\t\tnext(w, r)\n\n\t\t\treturn\n\t\t}\n\n\t\tnewR := r.WithContext(user.ContextWithPayload(r.Context(), userPayload))\n\n\t\tnext(w, newR)\n\t}\n}", "func (o *oidcServer) Authenticate(w http.ResponseWriter, r *http.Request) {\n\n\to.Lock()\n\tdefer o.Unlock()\n\n\tzap.L().Debug(\"Authenticating\")\n\n\tif o.serverFlow == ServerFlowTypeAuthFailure {\n\t\thttp.Error(w, \"Authentication failure\", http.StatusUnauthorized)\n\t\tzap.L().Warn(\"Authentication failure\", zap.Reflect(\"type\", o.serverFlow))\n\t\treturn\n\t}\n\n\tstate := r.URL.Query().Get(\"state\")\n\tredURI := r.URL.Query().Get(\"redirect_uri\")\n\n\treqURI, err := url.ParseRequestURI(redURI)\n\tif err != nil {\n\t\tzap.L().Error(\"Unable to parse redirect uri\", zap.Error(err))\n\t\treturn\n\t}\n\n\tq := reqURI.Query()\n\tq.Add(\"state\", state)\n\tq.Add(\"redirect_uri\", redURI)\n\treqURI.RawQuery = q.Encode()\n\n\thttp.Redirect(w, r, reqURI.String(), http.StatusTemporaryRedirect)\n}", "func completeAuth(w http.ResponseWriter, r *http.Request) {\n\tuser, err := gothic.CompleteUserAuth(w, r)\n\tif err != nil {\n\t\tfmt.Fprintln(w, err)\n\t\treturn\n\t}\n\tuserSession.Values[\"user\"] = user\n\thttp.Redirect(w, r, \"/\", 301)\n}", "func CheckAuth(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif auth.IsLoggedIn(r) {\n\t\t\tnext.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\tif strings.HasPrefix(r.URL.Path, \"/api\") {\n\t\t\tcommon.NewAPIResponse(\"Login required\", nil).WriteResponse(w, http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\thttp.Redirect(w, r, \"/login\", http.StatusTemporaryRedirect)\n\t\treturn\n\t})\n}", "func authCallbackHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {\n\tghu, err := authWithGithubCode(ctx, r.FormValue(\"code\"))\n\tif err != nil {\n\t\trenderError(w, err, http.StatusInternalServerError, \"GitHub login failed\")\n\t\treturn\n\t}\n\n\tu, err := findOrCreateUser(ghu)\n\tif err != nil {\n\t\trenderError(w, err, http.StatusInternalServerError, \"Failed to find 
a user using GitHub profile\")\n\t\treturn\n\t}\n\n\tsess := db.NewSession(u.ID)\n\tif err := sess.Create(); err != nil {\n\t\trenderError(w, err, http.StatusInternalServerError, \"Failed to create a session\")\n\t\treturn\n\t}\n\n\tctx = auth.ContextWithSession(ctx, sess)\n\tauth.AuthorizeResponse(ctx, w)\n\tauth.CacheSession(sess)\n\n\thttp.Redirect(w, r, rootPath, http.StatusTemporaryRedirect)\n}", "func rootHandler(w http.ResponseWriter, r *http.Request) {\n\n\tif !verifyLogin(r) {\n\t\turl := LoginCfg.AuthCodeURL(\"\")\n\t\turl = url + OauthURLParams\n\t\t// this will preserve the case number in the URI path during the OAuth2 redirect\n\t\tparams := r.URL.Query()\n\t\tparamkeys := make([]string, 0)\n\t\tfor k := range params {\n\t\t\tfor i := range params[k] {\n\t\t\t\tparamkeys = append(paramkeys, k+\"=\"+params[k][i])\n\t\t\t}\n\t\t}\n\t\tif len(paramkeys) > 0 {\n\t\t\turl = url + \"&state=\" + base64.StdEncoding.EncodeToString([]byte(strings.Join(paramkeys, \"?\")))\n\t\t}\n\n\t\thttp.Redirect(w, r, url, http.StatusFound)\n\t\treturn\n\t}\n\n\t// if user is not using https then redirect them\n\tif r.Header.Get(\"x-forwarded-proto\") != \"https\" && BASEURL != LOCALBASEURL {\n\t\tfmt.Printf(\"TLS handshake is https=false x-forwarded-proto=%s\\n\", r.Header.Get(\"x-forwarded-proto\"))\n\t\thttp.Redirect(w, r, BASEURL, http.StatusFound)\n\t\treturn\n\t}\n\n\tstartPageTemplate.Execute(w, \"\")\n}", "func (r *oauthProxy) oauthAuthorizationHandler(w http.ResponseWriter, req *http.Request) {\n\tctx, span, logger := r.traceSpan(req.Context(), \"authorization handler\")\n\tif span != nil {\n\t\tdefer span.End()\n\t}\n\n\tif r.config.SkipTokenVerification {\n\t\tr.errorResponse(w, req.WithContext(ctx), \"\", http.StatusNotAcceptable, nil)\n\t\treturn\n\t}\n\n\tclient, err := r.getOAuthClient(r.getRedirectionURL(w, req.WithContext(ctx)))\n\tif err != nil {\n\t\tr.errorResponse(w, req.WithContext(ctx), \"failed to retrieve the oauth client for authorization\", http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\t// step: set the access type of the session\n\tvar accessType string\n\tif containedIn(\"offline\", r.config.Scopes, false) {\n\t\taccessType = \"offline\"\n\t}\n\n\tauthURL := client.AuthCodeURL(req.URL.Query().Get(\"state\"), accessType, \"\")\n\tlogger.Debug(\"incoming authorization request from client address\",\n\t\tzap.String(\"access_type\", accessType),\n\t\tzap.String(\"auth_url\", authURL),\n\t\tzap.String(\"client_ip\", req.RemoteAddr))\n\n\t// step: if we have a custom sign-in page, let's display that\n\tif r.config.hasCustomSignInPage() {\n\t\tmodel := make(map[string]string)\n\t\tmodel[\"redirect\"] = authURL\n\t\tw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_ = r.Render(w, path.Base(r.config.SignInPage), mergeMaps(model, r.config.Tags))\n\n\t\treturn\n\t}\n\n\tr.redirectToURL(authURL, w, req.WithContext(ctx), http.StatusTemporaryRedirect)\n}", "func AuthCallback(w http.ResponseWriter, r *http.Request) {\n\tcode := r.FormValue(\"code\")\n\tcallbackState := r.FormValue(\"state\")\n\n\tclientID, err := state.DecryptState(callbackState, os.Getenv(\"SECRET\"))\n\tif err != nil {\n\t\thttp.Error(w, \"State could not be verified\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tconfigValue, err := config.ReadConfigFromEnv(clientID)\n\tif err != nil {\n\t\tlog.Printf(\"Error while verifying state: %v\", err)\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\ttoken, err := 
github.Exchange(configValue.ClientID, configValue.ClientSecretID, code)\n\tif err != nil {\n\t\tlog.Printf(\"Error while exchanging code %s for client %s with Github: %v\", code, configValue.ClientID, err)\n\t\thttp.Error(w, fmt.Sprintf(\"Code %s for client %s was not accepted by the Oauth provider\", code, configValue.ClientID), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tredirectURLWithToken := fmt.Sprintf(\"%s?token=%s\", configValue.RedirectURL, token)\n\n\tw.Header().Set(\"Location\", redirectURLWithToken)\n\tw.WriteHeader(http.StatusTemporaryRedirect)\n}", "func beginAuth(w http.ResponseWriter, r *http.Request) {\n\tgothic.BeginAuthHandler(w, r)\n}", "func (o *SlackOAuthHandlers) Auth(w http.ResponseWriter, r *http.Request) {\n\tparams, err := url.ParseQuery(r.URL.RawQuery)\n\tif err != nil {\n\t\thlog.FromRequest(r).Error().\n\t\t\tErr(err).\n\t\t\tMsg(\"parsing query params\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// An error is received when a user declines to install\n\t// or an unexpected issue occurs. The app treats a\n\t// declined install gracefully.\n\tif params[\"error\"] != nil {\n\t\tswitch params[\"error\"][0] {\n\t\tcase errAccessDenied:\n\t\t\thlog.FromRequest(r).Info().\n\t\t\t\tErr(errors.New(params[\"error\"][0])).\n\t\t\t\tMsg(\"user declined install\")\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\treturn\n\t\tdefault:\n\t\t\thlog.FromRequest(r).Error().\n\t\t\t\tErr(errors.New(params[\"error\"][0])).\n\t\t\t\tMsg(\"failed install\")\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\n\tcode := params[\"code\"]\n\tif len(code) != 1 {\n\t\thlog.FromRequest(r).Error().\n\t\t\tErr(err).\n\t\t\tMsg(\"code not provided\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// TODO: inject an http client with http logging.\n\tresp, err := http.Get(fmt.Sprintf(\n\t\to.AccessURLTemplate,\n\t\to.ClientID,\n\t\to.ClientSecret,\n\t\tcode[0],\n\t))\n\tif err != nil {\n\t\thlog.FromRequest(r).Error().\n\t\t\tErr(err).\n\t\t\tMsg(\"oauth req error\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tvar access accessResponse\n\tif err := json.NewDecoder(resp.Body).Decode(&access); err != nil {\n\t\thlog.FromRequest(r).Error().\n\t\t\tErr(err).\n\t\t\tMsg(\"unable to decode slack access response\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif !access.OK {\n\t\thlog.FromRequest(r).Warn().\n\t\t\tMsg(\"access not ok\")\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\treturn\n\t}\n\n\terr = o.TokenWriter.Store(&TokenData{\n\t\tTeamID: access.TeamID,\n\t\tUserID: access.UserID,\n\t\tBotToken: access.Bot.BotAccessToken,\n\t\tBotUserID: access.Bot.BotUserID,\n\t\tAccessToken: access.AccessToken,\n\t})\n\tif err != nil {\n\t\thlog.FromRequest(r).Error().\n\t\t\tErr(err).\n\t\t\tMsg(\"unable to store token\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tredirect := fmt.Sprintf(\"https://slack.com/app_redirect?app=%s\", o.AppID)\n\thttp.Redirect(w, r, redirect, http.StatusFound)\n}", "func Authentication(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Printf(\"Authentication: checking for existing authenticated session\\n\")\n\t\tauthenticated, ok := r.Context().Value(session.AuthenticatedKey).(bool)\n\t\tlog.Printf(\"Authentication: authenticated?: %t\\n\", authenticated)\n\t\tif (ok == false || authenticated == false) {\n\t\t\tstate := 
r.Context().Value(session.StateKey).(string)\n\t\t\tlog.Printf(\"Authentication: using state: %v\\n\", state)\n\t\t\tauthorizeURL := oauth2Config.AuthCodeURL(state, oauth2.AccessTypeOnline)\n\t\t\tlog.Printf(\"Authentication: redirecting to %s\\n\", authorizeURL)\n\t\t\thttp.Redirect(w, r, authorizeURL, http.StatusFound)\n\t\t\treturn\n\t\t} else { // authenticated == true\n\t\t\tlog.Printf(\"Authentication: user is authenticated, done\\n\")\n\t\t\tnext.ServeHTTP(w, r)\n\t\t}\n\t})\n}", "func handleNaturalistLogin(w http.ResponseWriter, r *http.Request) {\n\turl := authenticator.AuthUrl()\n\n\tlog.Printf(\"Redirecting: %s\", url)\n\n\thttp.Redirect(w, r, url, http.StatusTemporaryRedirect)\n}", "func Auth(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Infof(\"Authenticating request: \")\n\t\tif r.Header.Get(\"user\") != \"foo\" {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\tlog.Infof(\"Auth: Pass\")\n\t\tnext.ServeHTTP(w, r)\n\n\t})\n}", "func (h *Handler) AuthHandler(w http.ResponseWriter, r *http.Request) {\n\t// Check that the state from *LoginHandler and the state in the response match\n\tsc, err := r.Cookie(\"originalState\")\n\tif err != nil || sc.Value != r.FormValue(\"state\") {\n\t\t// The state does not match or an error occurred; respond with 401\n\t\tlog.Error(err.Error())\n\t\thttp.Error(w, \"Invalid state in response\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\t// Exchange the authorization code obtained from the initial redirect for a token, with which we can fetch the user's details\n\ttok, err := h.OAuthConf.Exchange(oauth2.NoContext, r.FormValue(\"code\"))\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\t// Check whether the token is valid\n\tif tok.Valid() == false {\n\t\thttp.Error(w, \"Token is invalid\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\t// Use the client to send a token-bearing request to the endpoint that returns basic user info\n\tclient := h.OAuthConf.Client(oauth2.NoContext, tok)\n\tuserResponse, err := client.Get(\"https://www.googleapis.com/oauth2/v3/userinfo\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\t// Store the received data in a GoogleUser\n\tvar gu GoogleUser\n\terr = json.NewDecoder(userResponse.Body).Decode(&gu)\n\tif err != nil {\n\t\tlog.Error(err)\n\t}\n\tlog.Info(gu.Email)\n\n\t// Check whether the user already exists (via the unique email); if not, save them\n\tvar u *biolog.User\n\tu, err = h.UserHandler.UserService.UserByEmail(gu.Email)\n\n\tif err != nil {\n\t\tif err.Error() == \"Not found\" {\n\t\t\t// The user was not found, so they are signing in for the first time\n\t\t\t// Build a biolog.User from the GoogleUser and save it to the DB\n\t\t\tu = &biolog.User{\n\t\t\t\tExternalID: &gu.ID,\n\t\t\t\tDisplayName: &gu.Name,\n\t\t\t\tGivenName: &gu.GivenName,\n\t\t\t\tFamilyName: &gu.FamilyName,\n\t\t\t\tEmail: &gu.Email,\n\t\t\t\tPicture: &gu.Picture,\n\t\t\t\tExternalAuthProvider: &googleAuth,\n\t\t\t}\n\t\t\tu, err = h.UserHandler.UserService.CreateUser(*u)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"Could not create user: \", err)\n\t\t\t\trespondWithError(w, http.StatusInternalServerError, \"Error creating user\")\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Error(err.Error())\n\t\t\trespondWithError(w, http.StatusInternalServerError, \"Unknown error creating user\")\n\t\t}\n\t}\n\n\t// Issue a new JWT to the user\n\t// TODO:\n\t// - check the JWT expiry time 
(10-15min ?)\n\tclaims := &EmailClaims{\n\t\tgu.Email,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Unix() + 3600,\n\t\t\tIssuer: \"biolog-app\",\n\t\t},\n\t}\n\tjwttok := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\tss, _ := jwttok.SignedString(jwtSignKey)\n\tssJSON, _ := json.Marshal(map[string]string{\"token\": ss})\n\n\t// Respond with the JWT in the response body\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(ssJSON)\n\t// TODO:\n\t// - redirect logic, or should this live on the frontend (return the JWT in a cookie and redirect?)\n\t// - refresh token\n\t// see https://stackoverflow.com/questions/43090518/how-to-properly-handle-a-jwt-refresh\n\n\t// After a successful login, redirect the user to the home page\n\t//http.Redirect(w, r, \"/home\", http.StatusMovedPermanently)\n}", "func (app *appVars) oauthRedirect(w http.ResponseWriter, r *http.Request) {\n\n\t// get and compare state to prevent Cross-Site Request Forgery\n\tstate := r.FormValue(\"state\")\n\tif state != app.state {\n\t\tlog.Fatalln(\"state is not the same (CSRF?)\")\n\t}\n\n\t// get authorization code\n\tcode := r.FormValue(\"code\")\n\n\t// exchange authorization code for token\n\ttoken, err := app.conf.Exchange(app.ctx, code)\n\tif err != nil {\n\t\tlog.Println(\"conf.Exchange\", err)\n\t\t// signal that authorization was not successful\n\t\tapp.authChan <- false\n\t\treturn\n\t}\n\n\t// update HTTP client with token\n\tapp.client = app.conf.Client(app.ctx, token)\n\n\t// TODO\n\tapp.token = token\n\n\tconst tpl = `\n<!DOCTYPE html>\n<html>\n\t<head>\n\t\t<meta charset=\"UTF-8\">\n\t\t<title>{{.Title}}</title>\n\t</head>\n\t<body>\n\t<p>Authorization successful\n\t<p><a href=\"{{.BaseUrl}}/listNotebooks\">List Notebooks</a> \n\t<p><a href=\"{{.BaseUrl}}/listPages\">List Pages</a> \n\t</body>\n</html>`\n\n\tt, err := template.New(\"authorized\").Parse(tpl)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdata := struct {\n\t\tTitle string\n\t\tBaseUrl string\n\t}{}\n\n\tdata.Title = \"Authorized\"\n\tdata.BaseUrl = \"http://localhost:9999\"\n\n\terr = t.Execute(w, data)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn\n}", "func Auth(next http.HandlerFunc) http.HandlerFunc {\n\treturn http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {\n\t\tredir := req.URL.Scheme + req.URL.Host + \"/admin/login\"\n\n\t\tif IsValid(req) {\n\t\t\tnext.ServeHTTP(res, req)\n\t\t} else {\n\t\t\thttp.Redirect(res, req, redir, http.StatusFound)\n\t\t}\n\t})\n}", "func (g *Google) BeginAuth(c *gin.Context) {\n\tdestination := c.Query(\"destination\")\n\n\tg.InitOauthConfig(destination)\n\n\turl := g.oauthConf.AuthCodeURL(utils.Cfg.OauthSettings.GoogleSettings.Statestr)\n\n\tc.Redirect(http.StatusTemporaryRedirect, url)\n}", "func authHandler(w http.ResponseWriter, r *http.Request) {\n\t// make sure it's a POST\n\tif r.Method != \"POST\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintln(w, \"No POST\", r.Method)\n\t\treturn\n\t}\n\n\tuser := r.FormValue(\"user\")\n\tpass := r.FormValue(\"pass\")\n\n\tlog.Printf(\"Authenticate: user[%s] pass[%s]\\n\", user, pass)\n\n\t// check values\n\tif user != \"test\" || pass != \"known\" {\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\tfmt.Fprintln(w, \"Wrong info\")\n\t\treturn\n\t}\n\n\ttokenString, err := createToken(user)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintln(w, \"Sorry, error while Signing Token!\")\n\t\tlog.Printf(\"Token Signing error: %v\\n\", err)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", 
\"application/jwt\")\n\tw.WriteHeader(http.StatusOK)\n\tfmt.Fprintln(w, tokenString)\n}", "func requireAuthWeb(c *router.Context, next router.Handler) {\n\tif auth.CurrentIdentity(c.Context) == identity.AnonymousIdentity {\n\t\tloginURL, err := auth.LoginURL(c.Context, \"/\")\n\t\tif err != nil {\n\t\t\tlogging.Errorf(c.Context, \"Failed to get login URL\")\n\t\t\thttp.Error(c.Writer, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tlogging.Infof(c.Context, \"Redirecting to %s\", loginURL)\n\t\thttp.Redirect(c.Writer, c.Request, loginURL, 302)\n\t\treturn\n\t}\n\n\tisGoogler, err := auth.IsMember(c.Context, rwGroup)\n\tif err != nil {\n\t\tc.Writer.WriteHeader(http.StatusInternalServerError)\n\t\tlogging.Errorf(c.Context, \"Failed to get group membership.\")\n\t\treturn\n\t}\n\tif isGoogler {\n\t\tnext(c)\n\t\treturn\n\t}\n\n\ttemplates.MustRender(c.Context, c.Writer, \"pages/access_denied.html\", nil)\n}", "func DoAuth(ctx *gin.Context) {\n\t// Handle the exchange code to initiate a transport.\n\tsession := sessions.Default(ctx)\n\tretrievedState := session.Get(\"state\")\n\n\tif session.Get(\"userid\") != nil {\n\t\treturn\n\t}\n\n\tif retrievedState != ctx.Query(\"state\") {\n\t\tctx.AbortWithError(http.StatusUnauthorized, fmt.Errorf(\"Invalid session state: %s\", retrievedState))\n\t\treturn\n\t}\n\n\ttok, err := conf.Exchange(oauth2.NoContext, ctx.Query(\"code\"))\n\tif err != nil {\n\t\tctx.AbortWithError(http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\tclient := conf.Client(oauth2.NoContext, tok)\n\temail, err := client.Get(\"https://www.googleapis.com/oauth2/v3/userinfo\")\n\tif err != nil {\n\t\tctx.AbortWithError(http.StatusBadRequest, err)\n\t\treturn\n\t}\n\tdefer email.Body.Close()\n\tdata, err := ioutil.ReadAll(email.Body)\n\tif err != nil {\n\t\tglog.Errorf(\"[Gin-OAuth] Could not read Body: %s\", err)\n\t\tctx.AbortWithError(http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tvar user User\n\terr = json.Unmarshal(data, &user)\n\tif err != nil {\n\t\tglog.Errorf(\"[Gin-OAuth] Unmarshal userinfo failed: %s\", err)\n\t\tctx.AbortWithError(http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\t// save userinfo, which could be used in Handlers\n\tctx.Set(\"user\", user)\n\n\tvals := map[string]string{\n\t\t\"Name\": user.Name,\n\t\t\"Email\": user.Email,\n\t\t\"Picture\": user.Picture,\n\t\t\"GivenName\": user.GivenName,\n\t\t\"FamilyName\": user.FamilyName,\n\t\t\"EmailVerified\": fmt.Sprintf(\"%v\", user.EmailVerified),\n\t\t\"Gender\": user.Gender,\n\t\t\"Sub\": user.Sub,\n\t\t\"Profile\": user.Profile,\n\t}\n\tseccookie.StoreSecureCookie(ctx, vals, scookie)\n\n\t// Redirect to root after auth\n\tctx.Redirect(http.StatusTemporaryRedirect, \"/\")\n}", "func OAUTHGETHandler(c *fiber.Ctx) error {\n\tmodels.SYSLOG.Trace(\"entering OAUTHGETHandler\")\n\tdefer models.SYSLOG.Trace(\"exiting OAUTHGETHandler\")\n\treturn c.Render(\"protected\", fiber.Map{})\n}", "func (w *ServerInterfaceWrapper) PerformAuth(ctx echo.Context) error {\n\tvar err error\n\n\t// Invoke the callback with all the unmarshalled arguments\n\terr = w.Handler.PerformAuth(ctx)\n\treturn err\n}", "func (a *GoogleAuth) GoogleLoginHandler(w http.ResponseWriter, r *http.Request) {\n\tstate := a.NewAuthState(r)\n\turl := a.config.AuthCodeURL(state)\n\thttp.Redirect(w, r, url, http.StatusTemporaryRedirect)\n}", "func askForLogin(w http.ResponseWriter, r *http.Request) {\n\thttp.Redirect(w, r, oauthProviderConfig.oauthConfig.AuthCodeURL(\"\"), http.StatusFound)\n}", "func (app 
*application) requireAuthentication(next http.Handler) http.Handler {\r\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\r\n\t\t// If the client is not authenticated, redirect\r\n\t\tif loggedin := app.isAuthenticated(r); !loggedin {\r\n\t\t\thttp.Redirect(w, r, \"/login\", http.StatusSeeOther)\r\n\t\t\treturn\r\n\t\t}\r\n\t\t// If the client is not a verified user, redirect\r\n\t\tif verified := app.isVerified(r); !verified {\r\n\t\t\thttp.Redirect(w, r, \"/verifyuser\", http.StatusSeeOther)\r\n\t\t\treturn\r\n\t\t}\r\n\r\n\t\t/* This section should be reviewed */\r\n\t\t// Else, set the \"Cache-Control: no-store\" header so pages\r\n\t\t// which require authentication are not stored in cache\r\n\t\tw.Header().Add(\"Cache-Control\", \"no-store\")\r\n\r\n\t\t// and call the next handler in the chain.\r\n\t\tnext.ServeHTTP(w, r)\r\n\t})\r\n}", "func (p Service) authHandler(w http.ResponseWriter, r *http.Request) {\n\toauthClaims, _, err := p.JwtService.Get(r)\n\tif err != nil {\n\t\trest.SendErrorJSON(w, r, http.StatusInternalServerError, err, \"failed to get token\")\n\t\treturn\n\t}\n\n\tif oauthClaims.Handshake == nil {\n\t\trest.SendErrorJSON(w, r, http.StatusForbidden, nil, \"invalid handshake token\")\n\t\treturn\n\t}\n\n\tretrievedState := oauthClaims.Handshake.State\n\tif retrievedState == \"\" || retrievedState != r.URL.Query().Get(\"state\") {\n\t\trest.SendErrorJSON(w, r, http.StatusForbidden, nil, \"unexpected state\")\n\t\treturn\n\t}\n\n\tp.Logf(\"[DEBUG] token with state %s\", retrievedState)\n\ttok, err := p.conf.Exchange(context.Background(), r.URL.Query().Get(\"code\"))\n\tif err != nil {\n\t\trest.SendErrorJSON(w, r, http.StatusInternalServerError, err, \"exchange failed\")\n\t\treturn\n\t}\n\n\tclient := p.conf.Client(context.Background(), tok)\n\tuinfo, err := client.Get(p.InfoURL)\n\tif err != nil {\n\t\trest.SendErrorJSON(w, r, http.StatusServiceUnavailable, err, \"failed to get client info\")\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\tif e := uinfo.Body.Close(); e != nil {\n\t\t\tp.Logf(\"[WARN] failed to close response body, %s\", e)\n\t\t}\n\t}()\n\n\tdata, err := ioutil.ReadAll(uinfo.Body)\n\tif err != nil {\n\t\trest.SendErrorJSON(w, r, http.StatusInternalServerError, err, \"failed to read user info\")\n\t\treturn\n\t}\n\n\tjData := map[string]interface{}{}\n\tif e := json.Unmarshal(data, &jData); e != nil {\n\t\trest.SendErrorJSON(w, r, http.StatusInternalServerError, e, \"failed to unmarshal user info\")\n\t\treturn\n\t}\n\tp.Logf(\"[DEBUG] got raw user info %+v\", jData)\n\n\tu := p.MapUser(jData, data)\n\tu = p.setAvatar(u)\n\n\tcid, err := p.randToken()\n\tif err != nil {\n\t\trest.SendErrorJSON(w, r, http.StatusInternalServerError, err, \"failed to make claim's id\")\n\t\treturn\n\t}\n\tclaims := token.Claims{\n\t\tUser: &u,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tIssuer: p.Issuer,\n\t\t\tId: cid,\n\t\t\tAudience: oauthClaims.Audience,\n\t\t},\n\t\tSessionOnly: oauthClaims.SessionOnly,\n\t}\n\n\tif err = p.JwtService.Set(w, claims); err != nil {\n\t\trest.SendErrorJSON(w, r, http.StatusInternalServerError, err, \"failed to set token\")\n\t\treturn\n\t}\n\n\tp.Logf(\"[DEBUG] user info %+v\", u)\n\n\t// redirect to back url if present in login query params\n\tif oauthClaims.Handshake != nil && oauthClaims.Handshake.From != \"\" {\n\t\thttp.Redirect(w, r, oauthClaims.Handshake.From, http.StatusTemporaryRedirect)\n\t\treturn\n\t}\n\trest.RenderJSON(w, r, &u)\n}", "func (srv *targetServiceHandler) auth(h http.Handler) 
http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tctx := httpbakery.ContextWithRequest(context.TODO(), req)\n\t\tops, err := opsForRequest(req)\n\t\tif err != nil {\n\t\t\tfail(w, http.StatusInternalServerError, \"%v\", err)\n\t\t\treturn\n\t\t}\n\t\tauthChecker := srv.checker.Auth(httpbakery.RequestMacaroons(req)...)\n\t\tif _, err = authChecker.Allow(ctx, ops...); err != nil {\n\t\t\thttpbakery.WriteError(ctx, w, srv.oven.Error(ctx, req, err))\n\t\t\treturn\n\t\t}\n\t\th.ServeHTTP(w, req)\n\t})\n}", "func loginRedirectHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tprovider, err := strconv.Atoi(vars[\"provider\"])\n\tif err != nil {\n\t\thttp.Error(w, \"Invalid identity provider\", http.StatusInternalServerError)\n\t\treturn\n\t} else {\n\t\tidp.Authenticate(provider, w, r)\n\t}\n}", "func MustAuth(handler http.Handler) http.Handler {\n\treturn &authHandler{next: handler}\n}", "func (g *Google) Authenticate(c *gin.Context) {\n\tvar appErr models.AppError\n\tvar err error\n\tvar matchUser models.User\n\tvar remoteOAuth models.OAuthAccount\n\n\tdestination := c.Query(\"destination\")\n\n\t// get user data from Google\n\tfstring, err := g.GetRemoteUserData(c.Request, c.Writer)\n\tif err != nil {\n\t\tc.Redirect(http.StatusTemporaryRedirect, destination)\n\t\treturn\n\t}\n\n\t// decode user data returned by Google oAuth\n\tremoteOAuth = models.OAuthAccount{\n\t\tType: constants.Google,\n\t\tAId: utils.ToNullString(gjson.Get(fstring, \"id\").Str),\n\t\tEmail: utils.ToNullString(gjson.Get(fstring, \"email\").Str),\n\t\tName: utils.ToNullString(gjson.Get(fstring, \"name\").Str),\n\t\tFirstName: utils.ToNullString(gjson.Get(fstring, \"given_name\").Str),\n\t\tLastName: utils.ToNullString(gjson.Get(fstring, \"family_name\").Str),\n\t\tGender: utils.GetGender(gjson.Get(fstring, \"gender\").Str),\n\t\tPicture: utils.ToNullString(gjson.Get(fstring, \"picture\").Str),\n\t}\n\n\t// get the record from o_auth_accounts table\n\t_, err = g.Storage.GetOAuthData(remoteOAuth.AId, remoteOAuth.Type)\n\n\t// oAuth account does not exist\n\t// sign in by oauth for the first time\n\tif err != nil {\n\t\tappErr = err.(models.AppError)\n\n\t\t// return internal server error\n\t\tif appErr.StatusCode != http.StatusNotFound {\n\t\t\tc.JSON(appErr.StatusCode, gin.H{\"status\": \"error\", \"error\": appErr.Error()})\n\n\t\t\treturn\n\t\t}\n\n\t\t// email is provided in oAuth response\n\t\tif remoteOAuth.Email.Valid {\n\t\t\t// get the record from users table\n\t\t\tmatchUser, err = g.Storage.GetUserByEmail(remoteOAuth.Email.String)\n\n\t\t\t// record does not exist in users table\n\t\t\tif err != nil {\n\t\t\t\tappErr = err.(models.AppError)\n\n\t\t\t\t// return internal server error\n\t\t\t\tif appErr.StatusCode != http.StatusNotFound {\n\t\t\t\t\tc.JSON(appErr.StatusCode, gin.H{\"status\": \"error\", \"error\": appErr.Error()})\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// no record in users table with this email\n\t\t\t\t// create a record in users table\n\t\t\t\t// and create a record in o_auth_accounts table\n\t\t\t\tmatchUser = g.Storage.InsertUserByOAuth(remoteOAuth)\n\t\t\t} else {\n\t\t\t\t// record exists in users table\n\t\t\t\t// create record in o_auth_accounts table\n\t\t\t\t// and connect it to the user record\n\t\t\t\tremoteOAuth.UserID = matchUser.ID\n\t\t\t\terr = g.Storage.InsertOAuthAccount(remoteOAuth)\n\n\t\t\t\t// return internal server error\n\t\t\t\tif err != nil {\n\t\t\t\t\tc.JSON(http.StatusInternalServerError, 
gin.H{\"status\": \"error\", \"error\": err.Error()})\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t// email is not provided in oAuth response\n\t\t\t// create a record in users table\n\t\t\t// and also create a record in o_auth_accounts table\n\t\t\tmatchUser = g.Storage.InsertUserByOAuth(remoteOAuth)\n\t\t}\n\t} else {\n\t\t// user signed in before\n\t\tmatchUser, err = g.Storage.GetUserDataByOAuth(remoteOAuth)\n\t\tif err != nil {\n\t\t\tc.JSON(http.StatusInternalServerError, gin.H{\"status\": \"error\", \"error\": err.Error()})\n\t\t\treturn\n\t\t}\n\n\t\t// update existing OAuth data\n\t\t_, err = g.Storage.UpdateOAuthData(remoteOAuth)\n\t\tif err != nil {\n\t\t\tc.JSON(http.StatusInternalServerError, gin.H{\"status\": \"error\", \"error\": err.Error()})\n\t\t\treturn\n\t\t}\n\t}\n\n\ttoken, err := utils.RetrieveToken(matchUser.ID, matchUser.Email.String)\n\tif err != nil {\n\t\tc.JSON(appErr.StatusCode, gin.H{\"status\": \"error\", \"error\": appErr.Error()})\n\t\treturn\n\t}\n\n\tvar u *url.URL\n\tvar secure = false\n\tu, err = url.Parse(destination)\n\n\tif u.Scheme == \"https\" {\n\t\tsecure = true\n\t}\n\n\tparameters := u.Query()\n\tparameters.Add(\"login\", \"google\")\n\n\tu.RawQuery = parameters.Encode()\n\tdestination = u.String()\n\n\tauthJSON := &models.AuthenticatedResponse{ID: matchUser.ID, Privilege: matchUser.Privilege, FirstName: matchUser.FirstName.String, LastName: matchUser.LastName.String, Email: matchUser.Email.String, Jwt: token}\n\tauthResp, _ := json.Marshal(authJSON)\n\n\tc.SetCookie(\"auth_info\", string(authResp), 100, u.Path, utils.Cfg.ConsumerSettings.Domain, secure, true)\n\tc.Redirect(http.StatusTemporaryRedirect, destination)\n}", "func HandleLoginRedirect(r *http.Request, w http.ResponseWriter, cfg *setting.Cfg, identity *Identity, validator RedirectValidator) {\n\tredirectURL := handleLogin(r, w, cfg, identity, validator)\n\thttp.Redirect(w, r, redirectURL, http.StatusFound)\n}", "func (lh *AuthorizationCodeLocalhost) redirectUriHandler(w http.ResponseWriter, r *http.Request) {\n\tconst (\n\t\tcloseTab string = \". 
Please close this tab.\"\n\t)\n\n\trq := r.URL.RawQuery\n\turlValues, err := url.ParseQuery(rq)\n\tif err != nil {\n\t\terr := fmt.Sprintf(\"Unable to parse query: %v\", err)\n\n\t\tlh.AuthCodeReqStatus = AuthorizationCodeStatus{Status: FAILED, Details: err}\n\t\tlh.authCode = AuthorizationCode{}\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(lh.AuthCodeReqStatus.Details + closeTab))\n\t\treturn\n\t}\n\n\turlError := urlValues.Get(\"error\")\n\t// Authentication Code Error from consent page\n\tif urlError != \"\" {\n\t\terr := fmt.Sprintf(\"An error occurred when getting authorization code: %s\", urlError)\n\t\tlh.AuthCodeReqStatus = AuthorizationCodeStatus{Status: FAILED, Details: err}\n\t\tlh.authCode = AuthorizationCode{}\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(lh.AuthCodeReqStatus.Details + closeTab))\n\t\treturn\n\t}\n\n\turlCode := urlValues.Get(\"code\")\n\turlState := urlValues.Get(\"state\")\n\t// No Code, State, or Error is treated as an unknown error\n\tif urlCode == \"\" && urlState == \"\" {\n\t\terr := \"Unknown error when getting authorization code\"\n\t\tlh.AuthCodeReqStatus = AuthorizationCodeStatus{Status: FAILED, Details: err}\n\n\t\tlh.authCode = AuthorizationCode{}\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(lh.AuthCodeReqStatus.Details + closeTab))\n\t\treturn\n\t}\n\n\t// Authorization code returned\n\tif urlCode != \"\" && urlState != \"\" {\n\t\tlh.authCode = AuthorizationCode{\n\t\t\tCode: urlCode,\n\t\t\tState: urlState,\n\t\t}\n\n\t\tlh.AuthCodeReqStatus = AuthorizationCodeStatus{\n\t\t\tStatus: GRANTED, Details: \"Authorization code granted\"}\n\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(lh.AuthCodeReqStatus.Details + closeTab))\n\t\treturn\n\t}\n\n\terr = fmt.Errorf(\"Authorization code missing code or state.\")\n\tlh.AuthCodeReqStatus = AuthorizationCodeStatus{Status: FAILED, Details: err.Error()}\n\n\tlh.authCode = AuthorizationCode{}\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(lh.AuthCodeReqStatus.Details + closeTab))\n\treturn\n}", "func CheckAuth(prefix string, repo *model.Repo) func(jqeventrouter.Handler) jqeventrouter.Handler {\n\treturn func(h jqeventrouter.Handler) jqeventrouter.Handler {\n\t\treturn jqeventrouter.HandlerFunc(func(event *jquery.Event, ui *js.Object, params url.Values) bool {\n\t\t\treqURL, _ := url.Parse(ui.Get(\"toPage\").String())\n\t\t\tif reqURL.Path == prefix+\"/callback.html\" {\n\t\t\t\t// Allow unauthenticated callback, needed by dev logins\n\t\t\t\treturn true\n\t\t\t}\n\t\t\t_, err := repo.CurrentUser()\n\t\t\tif err != nil && err != model.ErrNotLoggedIn {\n\t\t\t\tlog.Printf(\"Unknown error: %s\", err)\n\t\t\t}\n\t\t\tif err == model.ErrNotLoggedIn {\n\t\t\t\tredir := \"login.html\"\n\t\t\t\tlog.Debug(\"TODO: use params instead of re-parsing URL?\")\n\t\t\t\tparsed, _ := url.Parse(js.Global.Get(\"location\").String())\n\t\t\t\tfmt.Printf(\"params = %v\\nparsed = %v\\n\", params, parsed.Query())\n\t\t\t\tif p := parsed.Query().Get(\"provider\"); p != \"\" {\n\t\t\t\t\tredir = \"callback.html\"\n\t\t\t\t}\n\t\t\t\tlog.Printf(\"Redirecting unauthenticated user to %s\\n\", redir)\n\t\t\t\tlog.Debug(\"TODO: Do I need ui.Set *and* trigger before change here?\")\n\t\t\t\tui.Set(\"toPage\", redir)\n\t\t\t\tevent.StopImmediatePropagation()\n\t\t\t\tjquery.NewJQuery(\":mobile-pagecontainer\").Trigger(\"pagecontainerbeforechange\", ui)\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn h.HandleEvent(event, ui, url.Values{})\n\t\t})\n\t}\n}", "func RedirectHandler(c *gin.Context) {\n\t// Retrieve 
provider from route\n\tprovider := c.Param(\"provider\")\n\n\t// In this case we use a map to store our secrets, but you can use dotenv or your framework configuration\n\t// for example, in revel you could use revel.Config.StringDefault(provider + \"_clientID\", \"\") etc.\n\tproviderSecrets := map[string]map[string]string{\n\t\t\"github\": {\n\t\t\t\"clientID\": os.Getenv(\"CLIENT_ID_GH\"),\n\t\t\t\"clientSecret\": os.Getenv(\"CLIENT_SECRET_GH\"),\n\t\t\t\"redirectURL\": os.Getenv(\"AUTH_REDIRECT_URL\") + \"/github/callback\",\n\t\t},\n\t\t\"google\": {\n\t\t\t\"clientID\": os.Getenv(\"CLIENT_ID_G\"),\n\t\t\t\"clientSecret\": os.Getenv(\"CLIENT_SECRET_G\"),\n\t\t\t\"redirectURL\": os.Getenv(\"AUTH_REDIRECT_URL\") + \"/google/callback\",\n\t\t},\n\t}\n\n\tproviderScopes := map[string][]string{\n\t\t\"github\": []string{\"public_repo\"},\n\t\t\"google\": []string{},\n\t}\n\n\tproviderData := providerSecrets[provider]\n\tactualScopes := providerScopes[provider]\n\tauthURL, err := config.Gocial.New().\n\t\tDriver(provider).\n\t\tScopes(actualScopes).\n\t\tRedirect(\n\t\t\tproviderData[\"clientID\"],\n\t\t\tproviderData[\"clientSecret\"],\n\t\t\tproviderData[\"redirectURL\"],\n\t\t)\n\n\t// Check for errors (usually driver not valid)\n\tif err != nil {\n\t\tc.Writer.Write([]byte(\"Error: \" + err.Error()))\n\t\treturn\n\t}\n\n\t// Redirect with authURL\n\tc.Redirect(http.StatusFound, authURL)\n}", "func (a *Auth) RedirectHandler() http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tcode := r.URL.Query().Get(\"code\")\n\t\ttoken, err := a.cfg.Exchange(r.Context(), code)\n\t\tif err != nil {\n\t\t\ta.logf(\"Authentication failure for code %s: %s\", code, err)\n\t\t\thttp.Error(w, \"Authorization failure\", http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\n\t\t_, ok := token.Extra(\"id_token\").(string)\n\t\tif !ok {\n\t\t\ta.logf(\"Invalid ID token %v (%T)\", token.Extra(\"id_token\"), token.Extra(\"id_token\"))\n\t\t\thttp.Error(w, \"Internal error\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\terr = a.setCookie(w, fromOauth2(token))\n\t\tif err != nil {\n\t\t\ta.logf(\"Failed setting cookie: %v\", err)\n\t\t\thttp.Error(w, \"Internal error\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tredirectPath := r.URL.Query().Get(\"state\")\n\t\tif redirectPath == \"\" {\n\t\t\tredirectPath = \"/\"\n\t\t}\n\t\thttp.Redirect(w, r, redirectPath, http.StatusTemporaryRedirect)\n\t})\n}", "func (f AuthFlow) Run(ctx context.Context, client AuthFlowClient) error {\n\tif f.Auth == nil {\n\t\treturn xerrors.New(\"no UserAuthenticator provided\")\n\t}\n\tphone, err := f.Auth.Phone(ctx)\n\tif err != nil {\n\t\treturn xerrors.Errorf(\"get phone: %w\", err)\n\t}\n\thash, err := client.AuthSendCode(ctx, phone, f.Options)\n\tif err != nil {\n\t\treturn xerrors.Errorf(\"send code: %w\", err)\n\t}\n\tcode, err := f.Auth.Code(ctx)\n\tif err != nil {\n\t\treturn xerrors.Errorf(\"get code: %w\", err)\n\t}\n\n\tsignInErr := client.AuthSignIn(ctx, phone, code, hash)\n\n\tif errors.Is(signInErr, ErrPasswordAuthNeeded) {\n\t\tpassword, err := f.Auth.Password(ctx)\n\t\tif err != nil {\n\t\t\treturn xerrors.Errorf(\"get password: %w\", err)\n\t\t}\n\t\tif err := client.AuthPassword(ctx, password); err != nil {\n\t\t\treturn xerrors.Errorf(\"sign in with password: %w\", err)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tvar signUpRequired *SignUpRequired\n\tif errors.As(signInErr, &signUpRequired) {\n\t\tif err := f.Auth.AcceptTermsOfService(ctx, 
signUpRequired.TermsOfService); err != nil {\n\t\t\treturn xerrors.Errorf(\"confirm TOS: %w\", err)\n\t\t}\n\t\tif err := client.AuthAcceptTOS(ctx, signUpRequired.TermsOfService.ID); err != nil {\n\t\t\treturn xerrors.Errorf(\"accept TOS: %w\", err)\n\t\t}\n\t\tinfo, err := f.Auth.SignUp(ctx)\n\t\tif err != nil {\n\t\t\treturn xerrors.Errorf(\"sign up info not provided: %w\", err)\n\t\t}\n\t\tif err := client.AuthSignUp(ctx, SignUp{\n\t\t\tPhoneNumber: phone,\n\t\t\tPhoneCodeHash: hash,\n\t\t\tFirstName: info.FirstName,\n\t\t\tLastName: info.LastName,\n\t\t}); err != nil {\n\t\t\treturn xerrors.Errorf(\"sign up: %w\", err)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tif signInErr != nil {\n\t\treturn xerrors.Errorf(\"sign in: %w\", signInErr)\n\t}\n\n\treturn nil\n}", "func HandleLogin(w http.ResponseWriter, r *http.Request) (err error) {\n\tsession, err := cookieStore.Get(r, oauthSessionName)\n\tif err != nil {\n\t\tlog.Printf(\"corrupted session %s -- generated new\", err)\n\t\terr = nil\n\t}\n\n\tvar tokenBytes [255]byte\n\tif _, err := rand.Read(tokenBytes[:]); err != nil {\n\t\treturn AnnotateError(err, \"Couldn't generate a session!\", http.StatusInternalServerError)\n\t}\n\n\tstate := hex.EncodeToString(tokenBytes[:])\n\n\tsession.AddFlash(state, stateCallbackKey)\n\n\tif err = session.Save(r, w); err != nil {\n\t\treturn\n\t}\n\n\thttp.Redirect(w, r, oauth2Config.AuthCodeURL(state, claims), http.StatusTemporaryRedirect)\n\n\treturn\n}", "func UserAuthHandler(w http.ResponseWriter, req *http.Request) {\n // Parse & validate payload.\n var pl payload.UserAuthPayload\n\n if !pl.Validate(req) {\n respond.Error(w, errmsg.InvalidPayload())\n return\n }\n\n // Get user by email.\n user, err := usersvc.FromEmail(pl.Email)\n\n if err != nil {\n app.Log.Errorln(err.Error())\n respond.Error(w, errmsg.UserNotFound())\n return\n }\n\n // Ensure passwords match.\n if !crypt.VerifyBcrypt(pl.Password, user.HashedPw) {\n app.Log.Errorln(\"user auth error: invalid password\")\n respond.Error(w, errmsg.Unauthorized())\n return\n }\n\n // Create new session for user.\n session, err := sessionsvc.Create(user)\n\n if err != nil {\n app.Log.Errorln(err.Error())\n respond.Error(w, errmsg.SessionCreationFailed())\n return\n }\n\n // Put newly minted session token inside auth header.\n headers := map[string]string{\n app.Config.AuthHeaderName: session.Token,\n }\n\n // Respond with success and new auth token.\n respond.Ok(w, successmsg.UserLoginSuccess, headers)\n}", "func RedirectHandler(c *fiber.Ctx) {\n\tprovider := c.Params(\"provider\")\n\tif provider != \"google\" && provider != \"facebook\" {\n\t\terrors.SendErrors(c, http.StatusUnprocessableEntity, &[]string{\"unsupported provider\"})\n\t\treturn\n\t}\n\tcfg := config.GetInstance()\n\tproviderSecrets := map[string]map[string]string{\n\t\t\"facebook\": {\n\t\t\t\"clientID\": cfg.Facebook.ClientID,\n\t\t\t\"clientSecret\": cfg.Facebook.ClientSecret,\n\t\t\t\"redirectURL\": cfg.Domain+\"/api/auth/facebook/callback\",\n\t\t},\n\t\t\"google\": {\n\t\t\t\"clientID\": cfg.Google.ClientID,\n\t\t\t\"clientSecret\": cfg.Google.ClientSecret,\n\t\t\t\"redirectURL\": cfg.Domain + \"/api/auth/google/callback\",\n\t\t},\n\t}\n\tproviderData := providerSecrets[provider]\n\tauthURL, err := getGocialInstance().New().\n\t\tDriver(provider).\n\t\tRedirect(\n\t\t\tproviderData[\"clientID\"],\n\t\t\tproviderData[\"clientSecret\"],\n\t\t\tproviderData[\"redirectURL\"],\n\t\t)\n\n\t// Check for errors (usually driver not valid)\n\tif err != nil {\n\t\terrors.SendErrors(c, 
http.StatusInternalServerError, &[]string{err.Error()})\n\t\treturn\n\t}\n\t// Redirect with authURL\n\tc.Status(http.StatusOK).JSON(map[string]string{\"url\" : authURL})\n}", "func defaultAuthorizeFlowHandler(authorizeUrl string) (string, error) {\n\t// Print the url on console, let user authorize and paste the token back.\n\tfmt.Printf(\"Go to the following link in your browser:\\n\\n %s\\n\\n\", authorizeUrl)\n\tfmt.Println(\"Enter verification code: \")\n\tvar code string\n\tfmt.Scanln(&code)\n\treturn code, nil\n}", "func (a *Authenticator) MakeAuthHandler(handler khttp.FuncHandler) khttp.FuncHandler {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tdata, handled, err := a.PerformAuth(w, r)\n\t\tif err == nil && data.Creds != nil {\n\t\t\tctx := SetCredentials(r.Context(), data.Creds)\n\t\t\tr = r.WithContext(ctx)\n\t\t}\n\t\tif !handled {\n\t\t\thandler(w, r)\n\t\t}\n\t}\n}", "func (s *session) handlePASS(args []string) error {\n\tif s.username == \"\" {\n\t\treturn NewReportableError(\"please provide username first\")\n\t}\n\tif err := s.handler.AuthenticatePASS(s.username, args[0]); err != nil {\n\t\treturn err\n\t}\n\treturn s.signIn()\n}", "func MakeExternalAuthHandler(next http.HandlerFunc, upstreamTimeout time.Duration, upstreamURL string, passBody bool) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\treq, _ := http.NewRequest(http.MethodGet, upstreamURL, nil)\n\n\t\tcopyHeaders(req.Header, &r.Header)\n\n\t\tdeadlineContext, cancel := context.WithTimeout(\n\t\t\tcontext.Background(),\n\t\t\tupstreamTimeout)\n\n\t\tdefer cancel()\n\n\t\tres, err := http.DefaultClient.Do(req.WithContext(deadlineContext))\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\tlog.Printf(\"ExternalAuthHandler: %s\", err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tif res.Body != nil {\n\t\t\tdefer res.Body.Close()\n\t\t}\n\n\t\tif res.StatusCode == http.StatusOK {\n\t\t\tnext.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\tcopyHeaders(w.Header(), &res.Header)\n\t\tw.WriteHeader(res.StatusCode)\n\n\t\tif res.Body != nil {\n\t\t\tio.Copy(w, res.Body)\n\t\t}\n\t}\n}", "func (a *loginAPI) HandleOIDCLogin(w http.ResponseWriter, r *http.Request) error {\n\tctx := context.Background()\n\n\t// read the stateParam again\n\tstate := a.appCookie.Get(stateParam, r)\n\tlog.WithField(\"func\", \"server.HandleOIDCLogin\").Debugf(\"got state param: %s\", state)\n\n\tif query(r, stateParam) != state {\n\t\treturn errors.BadRequestError{Err: fmt.Errorf(\"state did not match\"), Request: r}\n\t}\n\ta.appCookie.Del(stateParam, w)\n\n\t// is this an auth/flow request\n\tvar (\n\t\tauthFlow bool\n\t\tsite, redirect string\n\t)\n\tauthFlowParams := a.appCookie.Get(authFlowCookie, r)\n\tif authFlowParams != \"\" {\n\t\tlog.WithField(\"func\", \"server.HandleOIDCLogin\").Debugf(\"auth/flow login-mode\")\n\t\tparts := strings.Split(authFlowParams, \"|\")\n\t\tsite = parts[0]\n\t\tredirect = parts[1]\n\t\tauthFlow = true\n\t}\n\ta.appCookie.Del(authFlowCookie, w)\n\n\toauth2Token, err := a.oauthConfig.Exchange(ctx, query(r, codeParam))\n\tif err != nil {\n\t\treturn errors.ServerError{Err: fmt.Errorf(\"failed to exchange token: %v\", err), Request: r}\n\t}\n\trawIDToken, ok := oauth2Token.Extra(idTokenParam).(string)\n\tif !ok {\n\t\treturn errors.ServerError{Err: fmt.Errorf(\"no id_token field in oauth2 token\"), Request: r}\n\t}\n\tidToken, err := a.oauthVerifier.VerifyToken(ctx, rawIDToken)\n\tif err != nil {\n\t\treturn errors.ServerError{Err: 
fmt.Errorf(\"failed to verify ID Token: %v\", err), Request: r}\n\t}\n\n\tvar oidcClaims struct {\n\t\tEmail string `json:\"email\"`\n\t\tEmailVerified bool `json:\"email_verified\"`\n\t\tDisplayName string `json:\"name\"`\n\t\tPicURL string `json:\"picture\"`\n\t\tGivenName string `json:\"given_name\"`\n\t\tFamilyName string `json:\"family_name\"`\n\t\tLocale string `json:\"locale\"`\n\t\tUserID string `json:\"sub\"`\n\t}\n\n\tif err := idToken.GetClaims(&oidcClaims); err != nil {\n\t\treturn errors.ServerError{Err: fmt.Errorf(\"claims error: %v\", err), Request: r}\n\t}\n\n\t// the user was authenticated successfully, check if sites are available for the given user!\n\tsuccess := true\n\tsites, err := a.repo.GetSitesByUser(oidcClaims.Email)\n\tif err != nil {\n\t\tlog.WithField(\"func\", \"server.HandleOIDCLogin\").Warnf(\"successfull login by '%s' but error fetching sites! %v\", oidcClaims.Email, err)\n\t\tsuccess = false\n\t}\n\n\tif len(sites) == 0 {\n\t\tlog.WithField(\"func\", \"server.HandleOIDCLogin\").Warnf(\"successfull login by '%s' but no sites availabel!\", oidcClaims.Email)\n\t\tsuccess = false\n\t}\n\n\tif authFlow {\n\t\tlog.WithField(\"func\", \"server.HandleOIDCLogin\").Debugf(\"auth/flow - check for specific site '%s'\", site)\n\t\tsuccess = false\n\t\t// check specific site\n\t\tfor _, e := range sites {\n\t\t\tif e.Name == site {\n\t\t\t\tsuccess = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tif !success {\n\t\ta.appCookie.Set(errors.FlashKeyError, fmt.Sprintf(\"User '%s' is not allowed to login!\", oidcClaims.Email), cookieExpiry, w)\n\t\thttp.Redirect(w, r, \"/error\", http.StatusTemporaryRedirect)\n\t\treturn nil\n\t}\n\n\t// create the token using the claims of the database\n\tvar siteClaims []string\n\tfor _, s := range sites {\n\t\tsiteClaims = append(siteClaims, fmt.Sprintf(\"%s|%s|%s\", s.Name, s.URL, s.PermList))\n\t}\n\tclaims := security.Claims{\n\t\tType: \"login.User\",\n\t\tDisplayName: oidcClaims.DisplayName,\n\t\tEmail: oidcClaims.Email,\n\t\tUserID: oidcClaims.UserID,\n\t\tUserName: oidcClaims.Email,\n\t\tGivenName: oidcClaims.GivenName,\n\t\tSurname: oidcClaims.FamilyName,\n\t\tClaims: siteClaims,\n\t}\n\ttoken, err := security.CreateToken(a.jwt.JwtIssuer, []byte(a.jwt.JwtSecret), a.jwt.Expiry, claims)\n\tif err != nil {\n\t\tlog.WithField(\"func\", \"server.HandleOIDCLogin\").Errorf(\"could not create a JWT token: %v\", err)\n\t\treturn errors.ServerError{Err: fmt.Errorf(\"error creating JWT: %v\", err), Request: r}\n\t}\n\n\tlogin := persistence.Login{\n\t\tUser: oidcClaims.Email,\n\t\tCreated: time.Now().UTC(),\n\t\tType: persistence.DIRECT,\n\t}\n\n\tif authFlow {\n\t\tlogin.Type = persistence.FLOW\n\t}\n\n\terr = a.repo.StoreLogin(login, per.Atomic{})\n\tif err != nil {\n\t\tlog.WithField(\"func\", \"server.HandleOIDCLogin\").Errorf(\"the login could not be saved: %v\", err)\n\t\treturn errors.ServerError{Err: fmt.Errorf(\"error storing the login: %v\", err), Request: r}\n\t}\n\n\t// set the cookie\n\texp := a.jwt.Expiry * 24 * 3600\n\ta.setJWTCookie(a.jwt.CookieName, token, exp, w)\n\n\tredirectURL := a.jwt.LoginRedirect\n\tif authFlow {\n\t\tlog.WithField(\"func\", \"server.HandleOIDCLogin\").Debugf(\"auth/flow - redirect to specific URL: '%s'\", redirect)\n\t\tredirectURL = redirect\n\t}\n\n\t// redirect to provided URL\n\thttp.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)\n\treturn nil\n}", "func authEndpoint(rw http.ResponseWriter, req *http.Request) {\n\n\t// request has to be POST\n\tif req.Method != \"POST\" 
{\n\t\thttp.Error(rw, \"bad method, only post allowed\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// has to be authenticated; in a real system we would use something more\n\t// secure like certificates etc.\n\tuser, _, ok := req.BasicAuth()\n\n\tif !ok {\n\t\thttp.Error(rw, \"authentication required\", http.StatusForbidden)\n\t\treturn\n\t}\n\n\tlog.Println(\"basic authentication successful for \", user)\n\n\t// now we issue token and return it\n\n\t// This context will be passed to all methods.\n\tctx := req.Context()\n\n\t// Create an empty session object which will be passed to the request handlers\n\tmySessionData := newSession(\"\")\n\n\t// This will create an access request object and iterate through the registered TokenEndpointHandlers to validate the request.\n\taccessRequest, err := fositeInstance.NewAccessRequest(ctx, req, mySessionData)\n\n\t// Catch any errors, e.g.:\n\t// * unknown client\n\t// * invalid redirect\n\t// * ...\n\tif err != nil {\n\t\tlog.Printf(\"Error occurred in NewAccessRequest: %+v\", err)\n\t\tfositeInstance.WriteAccessError(rw, accessRequest, err)\n\t\treturn\n\t}\n\n\t// If this is a client_credentials grant, grant all requested scopes\n\t// NewAccessRequest validated that all requested scopes the client is allowed to perform\n\t// based on configured scope matching strategy.\n\tif accessRequest.GetGrantTypes().ExactOne(\"client_credentials\") {\n\t\tfor _, scope := range accessRequest.GetRequestedScopes() {\n\t\t\taccessRequest.GrantScope(scope)\n\t\t}\n\t}\n\n\t// Next we create a response for the access request. Again, we iterate through the TokenEndpointHandlers\n\t// and aggregate the result in response.\n\tresponse, err := fositeInstance.NewAccessResponse(ctx, accessRequest)\n\tif err != nil {\n\t\tlog.Printf(\"Error occurred in NewAccessResponse: %+v\", err)\n\t\tfositeInstance.WriteAccessError(rw, accessRequest, err)\n\t\treturn\n\t}\n\n\t// All done, send the response.\n\tfositeInstance.WriteAccessResponse(rw, accessRequest, response)\n\n}", "func (h *Helper) Authenticate(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\th.authenticateWithErrHandler(w, r, next, HandleHttpError)\n}", "func RedirectHandler(w http.ResponseWriter, r *http.Request) {\n\tlog.Print(\"I AM HERE REDIRECTED\")\n\terr := r.ParseForm()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stdout, \"could not parse query: %s\", err.Error())\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tcode := r.FormValue(\"code\")\n\n\treqURL := fmt.Sprintf(\"https://github.com/login/oauth/access_token?client_id=%s&client_secret=%s&code=%s\", ClientID, ClientSecret, code)\n\treq, err := http.NewRequest(http.MethodPost, reqURL, nil)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stdout, \"could not create HTTP request: %s\", err.Error())\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\treq.Header.Set(http.CanonicalHeaderKey(\"accept\"), \"application/json\")\n\treq.Header.Set(\"X-OAuth-Scopes\", \"gists\")\n\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stdout, \"could not send HTTP request: %s\", err.Error())\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tdefer res.Body.Close()\n\t// Parse the response body into the `OAuthAccessResponse` struct\n\tvar t OAuthAccessResponse\n\tif err := json.NewDecoder(res.Body).Decode(&t); err != nil {\n\t\tfmt.Fprintf(os.Stdout, \"could not parse JSON response: %s\", err.Error())\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tSession.AccessToken = t.AccessToken\n\n\tw.WriteHeader(http.StatusFound)\n\tw.Write([]byte(\"OK\"))\n}", 
"func HandleLogin(w http.ResponseWriter, r *http.Request) {\n\terr := r.ParseForm()\n\tif err != nil {\n\t\tServeHandleIncorrect(w, r)\n\t\treturn\n\t}\n\tvalues := LoginFormValues{}\n\tdecoder := schema.NewDecoder()\n\terr = decoder.Decode(&values, r.PostForm)\n\tif err != nil {\n\t\tServeInternalServerError(w, r)\n\t\treturn\n\t}\n\n\tacc, err := data.GetAccountByHandle(values.Handle)\n\tif err == mgo.ErrNotFound {\n\t\tServeHandleIncorrect(w, r)\n\t\treturn\n\t}\n\tif err != nil {\n\t\tServeInternalServerError(w, r)\n\t\treturn\n\t}\n\tm := acc.Password.Match(values.Password)\n\tif !m {\n\t\thttp.Redirect(w, r, \"/login\", http.StatusSeeOther)\n\t\treturn\n\t}\n\n\tsess, err := store.Get(r, \"s\")\n\tif err != nil {\n\t\tServeInternalServerError(w, r)\n\t\treturn\n\t}\n\tsess.Values[\"accountID\"] = acc.ID.Hex()\n\tsess.Save(r, w)\n\thttp.Redirect(w, r, \"/tasks\", http.StatusSeeOther)\n}", "func authHandler(w io.Writer, r *http.Request) error {\n\treturn errors.New(\"authHandler not implemented\")\n\n\t// Obtain the urls that users should follow to log in and out.\n\t// If there's a user currently logged in obtain its email.\n\t// Then encode all that data as a JSON object into the given io.Writer\n\t// - user.LoginURL : http://golang.org/s/users#LoginURL\n\t// - user.LogoutURL : http://golang.org/s/users#LogoutURL\n\t// - user.Current : http://golang.org/s/users#Current\n}", "func authorizeFlow(secret map[string]interface{}, scope string, handler func(string) (string, error)) (string, error) {\n\t// Marshaw a url to be printed on console. In web based oauth flow, the\n\t// browser should redirect the user to this url\n\tparams := url.Values{\n\t\t\"access_type\": []string{\"offline\"},\n\t\t\"auth_provider_x509_cert_url\": nil,\n\t\t\"redirect_uri\": []string{oobCallbackUrn},\n\t\t\"response_type\": []string{\"code\"},\n\t\t\"client_id\": nil,\n\t\t\"scope\": []string{scope},\n\t\t\"project_id\": nil,\n\t}\n\n\tfor key := range params {\n\t\tif val, ok := secret[key]; ok {\n\t\t\tparams.Set(key, toString(val))\n\t\t}\n\t}\n\n\t// Call the handler function to handle the authorize url and get back\n\t// the verification code.\n\treturn handler(toString(secret[\"auth_uri\"]) + \"?\" + params.Encode())\n}", "func HandleSamlLogin(w http.ResponseWriter, r *http.Request) {\n\tvar redirectBackBaseValue string\n\ts := server.SamlServiceProvider\n\n\ts.XForwardedProto = r.Header.Get(\"X-Forwarded-Proto\")\n\n\tif r.URL.Query() != nil {\n\t\tredirectBackBaseValue = r.URL.Query().Get(redirectBackBase)\n\t\tif redirectBackBaseValue == \"\" {\n\t\t\tredirectBackBaseValue = server.GetRancherAPIHost()\n\t\t}\n\t} else {\n\t\tredirectBackBaseValue = server.GetRancherAPIHost()\n\t}\n\n\tif !isWhitelisted(redirectBackBaseValue, s.RedirectWhitelist) {\n\t\tlog.Errorf(\"Cannot redirect to anything other than whitelisted domains and rancher api host\")\n\t\tredirectBackPathValue := r.URL.Query().Get(redirectBackPath)\n\t\tredirectURL := server.GetSamlRedirectURL(server.GetRancherAPIHost(), redirectBackPathValue)\n\t\tredirectURL = addErrorToRedirect(redirectURL, \"422\")\n\t\thttp.Redirect(w, r, redirectURL, http.StatusFound)\n\t\treturn\n\t}\n\n\tserviceProvider := s.ServiceProvider\n\tif r.URL.Path == serviceProvider.AcsURL.Path {\n\t\treturn\n\t}\n\n\tbinding := saml.HTTPRedirectBinding\n\tbindingLocation := serviceProvider.GetSSOBindingLocation(binding)\n\tif bindingLocation == \"\" {\n\t\tbinding = saml.HTTPPostBinding\n\t\tbindingLocation = 
serviceProvider.GetSSOBindingLocation(binding)\n\t}\n\n\treq, err := serviceProvider.MakeAuthenticationRequest(bindingLocation)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\t// relayState is limited to 80 bytes but also must be integrity protected.\n\t// This means that we cannot use a JWT because it is way too long. Instead\n\t// we set a cookie that corresponds to the state\n\trelayState := base64.URLEncoding.EncodeToString(randomBytes(42))\n\n\tsecretBlock := x509.MarshalPKCS1PrivateKey(serviceProvider.Key)\n\tstate := jwt.New(jwt.SigningMethodHS256)\n\tclaims := state.Claims.(jwt.MapClaims)\n\tclaims[\"id\"] = req.ID\n\tclaims[\"uri\"] = r.URL.String()\n\tsignedState, err := state.SignedString(secretBlock)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\ts.ClientState.SetState(w, r, relayState, signedState)\n\n\tif binding == saml.HTTPRedirectBinding {\n\t\tredirectURL := req.Redirect(relayState)\n\t\tw.Header().Add(\"Location\", redirectURL.String())\n\t\tw.WriteHeader(http.StatusFound)\n\t\treturn\n\t}\n\tif binding == saml.HTTPPostBinding {\n\t\tw.Header().Add(\"Content-Security-Policy\", \"\"+\n\t\t\t\"default-src; \"+\n\t\t\t\"script-src 'sha256-AjPdJSbZmeWHnEc5ykvJFay8FTWeTeRbs9dutfZ0HqE='; \"+\n\t\t\t\"reflected-xss block; referrer no-referrer;\")\n\t\tw.Header().Add(\"Content-type\", \"text/html\")\n\t\tw.Write([]byte(`<!DOCTYPE html><html><body>`))\n\t\tw.Write(req.Post(relayState))\n\t\tw.Write([]byte(`</body></html>`))\n\t\treturn\n\t}\n}", "func redirectHandler(c *gin.Context) {\n\t// Retrieve provider from route\n\tprovider := c.Param(\"provider\")\n\n\t// data coming from github\n\tproviderSecrets := map[string]map[string]string{\n\t\t\"github\": {\n\t\t\t\"clientID\": \"b9563aec19bb264601a1\",\n\t\t\t\"clientSecret\": \"6c5cd9388386a6461a007576f4bfba1a7d144408\",\n\t\t\t\"redirectURL\": \"http://localhost:8090/api/socialLogin/auth/github/callback\",\n\t\t},\n\t}\n\n\tproviderScopes := map[string][]string{\n\t\t\"github\": []string{\"public_repo\"},\n\t}\n\n\tproviderData := providerSecrets[provider]\n\tactualScopes := providerScopes[provider]\n\tauthURL, err := gocial.New().\n\t\tDriver(provider).\n\t\tScopes(actualScopes).\n\t\tRedirect(\n\t\t\tproviderData[\"clientID\"],\n\t\t\tproviderData[\"clientSecret\"],\n\t\t\tproviderData[\"redirectURL\"],\n\t\t)\n\n\t// Check for errors (usually driver not valid)\n\tif err != nil {\n\t\tc.Writer.Write([]byte(\"Error: \" + err.Error()))\n\t\treturn\n\t}\n\t// Redirect with authURL\n\tc.Redirect(http.StatusFound, authURL)\n}", "func (app *application) authenticate(next http.Handler) http.Handler {\r\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\r\n\t\t// if a userID value does not exist in the\r\n\t\t// session cookie, call the next handler\r\n\t\texists := app.session.Exists(r, \"userid\")\r\n\t\tif !exists {\r\n\t\t\tnext.ServeHTTP(w, r)\r\n\t\t\treturn\r\n\t\t}\r\n\r\n\t\t// else, check that the userID value exists in\r\n\t\t// the database. 
If no matching record is found\r\n\t\t// remove the invalid userID value from the\r\n\t\t// session cookie and call the next handler.\r\n\t\tuser, err := app.users.Get(app.session.GetString(r, \"userid\"))\r\n\t\tif errors.Is(err, models.ErrNoRecord) {\r\n\t\t\tapp.session.Remove(r, \"authUser\")\r\n\t\t\tnext.ServeHTTP(w, r)\r\n\t\t\treturn\r\n\t\t} else if err != nil {\r\n\t\t\tapp.errorLog.Println(ErrMySQL, err)\r\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\r\n\t\t\tapp.render(w, r, \"error.page.tmpl\", &templateData{\r\n\t\t\t\tError: http.StatusText(http.StatusInternalServerError),\r\n\t\t\t})\r\n\t\t\treturn\r\n\t\t}\r\n\r\n\t\t// else, the request is coming from an authenticated user\r\n\t\t// Create a new copy of the request, with an AuthUser value\r\n\t\t// added to the request context to indicate this, and call\r\n\t\t// the next handler with this new copy of the request\r\n\t\tau := AuthUser{user.UserID, user.Seller, user.Verified}\r\n\t\tctx := context.WithValue(r.Context(), contextKeyAuthUser, au)\r\n\t\tnext.ServeHTTP(w, r.WithContext(ctx))\r\n\t})\r\n}", "func Authenticate(next buffalo.Handler) buffalo.Handler {\n\treturn func(c buffalo.Context) error {\n\t\t// do some work before calling the next handler\n\t\tclient, err := FirebaseApp.Auth(context.Background())\n\n\t\tidToken := c.Request().Header.Get(\"Authorization\")\n\t\tidToken = strings.Replace(idToken, `bearer `, \"\", 1)\n\t\tif ENV == \"development\" || ENV == \"test\" {\n\t\t\tfmt.Println(\"Authorization\", idToken)\n\t\t}\n\t\ttoken, err := client.VerifyIDToken(idToken)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"error verifying ID token: %v\\n\", err)\n\t\t\tresponse := Response{\n\t\t\t\tMessage: \"Missing or invalid token.\",\n\t\t\t}\n\t\t\tc.Response().WriteHeader(http.StatusUnauthorized)\n\t\t\tjson.NewEncoder(c.Response()).Encode(response)\n\t\t\treturn err\n\t\t}\n\n\t\tif err := setCurrentUser(token.UID, c); err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t\terr = next(c)\n\t\treturn err\n\t}\n}", "func (h *Handler) serveAuthenticateDBUser(w http.ResponseWriter, r *http.Request) {}", "func handleAuth(conn *userConn, content []byte) {\n\tvar msg cliproto_up.Authenticate\n\tif err := proto.Unmarshal(content, &msg); err != nil {\n\t\tconn.conn.Close()\n\t\treturn\n\t}\n\n\t// Try to get information, then release locks and hash the password.\n\t// If the situation changes we may have to hash it again anyway,\n\t// but scrypt hashing is extremely expensive and we want to try to\n\t// do this without holding our locks in the vast majority of cases.\n\tvar tryPassGenerated bool\n\tvar trySaltGenerated bool\n\tvar tryPass, trySalt, tryKey string\n\tif *msg.Password != \"\" {\n\t\ttryPass = *msg.Password\n\n\t\tstore.StartTransaction()\n\n\t\tuserId := store.NameLookup(\"user\", \"name username\",\n\t\t\t*msg.Username)\n\t\tif userId != 0 {\n\t\t\tuser := store.GetEntity(userId)\n\t\t\tif user != nil {\n\t\t\t\ttrySalt = user.Value(\"auth salt\")\n\t\t\t}\n\t\t}\n\n\t\tstore.EndTransaction()\n\n\t\tif trySalt == \"\" {\n\t\t\ttrySaltGenerated = true\n\n\t\t\tvar err error\n\t\t\ttrySalt, tryKey, err = genRandomSalt(conn,\n\t\t\t\t[]byte(tryPass))\n\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tvar err error\n\t\t\ttryPass, err = genKey(conn, []byte(tryPass),\n\t\t\t\t[]byte(trySalt))\n\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t} else {\n\t\ttryPassGenerated = true\n\t\ttrySaltGenerated = true\n\n\t\tvar err error\n\t\ttryPass, trySalt, tryKey, err = 
genRandomPass(conn)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\t// TODO: Validate username and password against constraints.\n\n\t// We hold this through quite a lot of logic.\n\t// Would be good to be locked less.\n\tstore.StartTransaction()\n\tdefer store.EndTransaction()\n\tsessionsLock.Lock()\n\tdefer sessionsLock.Unlock()\n\twaitingLock.Lock()\n\tdefer waitingLock.Unlock()\n\n\tif conn.session != 0 || conn.waitingAuth != nil {\n\t\tconn.conn.Close()\n\t\treturn\n\t}\n\n\tuserId := store.NameLookup(\"user\", \"name username\", *msg.Username)\n\tif userId != 0 {\n\t\tif *msg.Password == \"\" {\n\t\t\tsendAuthFail(conn, \"Invalid Password\")\n\t\t\treturn\n\t\t}\n\n\t\t// The user already exists.\n\t\tuser := store.GetEntity(userId)\n\n\t\t// Try to authenticate them to it.\n\t\tvar key string\n\n\t\t// If their salt and password matches our attempt above,\n\t\t// we can just take that key.\n\t\tsalt := user.Value(\"auth salt\")\n\t\tif trySalt == salt && tryPass == *msg.Password {\n\t\t\tkey = tryKey\n\t\t} else {\n\t\t\tsaltBytes := []byte(user.Value(\"auth salt\"))\n\t\t\tpassBytes := []byte(*msg.Password)\n\t\t\tvar err error\n\t\t\tkey, err = genKey(conn, passBytes, saltBytes)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tif user.Value(\"auth password\") != string(key) {\n\t\t\tsendAuthFail(conn, \"Invalid Password\")\n\t\t\treturn\n\t\t}\n\n\t\t// It's the real user.\n\t\tif *msg.SessionId != 0 {\n\n\t\t\t// They are attaching to an existing session.\n\t\t\t// Check it exists and is attached to this user.\n\t\t\tstrSessionId := strconv.FormatUint(*msg.SessionId, 10)\n\t\t\tif user.Value(\"attach \"+strSessionId) == \"\" {\n\t\t\t\tsendAuthFail(conn, \"Invalid Session\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// The session does exist.\n\t\t\tconn.session = *msg.SessionId\n\n\t\t\t// If this node is already attached to\n\t\t\t// the session, drop the other connection.\n\t\t\tif sessions[conn.session] != nil {\n\t\t\t\tsessions[conn.session].conn.Close()\n\t\t\t} else {\n\t\t\t\t// Create change attaching this node ID\n\t\t\t\t// to the session.\n\t\t\t\tid := config.Id()\n\t\t\t\tidStr := strconv.FormatUint(uint64(id), 10)\n\n\t\t\t\tchset := make([]store.Change, 1)\n\t\t\t\tchset[0].TargetEntity = conn.session\n\t\t\t\tchset[0].Key = \"attach \" + idStr\n\t\t\t\tchset[0].Value = \"true\"\n\t\t\t\treq := makeRequest(chset)\n\t\t\t\tgo chrequest.Request(req)\n\t\t\t}\n\n\t\t\t// Put us in the sessions map.\n\t\t\tsessions[conn.session] = conn\n\n\t\t\t// Tell the client they authenticated successfully.\n\t\t\tsendAuthSuccess(conn, \"\")\n\t\t} else {\n\t\t\t// They are creating a new session.\n\t\t\treq := makeNewSessionRequest(userId)\n\t\t\tgo chrequest.Request(req)\n\n\t\t\t// Stuff details in waiting auth.\n\t\t\tconn.waitingAuth = new(authData)\n\t\t\tconn.waitingAuth.msg = msg\n\t\t\tconn.waitingAuth.requestId = req.RequestId\n\t\t\twaiting[conn.waitingAuth.requestId] = conn\n\t\t}\n\t} else {\n\t\t// The user does not already exist.\n\t\t// Check they weren't trying to attach to a session.\n\t\tif *msg.SessionId != 0 {\n\t\t\tsendAuthFail(conn, \"User Does Not Exist\")\n\t\t\treturn\n\t\t}\n\n\t\t// We're creating a new user.\n\t\tnewUser := *msg.Username\n\t\tnewPass := *msg.Password\n\n\t\tif !strings.HasPrefix(newUser, \"Guest-\") {\n\n\t\t\t// We're creating a new non-guest user.\n\t\t\t// Make sure they have a password.\n\t\t\tif newPass == \"\" {\n\t\t\t\tsendAuthFail(conn, \"No Password\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tvar salt string\n\t\t\tvar 
hash string\n\t\t\tif tryPass == newPass && trySaltGenerated {\n\t\t\t\tsalt = trySalt\n\t\t\t\thash = tryKey\n\t\t\t} else {\n\t\t\t\tpassBytes := []byte(newPass)\n\n\t\t\t\tvar err error\n\t\t\t\tsalt, hash, err = genRandomSalt(conn, passBytes)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Create the new user.\n\t\t\treq := makeNewUserRequest(newUser, hash, salt, false)\n\t\t\tgo chrequest.Request(req)\n\n\t\t\t// Stuff details in waiting auth.\n\t\t\tconn.waitingAuth = new(authData)\n\t\t\tconn.waitingAuth.msg = msg\n\t\t\tconn.waitingAuth.requestId = req.RequestId\n\t\t\twaiting[conn.waitingAuth.requestId] = conn\n\n\t\t\treturn\n\t\t}\n\n\t\t// We're creating a new guest user.\n\t\t// Guests get automatic passwords, and can't set them.\n\t\tif newPass != \"\" {\n\t\t\tsendAuthFail(conn, \"Cannot Set Password For Guest User\")\n\t\t\treturn\n\t\t}\n\n\t\tvar hash string\n\t\tvar salt string\n\t\tif tryPassGenerated && trySaltGenerated {\n\t\t\tnewPass = tryPass\n\t\t\tsalt = trySalt\n\t\t\thash = tryKey\n\t\t} else {\n\t\t\tvar err error\n\t\t\tnewPass, salt, hash, err = genRandomPass(conn)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\twaitingLock.Lock()\n\n\t\t// Create the new user.\n\t\treq := makeNewUserRequest(newUser, hash, salt, true)\n\t\tgo chrequest.Request(req)\n\n\t\t// Stuff details in waiting auth.\n\t\tconn.waitingAuth = new(authData)\n\t\tconn.waitingAuth.msg = msg\n\t\tconn.waitingAuth.requestId = req.RequestId\n\t\twaiting[conn.waitingAuth.requestId] = conn\n\n\t\twaitingLock.Unlock()\n\n\t\treturn\n\t}\n}", "func HandleAuthenticateDispenser(w http.ResponseWriter, r *http.Request) {\n\t// Read auth token from request\n\tvar auth DispenserAuth\n\n\terr := utils.ReadJSONFromRequest(r, &auth)\n\n\tif err != nil {\n\t\tutils.WriteError(w, utils.BadRequestError(err))\n\t\treturn\n\t}\n\n\t// Try to authenticate the dispenser\n\ttoken, err := AuthenticateDispenser(auth)\n\n\tif err != nil {\n\t\tutils.WriteError(w, err)\n\t\treturn\n\t}\n\n\t// Return session token to user\n\tutils.WriteJSON(w, token)\n}", "func (s *service) authenticator(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tq := r.URL.Query()\n\t\tif q.Get(\"auth_token\") == \"\" || s.cfg.APIToken == \"\" || q.Get(\"auth_token\") != s.cfg.APIToken {\n\t\t\thttp.Error(w, http.StatusText(401), 401)\n\t\t\treturn\n\t\t}\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func (client *OAuthClient) DoAuth(rToken *oauth.RequestToken) error {\n\taccessToken, err := client.OAuthConsumer.AuthorizeToken(rToken, \"\")\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient.http, err = client.OAuthConsumer.MakeHttpClient(accessToken)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (rh *RealmRedirect) Handle(w http.ResponseWriter, req *http.Request) {\n\tsegments := strings.Split(req.URL.Path, \"/\")\n\t// last path segment is the base64d realm ID which we will pass the incoming request to\n\tbase64realmID := segments[len(segments)-1]\n\tbytesRealmID, err := base64.RawURLEncoding.DecodeString(base64realmID)\n\trealmID := string(bytesRealmID)\n\tif err != nil {\n\t\tlog.WithError(err).WithField(\"base64_realm_id\", base64realmID).Print(\n\t\t\t\"Not a b64 encoded string\",\n\t\t)\n\t\tw.WriteHeader(400)\n\t\treturn\n\t}\n\n\trealm, err := rh.DB.LoadAuthRealm(realmID)\n\tif err != nil {\n\t\tlog.WithError(err).WithField(\"realm_id\", realmID).Print(\"Failed to load 
realm\")\n\t\tw.WriteHeader(404)\n\t\treturn\n\t}\n\tlog.WithFields(log.Fields{\n\t\t\"realm_id\": realmID,\n\t}).Print(\"Incoming realm redirect request\")\n\trealm.OnReceiveRedirect(w, req)\n}", "func AuthHandler(handler AuthorisedRequestHandler) httprouter.Handle {\n\treturn func(w http.ResponseWriter, r *http.Request, params httprouter.Params) {\n\t\t// Measure time spent executing shit\n\t\tstart := time.Now()\n\n\t\t// Authorize request\n\t\terr := authorize(r)\n\t\tif err != nil {\n\t\t\t// Logs [source IP] [request method] [request URL] [HTTP status] [time spent serving request]\n\t\t\tlog.Printf(\"%v\\t \\\"%v - %v\\\"\\t%v\\t%v\", sourceIP(r), r.Method, r.RequestURI, http.StatusUnauthorized, time.Since(start))\n\t\t\thttp.Error(w, err.Error(), http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\n\t\t// Pass to the real handler\n\t\tresponse, statusCode, err := handler(r, params)\n\n\t\t// Logs [source IP] [request method] [request URL] [HTTP status] [time spent serving request]\n\t\tlog.Printf(\"%v\\t \\\"%v - %v\\\"\\t%v\\t%v\", sourceIP(r), r.Method, r.RequestURI, statusCode, time.Since(start))\n\n\t\tif err != nil {\n\t\t\t// If we run into an error, throw it back to the client (as plain text)\n\t\t\thttp.Error(w, err.Error(), statusCode)\n\t\t\treturn\n\t\t}\n\n\t\tw.WriteHeader(statusCode)\n\t\tfmt.Fprintln(w, response)\n\t}\n}", "func (a *Authenticator) PerformAuth(w http.ResponseWriter, r *http.Request, co ...kcookie.Modifier) (AuthData, bool, error) {\n\tauth, err := a.ExtractAuth(w, r)\n\tif err != nil {\n\t\treturn AuthData{}, false, err\n\t}\n\n\thttp.SetCookie(w, a.CredentialsCookie(auth.Cookie, co...))\n\n\tif auth.Target != \"\" {\n\t\thttp.Redirect(w, r, auth.Target, http.StatusTemporaryRedirect)\n\t\treturn auth, true, nil\n\t}\n\treturn auth, false, nil\n}", "func (s *Controller) HandleGoogleLoginOrRegister(c *gin.Context) {\n\tgoogleOauthConfig = &oauth2.Config{\n\t\tRedirectURL: \"http://localhost:8080/callback\",\n\t\tClientID: os.Getenv(\"GOOGLE_CLIENT_ID\"),\n\t\tClientSecret: os.Getenv(\"GOOGLE_CLIENT_SECRET\"),\n\t\tScopes: []string{\n\t\t\t\"https://www.googleapis.com/auth/userinfo.email\",\n\t\t\t\"https://www.googleapis.com/auth/userinfo.profile\",\n\t\t},\n\t\tEndpoint: google.Endpoint,\n\t}\n\turl := googleOauthConfig.AuthCodeURL(oauthStateString)\n\tc.Redirect(http.StatusTemporaryRedirect, url)\n}", "func (a *AuthMock) Authenticate(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif ic, ok := w.(injectContext); ok {\n\t\t\tnext.ServeHTTP(auth.ResponseContext{ResponseWriter: w, Auth: auth.Context(ic.claims)}, r)\n\t\t\treturn\n\t\t}\n\t\tnext.ServeHTTP(auth.ResponseContext{ResponseWriter: w, Auth: auth.Context{}}, r)\n\t})\n}", "func requireLogin(next http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, req *http.Request) {\n\t\tif !isAuthenticated(req) {\n\t\t\thttp.Redirect(w, req, \"/\", http.StatusFound)\n\t\t\treturn\n\t\t}\n\t\tnext.ServeHTTP(w, req)\n\t}\n\treturn http.HandlerFunc(fn)\n}", "func MustAuth(next http.Handler) http.Handler {\n\treturn &authHandler{\n\t\tnext: next,\n\t}\n}", "func (a *loginAPI) HandleOIDCRedirectFinal(w http.ResponseWriter, r *http.Request) error {\n\tstate := a.appCookie.Get(stateParam, r)\n\tif state == \"\" {\n\t\tlog.WithField(\"func\", \"server.HandleOIDCRedirectFinal\").Debugf(\"emptiy state from cookie, referrer: '%s'\", r.Referer())\n\t\treturn errors.BadRequestError{Err: fmt.Errorf(\"missing state, cannot initiate OIDC\"), Request: 
r}\n\t}\n\tlog.WithField(\"func\", \"server.HandleOIDCRedirectFinal\").Debugf(\"initiate OIDC redirect using state: '%s'\", state)\n\thttp.Redirect(w, r, a.oauthConfig.AuthCodeURL(state), http.StatusFound)\n\treturn nil\n}", "func LoginHandler(c echo.Context) error {\n\tprovider, err := gomniauth.Provider(c.Param(\"provider\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tauthURL, err := provider.GetBeginAuthURL(nil, nil)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn c.Redirect(http.StatusTemporaryRedirect, authURL)\n}", "func AuthController(res http.ResponseWriter, req *http.Request) {\n\tgothic.BeginAuthHandler(res, req)\n}", "func (c *APIController) AuthGoogle() {\n\tcode := c.Input().Get(\"code\")\n\tstate := c.Input().Get(\"state\")\n\taddition := c.Input().Get(\"addition\")\n\tRedirectURL := c.Input().Get(\"redirect_url\")\n\n\tvar resp Response\n\tvar res authResponse\n\tres.IsAuthenticated = true\n\n\tif state != beego.AppConfig.String(\"GoogleAuthState\") {\n\t\tres.IsAuthenticated = false\n\t\tresp = Response{Status: \"fail\", Msg: \"unauthorized\", Data: res}\n\t\tc.Data[\"json\"] = resp\n\t\tc.ServeJSON()\n\t\treturn\n\t}\n\n\tgoogleOauthConfig.RedirectURL = RedirectURL\n\n\t// https://github.com/golang/oauth2/issues/123#issuecomment-103715338\n\tctx := context.WithValue(oauth2.NoContext, oauth2.HTTPClient, httpClient)\n\ttoken, err := googleOauthConfig.Exchange(ctx, code)\n\tif err != nil {\n\t\tres.IsAuthenticated = false\n\t\tpanic(err)\n\t}\n\n\tresponse, err := httpClient.Get(\"https://www.googleapis.com/oauth2/v2/userinfo?alt=json&access_token=\" + token.AccessToken)\n\tdefer response.Body.Close()\n\tcontents, err := ioutil.ReadAll(response.Body)\n\n\tvar tempUser userInfoFromGoogle\n\terr = json.Unmarshal(contents, &tempUser)\n\tif err != nil {\n\t\tres.IsAuthenticated = false\n\t\tpanic(err)\n\t}\n\tres.Email = tempUser.Email\n\tres.Avatar = tempUser.Picture\n\n\tif addition == \"signup\" {\n\t\tuserId := object.HasGoogleAccount(res.Email)\n\t\tif userId != \"\" {\n\t\t\tif len(object.GetMemberAvatar(userId)) == 0 {\n\t\t\t\tavatar := UploadAvatarToOSS(res.Avatar, userId)\n\t\t\t\tobject.LinkMemberAccount(userId, \"avatar\", avatar)\n\t\t\t}\n\t\t\tc.SetSessionUser(userId)\n\t\t\tutil.LogInfo(c.Ctx, \"API: [%s] signed in\", userId)\n\t\t\tres.IsSignedUp = true\n\t\t} else {\n\t\t\tif userId := object.HasMail(res.Email); userId != \"\" {\n\t\t\t\tc.SetSessionUser(userId)\n\t\t\t\tutil.LogInfo(c.Ctx, \"API: [%s] signed in\", userId)\n\t\t\t\tres.IsSignedUp = true\n\t\t\t\t_ = object.LinkMemberAccount(userId, \"google_account\", tempUser.Email)\n\t\t\t} else {\n\t\t\t\tres.IsSignedUp = false\n\t\t\t}\n\t\t}\n\t\tres.Addition = res.Email\n\t\tresp = Response{Status: \"ok\", Msg: \"success\", Data: res}\n\t} else {\n\t\tmemberId := c.GetSessionUser()\n\t\tif memberId == \"\" {\n\t\t\tresp = Response{Status: \"fail\", Msg: \"no account exist\", Data: res}\n\t\t\tc.Data[\"json\"] = resp\n\t\t\tc.ServeJSON()\n\t\t\treturn\n\t\t}\n\t\tlinkRes := object.LinkMemberAccount(memberId, \"google_account\", res.Email)\n\t\tif linkRes {\n\t\t\tresp = Response{Status: \"ok\", Msg: \"success\", Data: linkRes}\n\t\t} else {\n\t\t\tresp = Response{Status: \"fail\", Msg: \"link account failed\", Data: linkRes}\n\t\t}\n\t\tif len(object.GetMemberAvatar(memberId)) == 0 {\n\t\t\tavatar := UploadAvatarToOSS(res.Avatar, memberId)\n\t\t\tobject.LinkMemberAccount(memberId, \"avatar\", avatar)\n\t\t}\n\t}\n\n\tc.Data[\"json\"] = resp\n\n\tc.ServeJSON()\n}", "func authSelect(c buffalo.Context) 
error {\n\tauthType := c.Param(AuthTypeParam)\n\tif authType == \"\" {\n\t\tauthErr := authError{\n\t\t\thttpStatus: http.StatusBadRequest,\n\t\t\terrorKey: api.ErrorMissingAuthType,\n\t\t\terrorMsg: AuthTypeParam + \" is required to login\",\n\t\t}\n\t\treturn authRequestError(c, authErr)\n\t}\n\n\textras := map[string]interface{}{\"authType\": authType}\n\n\tap, err := getSocialAuthProvider(authType)\n\tif err != nil {\n\t\tauthErr := authError{\n\t\t\thttpStatus: http.StatusBadRequest,\n\t\t\terrorKey: api.ErrorLoadingAuthProvider,\n\t\t\terrorMsg: fmt.Sprintf(\"error loading social auth provider, %v\", err),\n\t\t}\n\t\treturn authRequestError(c, authErr, extras)\n\t}\n\n\tredirectURL, err := ap.AuthRequest(c)\n\tif err != nil {\n\t\tauthErr := authError{\n\t\t\thttpStatus: http.StatusInternalServerError,\n\t\t\terrorKey: api.ErrorGettingAuthURL,\n\t\t\terrorMsg: fmt.Sprintf(\"error getting social auth url, %v\", err),\n\t\t}\n\t\treturn authRequestError(c, authErr, extras)\n\t}\n\n\tc.Session().Set(SocialAuthTypeSessionKey, authType)\n\n\treturn c.Redirect(http.StatusFound, redirectURL)\n}", "func (s *Services) authorize(handler func(wr http.ResponseWriter, req *http.Request, uid uint64)) http.HandlerFunc {\n\treturn func(wr http.ResponseWriter, req *http.Request) {\n\t\t// TODO: Save the requested url in a cookie that can be redirected to after logging in successfully\n\t\tuid, err := s.auth.Authorize(wr, req)\n\t\tif err != nil {\n\t\t\tlog.Print(err)\n\t\t\thttp.Redirect(wr, req, \"/login\", 302)\n\t\t\treturn\n\t\t}\n\n\t\thandler(wr, req, uid)\n\t}\n}", "func oauthCallbackHandler(w http.ResponseWriter, r *http.Request) {\n\ttransport := &oauth.Transport{Config: &oauthProviderConfig.oauthConfig}\n\ttransport.Exchange(r.FormValue(\"code\"))\n\tclient := transport.Client()\n\tresponse, err := client.Get(oauthProviderConfig.UserInfoAPI)\n\tif err != nil {\n\t\tlog.Printf(\"Error while contacting '%s': %s\\n\", oauthProviderConfig.UserInfoAPI, err)\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\tbody, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Error while parsing response from '%s': %s\\n\", oauthProviderConfig.UserInfoAPI, err)\n\t\thttp.Error(w, http.StatusText(http.StatusBadGateway), http.StatusBadGateway)\n\t\treturn\n\t}\n\tresponse.Body.Close()\n\tauthorized, email := isAuthorized(body)\n\tif authorized {\n\t\tauthorizeEmail(email, w, r)\n\t\tlog.Println(\"User\", email, \"logged in\")\n\t\tsession, _ := store.Get(r, serverConfig.CookieName)\n\t\tif next, ok := session.Values[\"next\"]; ok {\n\t\t\thttp.Redirect(w, r, next.(string), http.StatusFound)\n\t\t}\n\t} else {\n\t\tlog.Println(\"Access Denied: Couldn't match an email address in the server response.\")\n\t\thttp.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)\n\t}\n}", "func (c Client) HandleAuthorization(handleAccess func(access Access, err error)) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tquery := r.URL.Query()\n\n\t\thandleAccess(c.getAccessToken(query.Get(\"code\")))\n\t}\n}", "func (a *loginAPI) HandleOIDCRedirect(w http.ResponseWriter, r *http.Request) error {\n\tstate := randToken()\n\ta.appCookie.Set(stateParam, state, cookieExpiry, w)\n\tlog.WithField(\"func\", \"server.HandleOIDCRedirect\").Debugf(\"GetRedirect: initiate using state '%s'\", state)\n\thttp.Redirect(w, r, a.GetOIDCRedirectURL(), http.StatusTemporaryRedirect)\n\treturn nil\n}", "func (hc *HydraClient) 
AuthAccept(ctx context.Context, client *http.Client, challenge string, accept LoginAccept) (string, error) {\n\tvar feedback feedbackResponse\n\tif err := hc.request(ctx, client, \"/oauth2/auth/requests/login/accept\", http.MethodPut, LOGIN_CHALLENGE, challenge, accept, &feedback); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn feedback.RedirectTo, nil\n}", "func (p *OAuthProxy) Authenticate(rw http.ResponseWriter, req *http.Request) (err error) {\n\tlogger := log.NewLogEntry().WithRemoteAddress(getRemoteAddr(req))\n\n\tremoteAddr := getRemoteAddr(req)\n\ttags := []string{\"action:authenticate\"}\n\n\tallowedGroups := p.upstreamConfig.AllowedGroups\n\n\t// Clear the session cookie if anything goes wrong.\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tp.sessionStore.ClearSession(rw, req)\n\t\t}\n\t}()\n\n\tsession, err := p.sessionStore.LoadSession(req)\n\tif err != nil {\n\t\t// We loaded a cookie but it wasn't valid, clear it, and reject the request\n\t\tlogger.Error(err, \"error authenticating user\")\n\t\treturn err\n\t}\n\n\t// check if this session belongs to the correct identity provider application.\n\t// this case exists primarly to allow us to gracefully manage a clean ux during\n\t// transitions from one provider to another by gracefully restarting the authentication process.\n\tif session.ProviderSlug != p.provider.Data().ProviderSlug {\n\t\tlogger.WithUser(session.Email).Info(\n\t\t\t\"authenticated with incorrect identity provider; restarting authentication\")\n\t\treturn ErrWrongIdentityProvider\n\t}\n\n\t// check that the user has been authorized against the requested upstream\n\t// this is primarily to combat against a user authorizing with one upstream and attempting to use\n\t// the session cookie for a different upstream.\n\tif req.Host != session.AuthorizedUpstream {\n\t\tlogger.WithProxyHost(req.Host).WithAuthorizedUpstream(session.AuthorizedUpstream).WithUser(session.Email).Warn(\n\t\t\t\"session authorized against different upstream; restarting authentication\")\n\t\treturn ErrUnauthorizedUpstreamRequested\n\t}\n\n\t// Lifetime period is the entire duration in which the session is valid.\n\t// This should be set to something like 14 to 30 days.\n\tif session.LifetimePeriodExpired() {\n\t\t// session lifetime has expired, we reject the request and clear the cookie\n\t\tlogger.WithUser(session.Email).Info(\n\t\t\t\"lifetime has expired; restarting authentication\")\n\t\treturn ErrLifetimeExpired\n\t} else if session.RefreshPeriodExpired() {\n\t\t// Refresh period is the period in which the access token is valid. 
This is ultimately\n\t\t// controlled by the upstream provider and tends to be around 1 hour.\n\t\tok, err := p.provider.RefreshSession(session, allowedGroups)\n\t\t// We failed to refresh the session successfully\n\t\t// clear the cookie and reject the request\n\t\tif err != nil {\n\t\t\tlogger.WithUser(session.Email).Error(err, \"refreshing session failed\")\n\t\t\treturn err\n\t\t}\n\n\t\tif !ok {\n\t\t\t// User is not authorized after refresh\n\t\t\t// clear the cookie and reject the request\n\t\t\tlogger.WithUser(session.Email).Info(\n\t\t\t\t\"not authorized after refreshing session\")\n\t\t\treturn ErrUserNotAuthorized\n\t\t}\n\n\t\terr = p.sessionStore.SaveSession(rw, req, session)\n\t\tif err != nil {\n\t\t\t// We refreshed the session successfully, but failed to save it.\n\t\t\t//\n\t\t\t// This could be from failing to encode the session properly.\n\t\t\t// But, we clear the session cookie and reject the request!\n\t\t\tlogger.WithUser(session.Email).Error(\n\t\t\t\terr, \"could not save refreshed session\")\n\t\t\treturn err\n\t\t}\n\t} else if session.ValidationPeriodExpired() {\n\t\t// Validation period has expired, this is the shortest interval we use to\n\t\t// check for valid requests. This should be set to something like a minute.\n\t\t// This calls up the provider chain to validate this user is still active\n\t\t// and hasn't been de-authorized.\n\t\tok := p.provider.ValidateSessionState(session, allowedGroups)\n\t\tif !ok {\n\t\t\t// This user is now no longer authorized, or we failed to\n\t\t\t// validate the user.\n\t\t\t// Clear the cookie and reject the request\n\t\t\tlogger.WithUser(session.Email).Error(\n\t\t\t\terr, \"no longer authorized after validation period\")\n\t\t\treturn ErrUserNotAuthorized\n\t\t}\n\n\t\terr = p.sessionStore.SaveSession(rw, req, session)\n\t\tif err != nil {\n\t\t\t// We validated the session successfully, but failed to save it.\n\n\t\t\t// This could be from failing to encode the session properly.\n\t\t\t// But, we clear the session cookie and reject the request!\n\t\t\tlogger.WithUser(session.Email).Error(\n\t\t\t\terr, \"could not save validated session\")\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// We revalidate group membership whenever the session is refreshed or revalidated\n\t// just above in the call to ValidateSessionState and RefreshSession.\n\t// To reduce strain on upstream identity providers we only revalidate email domains and\n\t// addresses on each request here.\n\tfor _, v := range p.Validators {\n\t\t_, EmailGroupValidator := v.(validators.EmailGroupValidator)\n\n\t\tif !EmailGroupValidator {\n\t\t\terr := v.Validate(session)\n\t\t\tif err != nil {\n\t\t\t\ttags = append(tags, \"error:validation_failed\")\n\t\t\t\tp.StatsdClient.Incr(\"application_error\", tags, 1.0)\n\t\t\t\tlogger.WithRemoteAddress(remoteAddr).WithUser(session.Email).Info(\n\t\t\t\t\tfmt.Sprintf(\"permission denied: unauthorized: %q\", err))\n\t\t\t\treturn ErrUserNotAuthorized\n\t\t\t}\n\t\t}\n\t}\n\n\tlogger.WithRemoteAddress(remoteAddr).WithUser(session.Email).Info(\n\t\tfmt.Sprintf(\"authentication: user validated\"))\n\n\tfor key, val := range p.upstreamConfig.InjectRequestHeaders {\n\t\treq.Header.Set(key, val)\n\t}\n\n\treq.Header.Set(\"X-Forwarded-User\", session.User)\n\n\tif p.upstreamConfig.PassAccessToken && session.AccessToken != \"\" {\n\t\treq.Header.Set(\"X-Forwarded-Access-Token\", session.AccessToken)\n\t}\n\n\treq.Header.Set(\"X-Forwarded-Email\", session.Email)\n\treq.Header.Set(\"X-Forwarded-Groups\", strings.Join(session.Groups, 
\",\"))\n\n\t// stash authenticated user so that it can be logged later (see func logRequest)\n\trw.Header().Set(loggingUserHeader, session.Email)\n\n\t// This user has been OK'd. Allow the request!\n\treturn nil\n}", "func MustAuth(h http.Handler) http.Handler {\n\treturn &authHandler{\n\t\tnext: h,\n\t}\n}", "func RedirectHandler(c *gin.Context) {\n\tauthURL, err := gocialite.NewDispatcher().New().\n\t\tDriver(\"asana\").\n\t\tScopes([]string{}).\n\t\tRedirect(\n\t\t\tclientID, // Client ID\n\t\t\tclientSecret, // Client Secret\n\t\t\tredirectURL, // Redirect URL\n\t\t)\n\n\t// Check for errors (usually driver not valid)\n\tif err != nil {\n\t\tc.Writer.Write([]byte(\"Error: \" + err.Error()))\n\t\treturn\n\t}\n\n\t// Redirect with authURL\n\tc.Redirect(http.StatusFound, authURL) // Redirect with 302 HTTP code\n}", "func InitAuthHandler(r *gin.RouterGroup, us model.UserService) {\n\thandler := &AuthHandler{\n\t\tService: us,\n\t}\n\n\tr.POST(\"/auth\", handler.Auth)\n\tr.POST(\"/register\", handler.Register)\n}", "func (s *Server) handleAuthLogin() http.HandlerFunc {\n\ttype req struct {\n\t\tUsername string `json:\"username\"`\n\t\tPassword string `json:\"password\"`\n\t}\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tcred := &req{}\n\t\tvar err error\n\n\t\tif err = json.NewDecoder(r.Body).Decode(cred); err != nil {\n\t\t\ts.logger.Logf(\"[ERROR] During decode body: %v\\n\", err)\n\t\t\ts.error(w, r, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\tif cred.Username == \"\" || cred.Password == \"\" {\n\t\t\ts.logger.Logf(\"[ERROR] Empty credentials in body: %v\\n\", helpers.ErrNoBodyParams)\n\t\t\ts.error(w, r, http.StatusBadRequest, helpers.ErrNoBodyParams)\n\t\t\treturn\n\t\t}\n\n\t\ttoken, expTime, err := s.store.Users().Login(cred.Username, cred.Password, s.config.SecretKey)\n\t\tif err != nil {\n\t\t\ts.logger.Logf(\"[ERROR] %v\\n\", err)\n\t\t\ts.error(w, r, http.StatusBadRequest, err)\n\t\t\treturn\n\t\t}\n\n\t\thttp.SetCookie(w, &http.Cookie{\n\t\t\tName: \"TKN\",\n\t\t\tValue: token,\n\t\t\tExpires: expTime,\n\t\t\tHttpOnly: true,\n\t\t\tPath: \"/\",\n\t\t\tDomain: s.config.AppDomain,\n\t\t})\n\n\t\ts.respond(w, r, http.StatusOK, map[string]string{\n\t\t\t\"login\": \"successful\",\n\t\t\t// \"user\": cred.Username,\n\t\t\t\"token\": token,\n\t\t})\n\t}\n}" ]
[ "0.6983542", "0.691219", "0.6888863", "0.67001015", "0.65768087", "0.65586627", "0.64946234", "0.633893", "0.6230806", "0.6222551", "0.619607", "0.61454254", "0.6140013", "0.60671693", "0.6062288", "0.6048465", "0.6025987", "0.59442234", "0.5926616", "0.5892844", "0.5879541", "0.58235836", "0.58191496", "0.5803675", "0.57993364", "0.57948226", "0.5791454", "0.57908857", "0.5787198", "0.5782829", "0.5780846", "0.5764805", "0.5748183", "0.5712055", "0.568178", "0.568162", "0.5676295", "0.5663219", "0.5660988", "0.5659049", "0.5655484", "0.5636505", "0.56236464", "0.5615414", "0.5560258", "0.55513006", "0.5540711", "0.5538692", "0.55364054", "0.55327797", "0.5532776", "0.5522087", "0.55215263", "0.55063325", "0.5501619", "0.5501043", "0.5497618", "0.549072", "0.5477605", "0.5474609", "0.54727685", "0.5456896", "0.54363525", "0.54302883", "0.54149204", "0.5393906", "0.5389639", "0.5388856", "0.53814054", "0.53808135", "0.5379648", "0.5370866", "0.5370207", "0.53632945", "0.5363029", "0.5357005", "0.5356745", "0.5356548", "0.5335562", "0.5310647", "0.5295911", "0.5292824", "0.5279946", "0.5272186", "0.527045", "0.526473", "0.5258937", "0.5254624", "0.5252757", "0.5251811", "0.5251123", "0.52458256", "0.52456206", "0.5243223", "0.5238628", "0.5234157", "0.5231926", "0.5230205", "0.52265364", "0.522535" ]
0.7993639
0
HandleOIDCRedirectFinal is responsible for setting the state cookie for the OIDC interaction
func (a *loginAPI) HandleOIDCRedirectFinal(w http.ResponseWriter, r *http.Request) error {
	state := a.appCookie.Get(stateParam, r)
	if state == "" {
		log.WithField("func", "server.HandleOIDCRedirectFinal").Debugf("empty state from cookie, referrer: '%s'", r.Referer())
		return errors.BadRequestError{Err: fmt.Errorf("missing state, cannot initiate OIDC"), Request: r}
	}
	log.WithField("func", "server.HandleOIDCRedirectFinal").Debugf("initiate OIDC redirect using state: '%s'", state)
	http.Redirect(w, r, a.oauthConfig.AuthCodeURL(state), http.StatusFound)
	return nil
}
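Note: the handler above returns an error instead of writing the failure response itself, which implies an adapter in the routing layer. The wiring below is a minimal sketch of such an adapter, not part of the source; the errHandler type, the route path, and the status mapping are all assumptions for illustration.

package main

import (
	"log"
	"net/http"
)

// errHandler is a hypothetical adapter for handlers that, like
// HandleOIDCRedirectFinal, report failures via an error return value
// instead of writing the response themselves.
type errHandler func(w http.ResponseWriter, r *http.Request) error

// ServeHTTP lets errHandler satisfy http.Handler: it runs the wrapped
// function and converts a returned error into a 500 response.
func (h errHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if err := h(w, r); err != nil {
		log.Printf("handler error: %v", err) // assumption: plain stdlib logging
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}

func main() {
	mux := http.NewServeMux()
	// "/oidc/final" is an assumed route; it stands in for a.HandleOIDCRedirectFinal.
	mux.Handle("/oidc/final", errHandler(func(w http.ResponseWriter, r *http.Request) error {
		http.Redirect(w, r, "https://idp.example.com/auth", http.StatusFound)
		return nil
	}))
	log.Fatal(http.ListenAndServe(":8080", mux))
}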
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (a *loginAPI) HandleOIDCRedirect(w http.ResponseWriter, r *http.Request) error {\n\tstate := randToken()\n\ta.appCookie.Set(stateParam, state, cookieExpiry, w)\n\tlog.WithField(\"func\", \"server.HandleOIDCRedirect\").Debugf(\"GetRedirect: initiate using state '%s'\", state)\n\thttp.Redirect(w, r, a.GetOIDCRedirectURL(), http.StatusTemporaryRedirect)\n\treturn nil\n}", "func handleOidcCallback(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tclientName string,\n\tclientID string,\n\tclientSecret string,\n\tredirectURI string,\n\twellKnownConfig oidc.WellKnownConfiguration,\n\tstate string,\n\tcodeVerifier string,\n\tcancel context.CancelFunc,\n) {\n\tvar authorisationResponse, err = oidc.ValidateAuthorisationResponse(r.URL, state)\n\tif err != nil {\n\t\trenderAndLogError(w, cancel, fmt.Sprintf(\"%v\", err))\n\t\treturn\n\t}\n\n\tviewModel, err := VerifyCode(clientID, clientSecret, redirectURI, wellKnownConfig, codeVerifier, authorisationResponse.Code)\n\tif err != nil {\n\t\trenderAndLogError(w, cancel, fmt.Sprintf(\"%v\", err))\n\t\treturn\n\t}\n\n\t// show webpage\n\tt := template.New(\"credentials\")\n\t_, parseErr := t.Parse(TokenResultView())\n\tif parseErr != nil {\n\t\trenderAndLogError(w, cancel, fmt.Sprintf(\"%v\", parseErr))\n\t\treturn\n\t}\n\ttplErr := t.Execute(w, viewModel)\n\tif tplErr != nil {\n\t\trenderAndLogError(w, cancel, fmt.Sprintf(\"%v\", tplErr))\n\t\treturn\n\t}\n\n\tcancel()\n}", "func (a *loginAPI) HandleOIDCLogin(w http.ResponseWriter, r *http.Request) error {\n\tctx := context.Background()\n\n\t// read the stateParam again\n\tstate := a.appCookie.Get(stateParam, r)\n\tlog.WithField(\"func\", \"server.HandleOIDCLogin\").Debugf(\"got state param: %s\", state)\n\n\tif query(r, stateParam) != state {\n\t\treturn errors.BadRequestError{Err: fmt.Errorf(\"state did not match\"), Request: r}\n\t}\n\ta.appCookie.Del(stateParam, w)\n\n\t// is this an auth/flow request\n\tvar (\n\t\tauthFlow bool\n\t\tsite, redirect string\n\t)\n\tauthFlowParams := a.appCookie.Get(authFlowCookie, r)\n\tif authFlowParams != \"\" {\n\t\tlog.WithField(\"func\", \"server.HandleOIDCLogin\").Debugf(\"auth/flow login-mode\")\n\t\tparts := strings.Split(authFlowParams, \"|\")\n\t\tsite = parts[0]\n\t\tredirect = parts[1]\n\t\tauthFlow = true\n\t}\n\ta.appCookie.Del(authFlowCookie, w)\n\n\toauth2Token, err := a.oauthConfig.Exchange(ctx, query(r, codeParam))\n\tif err != nil {\n\t\treturn errors.ServerError{Err: fmt.Errorf(\"failed to exchange token: %v\", err), Request: r}\n\t}\n\trawIDToken, ok := oauth2Token.Extra(idTokenParam).(string)\n\tif !ok {\n\t\treturn errors.ServerError{Err: fmt.Errorf(\"no id_token field in oauth2 token\"), Request: r}\n\t}\n\tidToken, err := a.oauthVerifier.VerifyToken(ctx, rawIDToken)\n\tif err != nil {\n\t\treturn errors.ServerError{Err: fmt.Errorf(\"failed to verify ID Token: %v\", err), Request: r}\n\t}\n\n\tvar oidcClaims struct {\n\t\tEmail string `json:\"email\"`\n\t\tEmailVerified bool `json:\"email_verified\"`\n\t\tDisplayName string `json:\"name\"`\n\t\tPicURL string `json:\"picture\"`\n\t\tGivenName string `json:\"given_name\"`\n\t\tFamilyName string `json:\"family_name\"`\n\t\tLocale string `json:\"locale\"`\n\t\tUserID string `json:\"sub\"`\n\t}\n\n\tif err := idToken.GetClaims(&oidcClaims); err != nil {\n\t\treturn errors.ServerError{Err: fmt.Errorf(\"claims error: %v\", err), Request: r}\n\t}\n\n\t// the user was authenticated successfully, check if sites are available for the given user!\n\tsuccess := true\n\tsites, err := 
a.repo.GetSitesByUser(oidcClaims.Email)\n\tif err != nil {\n\t\tlog.WithField(\"func\", \"server.HandleOIDCLogin\").Warnf(\"successfull login by '%s' but error fetching sites! %v\", oidcClaims.Email, err)\n\t\tsuccess = false\n\t}\n\n\tif len(sites) == 0 {\n\t\tlog.WithField(\"func\", \"server.HandleOIDCLogin\").Warnf(\"successfull login by '%s' but no sites availabel!\", oidcClaims.Email)\n\t\tsuccess = false\n\t}\n\n\tif authFlow {\n\t\tlog.WithField(\"func\", \"server.HandleOIDCLogin\").Debugf(\"auth/flow - check for specific site '%s'\", site)\n\t\tsuccess = false\n\t\t// check specific site\n\t\tfor _, e := range sites {\n\t\t\tif e.Name == site {\n\t\t\t\tsuccess = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tif !success {\n\t\ta.appCookie.Set(errors.FlashKeyError, fmt.Sprintf(\"User '%s' is not allowed to login!\", oidcClaims.Email), cookieExpiry, w)\n\t\thttp.Redirect(w, r, \"/error\", http.StatusTemporaryRedirect)\n\t\treturn nil\n\t}\n\n\t// create the token using the claims of the database\n\tvar siteClaims []string\n\tfor _, s := range sites {\n\t\tsiteClaims = append(siteClaims, fmt.Sprintf(\"%s|%s|%s\", s.Name, s.URL, s.PermList))\n\t}\n\tclaims := security.Claims{\n\t\tType: \"login.User\",\n\t\tDisplayName: oidcClaims.DisplayName,\n\t\tEmail: oidcClaims.Email,\n\t\tUserID: oidcClaims.UserID,\n\t\tUserName: oidcClaims.Email,\n\t\tGivenName: oidcClaims.GivenName,\n\t\tSurname: oidcClaims.FamilyName,\n\t\tClaims: siteClaims,\n\t}\n\ttoken, err := security.CreateToken(a.jwt.JwtIssuer, []byte(a.jwt.JwtSecret), a.jwt.Expiry, claims)\n\tif err != nil {\n\t\tlog.WithField(\"func\", \"server.HandleOIDCLogin\").Errorf(\"could not create a JWT token: %v\", err)\n\t\treturn errors.ServerError{Err: fmt.Errorf(\"error creating JWT: %v\", err), Request: r}\n\t}\n\n\tlogin := persistence.Login{\n\t\tUser: oidcClaims.Email,\n\t\tCreated: time.Now().UTC(),\n\t\tType: persistence.DIRECT,\n\t}\n\n\tif authFlow {\n\t\tlogin.Type = persistence.FLOW\n\t}\n\n\terr = a.repo.StoreLogin(login, per.Atomic{})\n\tif err != nil {\n\t\tlog.WithField(\"func\", \"server.HandleOIDCLogin\").Errorf(\"the login could not be saved: %v\", err)\n\t\treturn errors.ServerError{Err: fmt.Errorf(\"error storing the login: %v\", err), Request: r}\n\t}\n\n\t// set the cookie\n\texp := a.jwt.Expiry * 24 * 3600\n\ta.setJWTCookie(a.jwt.CookieName, token, exp, w)\n\n\tredirectURL := a.jwt.LoginRedirect\n\tif authFlow {\n\t\tlog.WithField(\"func\", \"server.HandleOIDCLogin\").Debugf(\"auth/flow - redirect to specific URL: '%s'\", redirect)\n\t\tredirectURL = redirect\n\t}\n\n\t// redirect to provided URL\n\thttp.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)\n\treturn nil\n}", "func (a *loginAPI) GetOIDCRedirectURL() string {\n\treturn oidcInitiateURL\n}", "func callbackHandler(w http.ResponseWriter, r *http.Request) {\n\n\t// Process identity provider callback, checking tokens, etc.\n\tauth, err := idp.Callback(w, r)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n\n\t// Store session authentication information in cookie\n\tsetCookie(w, r, auth, auth.ExpiresIn)\n\t\n\t// Redirect to original page\n\thttp.Redirect(w, r, auth.URL, http.StatusFound)\t\n}", "func HandleLogin(w http.ResponseWriter, r *http.Request) (err error) {\n\tsession, err := cookieStore.Get(r, oauthSessionName)\n\tif err != nil {\n\t\tlog.Printf(\"corrupted session %s -- generated new\", err)\n\t\terr = nil\n\t}\n\n\tvar tokenBytes [255]byte\n\tif _, err := rand.Read(tokenBytes[:]); err != nil 
{\n\t\treturn AnnotateError(err, \"Couldn't generate a session!\", http.StatusInternalServerError)\n\t}\n\n\tstate := hex.EncodeToString(tokenBytes[:])\n\n\tsession.AddFlash(state, stateCallbackKey)\n\n\tif err = session.Save(r, w); err != nil {\n\t\treturn\n\t}\n\n\thttp.Redirect(w, r, oauth2Config.AuthCodeURL(state, claims), http.StatusTemporaryRedirect)\n\n\treturn\n}", "func (s *Server) HandlerInitiate(w http.ResponseWriter, r *http.Request) {\n\t// ignore error because we don't need previous session values.\n\tsession, _ := s.SessionStore.Get(r, s.Config.SessionName)\n\n\tconf := s.ProviderConfigs[s.DefaultProvider].Config()\n\tcallback := r.Header.Get(\"x-ngx-omniauth-initiate-callback\")\n\tnext := r.Header.Get(\"x-ngx-omniauth-initiate-back-to\")\n\tstate := generateNewState()\n\n\tconf.RedirectURL = callback\n\tsession.Values = map[interface{}]interface{}{}\n\tsession.Values[\"provider\"] = s.DefaultProvider\n\tsession.Values[\"callback\"] = callback\n\tsession.Values[\"next\"] = next\n\tsession.Values[\"state\"] = state\n\tif err := session.Save(r, w); err != nil {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"err\": err.Error(),\n\t\t}).Error(\"failed to save session\")\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t}\n\n\thttp.Redirect(w, r, conf.AuthCodeURL(state), http.StatusFound)\n}", "func handleAuthorize(rw http.ResponseWriter, req *http.Request) {\n\n\t// Get the Google URL which shows the Authentication page to the user.\n\turl := oauthCfg.AuthCodeURL(\"\")\n\n\t// Redirect user to that page.\n\thttp.Redirect(rw, req, url, http.StatusFound)\n}", "func (o *oidcServer) Authenticate(w http.ResponseWriter, r *http.Request) {\n\n\to.Lock()\n\tdefer o.Unlock()\n\n\tzap.L().Debug(\"Authenticating\")\n\n\tif o.serverFlow == ServerFlowTypeAuthFailure {\n\t\thttp.Error(w, \"Authentication failure\", http.StatusUnauthorized)\n\t\tzap.L().Warn(\"Authentication failure\", zap.Reflect(\"type\", o.serverFlow))\n\t\treturn\n\t}\n\n\tstate := r.URL.Query().Get(\"state\")\n\tredURI := r.URL.Query().Get(\"redirect_uri\")\n\n\treqURI, err := url.ParseRequestURI(redURI)\n\tif err != nil {\n\t\tzap.L().Error(\"Unable to parse redirect uri\", zap.Error(err))\n\t\treturn\n\t}\n\n\tq := reqURI.Query()\n\tq.Add(\"state\", state)\n\tq.Add(\"redirect_uri\", redURI)\n\treqURI.RawQuery = q.Encode()\n\n\thttp.Redirect(w, r, reqURI.String(), http.StatusTemporaryRedirect)\n}", "func (h *Handler) oidcCallback(w http.ResponseWriter, r *http.Request, p httprouter.Params) (interface{}, error) {\n\tresult, err := h.GetConfig().Auth.ValidateOIDCAuthCallback(r.URL.Query())\n\tif err != nil {\n\t\th.Warnf(\"Error validating callback: %v.\", err)\n\t\thttp.Redirect(w, r, \"/web/msg/error/login_failed\", http.StatusFound)\n\t\treturn nil, nil\n\t}\n\th.Infof(\"Callback: %v %v %v.\", result.Username, result.Identity, result.Req.Type)\n\treturn nil, h.CallbackHandler(w, r, webapi.CallbackParams{\n\t\tUsername: result.Username,\n\t\tIdentity: result.Identity,\n\t\tSession: result.Session,\n\t\tCert: result.Cert,\n\t\tTLSCert: result.TLSCert,\n\t\tHostSigners: result.HostSigners,\n\t\tType: result.Req.Type,\n\t\tCreateWebSession: result.Req.CreateWebSession,\n\t\tCSRFToken: result.Req.CSRFToken,\n\t\tPublicKey: result.Req.PublicKey,\n\t\tClientRedirectURL: result.Req.ClientRedirectURL,\n\t})\n}", "func (a *Auth) Authenticate(handler http.Handler) http.Handler {\n\tif handler == nil {\n\t\tpanic(\"auth: nil handler\")\n\t}\n\n\treturn http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) {\n\t\tif a.cfg.Disable {\n\t\t\thandler.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\ttoken, err := a.getCookie(r)\n\t\tif token == nil && err == nil {\n\t\t\t// Cookie is missing, invalid. Fetch new token from OAuth2 provider.\n\t\t\t// Redirect user to the OAuth2 consent page to ask for permission for the scopes specified\n\t\t\t// above.\n\t\t\t// Set the scope to the current request URL, it will be used by the redirect handler to\n\t\t\t// redirect back to the url that requested the authentication.\n\t\t\turl := a.cfg.AuthCodeURL(r.RequestURI)\n\t\t\thttp.Redirect(w, r, url, http.StatusTemporaryRedirect)\n\t\t\treturn\n\t\t}\n\t\tif err != nil {\n\t\t\ta.clearCookie(w)\n\t\t\thttp.Error(w, \"Unauthorized\", http.StatusUnauthorized)\n\t\t\ta.logf(\"Get cookie error: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\t// Source token, in case the token needs a renewal.\n\t\tnewOauth2Token, err := a.cfg.TokenSource(r.Context(), token.toOauth2()).Token()\n\t\tif err != nil {\n\t\t\ta.clearCookie(w)\n\t\t\thttp.Error(w, \"Internal error\", http.StatusInternalServerError)\n\t\t\ta.logf(\"Failed token source: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tnewToken := fromOauth2(newOauth2Token)\n\n\t\tif newToken.IDToken != token.IDToken {\n\t\t\ta.logf(\"Refreshed token\")\n\t\t\ttoken = newToken\n\t\t\ta.setCookie(w, token)\n\t\t}\n\n\t\t// Validate the id_token.\n\t\tpayload, err := a.validator.Validate(r.Context(), token.IDToken, a.cfg.ClientID)\n\t\tif err != nil {\n\t\t\ta.clearCookie(w)\n\t\t\thttp.Error(w, \"Invalid auth.\", http.StatusUnauthorized)\n\t\t\ta.logf(\"Invalid token, reset cookie: %s\", err)\n\t\t\treturn\n\t\t}\n\t\t// User is authenticated.\n\t\t// Store email and name in context, and call the inner handler.\n\t\tcreds := &Creds{\n\t\t\tEmail: payload.Claims[\"email\"].(string),\n\t\t\tName: payload.Claims[\"name\"].(string),\n\t\t}\n\t\tr = r.WithContext(context.WithValue(r.Context(), credsKey, creds))\n\t\thandler.ServeHTTP(w, r)\n\t})\n}", "func HandleRedirect(w http.ResponseWriter, r *http.Request) {\n\tstate := r.URL.Query().Get(\"state\")\n\tcode := r.URL.Query().Get(\"code\")\n\trequest, response, err := ia.HandleCallbackCode(code, state)\n\tif err != nil {\n\t\tlog.Debugln(err)\n\t\tmsg := `Unable to complete authentication. 
<a href=\"/\">Go back.</a><hr/>`\n\t\t_ = controllers.WriteString(w, msg, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Check if a user with this auth already exists, if so, log them in.\n\tif u := auth.GetUserByAuth(response.Me, auth.IndieAuth); u != nil {\n\t\t// Handle existing auth.\n\t\tlog.Debugln(\"user with provided indieauth already exists, logging them in\")\n\n\t\t// Update the current user's access token to point to the existing user id.\n\t\taccessToken := request.CurrentAccessToken\n\t\tuserID := u.ID\n\t\tif err := user.SetAccessTokenToOwner(accessToken, userID); err != nil {\n\t\t\tcontrollers.WriteSimpleResponse(w, false, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tif request.DisplayName != u.DisplayName {\n\t\t\tloginMessage := fmt.Sprintf(\"**%s** is now authenticated as **%s**\", request.DisplayName, u.DisplayName)\n\t\t\tif err := chat.SendSystemAction(loginMessage, true); err != nil {\n\t\t\t\tlog.Errorln(err)\n\t\t\t}\n\t\t}\n\n\t\thttp.Redirect(w, r, \"/\", http.StatusTemporaryRedirect)\n\n\t\treturn\n\t}\n\n\t// Otherwise, save this as new auth.\n\tlog.Debug(\"indieauth token does not already exist, saving it as a new one for the current user\")\n\tif err := auth.AddAuth(request.UserID, response.Me, auth.IndieAuth); err != nil {\n\t\tcontrollers.WriteSimpleResponse(w, false, err.Error())\n\t\treturn\n\t}\n\n\t// Update the current user's authenticated flag so we can show it in\n\t// the chat UI.\n\tif err := user.SetUserAsAuthenticated(request.UserID); err != nil {\n\t\tlog.Errorln(err)\n\t}\n\n\thttp.Redirect(w, r, \"/\", http.StatusTemporaryRedirect)\n}", "func rootHandler(w http.ResponseWriter, r *http.Request) {\n\n\tif !verifyLogin(r) {\n\t\turl := LoginCfg.AuthCodeURL(\"\")\n\t\turl = url + OauthURLParams\n\t\t// this will preseve the casenumber in the URI path during Oauth2 redirect\n\t\tparams := r.URL.Query()\n\t\tparamkeys := make([]string, 0)\n\t\tfor k := range params {\n\t\t\tfor i := range params[k] {\n\t\t\t\tparamkeys = append(paramkeys, k+\"=\"+params[k][i])\n\t\t\t}\n\t\t}\n\t\tif len(paramkeys) > 0 {\n\t\t\turl = url + \"&state=\" + base64.StdEncoding.EncodeToString([]byte(strings.Join(paramkeys, \"?\")))\n\t\t}\n\n\t\thttp.Redirect(w, r, url, http.StatusFound)\n\t\treturn\n\t}\n\n\t// if user is not using https then redirect them\n\tif ( r.Header.Get(\"x-forwarded-proto\") != \"https\" && BASEURL != LOCALBASEURL) {\n\t\tfmt.Printf(\"TLS handshake is https=false x-forwarded-proto=%s\\n\", r.Header.Get(\"x-forwarded-proto\"))\n\t\thttp.Redirect(w, r, BASEURL, http.StatusFound)\n\t\treturn\n\t}\n\n startPageTemplate.Execute(w, \"\")\n}", "func (s *Server) handleLogin(w http.ResponseWriter, req *http.Request) error {\n\toauthState := uuid.New().String()\n\tloginSession, err := s.cookieStore.Get(req, LoginSessionName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tloginSession.Options = &sessions.Options{\n\t\tMaxAge: 600,\n\t\tHttpOnly: true,\n\t\tSecure: s.opts.SecureCookie,\n\t}\n\tloginSession.Values[\"oauth_state\"] = oauthState\n\terr = loginSession.Save(req, w)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error saving session: %s\", err)\n\t}\n\turl := s.oauthConfig.AuthCodeURL(oauthState)\n\thttp.Redirect(w, req, url, http.StatusTemporaryRedirect)\n\treturn nil\n}", "func (a *Auth) RedirectHandler() http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tcode := r.URL.Query().Get(\"code\")\n\t\ttoken, err := a.cfg.Exchange(r.Context(), code)\n\t\tif err != nil {\n\t\t\ta.logf(\"Authentication failure for code 
%s: %s\", code, err)\n\t\t\thttp.Error(w, \"Authorization failure\", http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\n\t\t_, ok := token.Extra(\"id_token\").(string)\n\t\tif !ok {\n\t\t\ta.logf(\"Invalid ID token %v (%T)\", token.Extra(\"id_token\"), token.Extra(\"id_token\"))\n\t\t\thttp.Error(w, \"Internal error\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\terr = a.setCookie(w, fromOauth2(token))\n\t\tif err != nil {\n\t\t\ta.logf(\"Failed setting cookie: %v\", err)\n\t\t\thttp.Error(w, \"Internal error\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tredirectPath := r.URL.Query().Get(\"state\")\n\t\tif redirectPath == \"\" {\n\t\t\tredirectPath = \"/\"\n\t\t}\n\t\thttp.Redirect(w, r, redirectPath, http.StatusTemporaryRedirect)\n\t})\n}", "func (p *OIDCProvider) LoginHandler() http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\turl := p.oauth2Config.AuthCodeURL(state)\n\t\thttp.Redirect(w, r, url, http.StatusSeeOther)\n\t})\n}", "func (a *loginAPI) HandleAuthFlow(w http.ResponseWriter, r *http.Request) error {\n\tstate := randToken()\n\ta.appCookie.Set(stateParam, state, cookieExpiry, w)\n\tlog.WithField(\"func\", \"server.HandleAuthFlow\").Debugf(\"initiate using state '%s'\", state)\n\n\tsite, redirect := query(r, siteParam), query(r, redirectParam)\n\tif site == \"\" || redirect == \"\" {\n\t\treturn errors.BadRequestError{Err: fmt.Errorf(\"missing or invalid parameters supplied\"), Request: r}\n\t}\n\ta.appCookie.Set(authFlowCookie, fmt.Sprintf(\"%s%s%s\", site, authFlowSep, redirect), cookieExpiry, w)\n\thttp.Redirect(w, r, a.GetOIDCRedirectURL(), http.StatusTemporaryRedirect)\n\treturn nil\n}", "func AuthHandler(c *gin.Context) {\r\n\tvar state utils.State\r\n\tdecoded, err := utils.B64Decode(c.Query(\"state\"))\r\n\terr = json.Unmarshal([]byte(decoded), &state)\r\n\tif err != nil {\r\n\t\tc.JSON(http.StatusConflict, gin.H{\"code\": http.StatusConflict, \"message\": err})\r\n\t\treturn\r\n\t}\r\n\r\n\tAccessKey := state.AccessKey\r\n\tif AccessKey == \"\" {\r\n\t\tAccessKey = state.Token\r\n\t}\r\n\r\n\tAPPUserID, _, err := utils.LoadAccessKey(AccessKey)\r\n\r\n\tif err != nil || APPUserID == \"\" {\r\n\t\tc.JSON(http.StatusNonAuthoritativeInfo, gin.H{\"code\": http.StatusNonAuthoritativeInfo, \"message\": err})\r\n\t\treturn\r\n\t}\r\n\r\n\tfmt.Println(\"redirURL\", state.URL)\r\n\r\n\tcode := c.Query(\"code\")\r\n\tuserID, _ := utils.VerifyCode(code)\r\n\tuserInfo, _ := utils.GetUserInfo(userID)\r\n\r\n\tu := url.Values{}\r\n\tdata, _ := json.Marshal(userInfo)\r\n\tu.Set(\"state\", utils.B64Encode(string(data)))\r\n\tu.Set(\"timestamp\", fmt.Sprintln(time.Now().Unix()))\r\n\tc.Redirect(http.StatusFound, state.URL+\"?\"+u.Encode())\r\n}", "func HandleLogout(w http.ResponseWriter, r *http.Request) {\n\tsess, err := store.Get(r, \"s\")\n\tif err != nil {\n\t\tServeInternalServerError(w, r)\n\t\treturn\n\t}\n\tdelete(sess.Values, \"accountID\")\n\tsess.Save(r, w)\n\thttp.Redirect(w, r, \"/\", http.StatusSeeOther)\n}", "func handleNaturalistLogin(w http.ResponseWriter, r *http.Request) {\n\turl := authenticator.AuthUrl()\n\n\tlog.Printf(\"Redirecting: %s\", url)\n\n\thttp.Redirect(w, r, url, http.StatusTemporaryRedirect)\n}", "func (s *Server) handleAuthLogout() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.SetCookie(w, &http.Cookie{\n\t\t\tName: \"TKN\",\n\t\t\tExpires: time.Unix(0, 0),\n\t\t\tHttpOnly: true,\n\t\t\tPath: \"/\",\n\t\t\tDomain: 
s.config.AppDomain,\n\t\t})\n\n\t\ts.respond(w, r, http.StatusOK, map[string]string{\n\t\t\t\"logout\": \"successful\",\n\t\t})\n\t}\n}", "func (p *OIDCProvider) CallbackHandler() http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tctx := oidc.ClientContext(r.Context(), p.client)\n\n\t\tif errMsg := r.URL.Query().Get(\"error\"); errMsg != \"\" {\n\t\t\tdesc := r.URL.Query().Get(\"error_description\")\n\t\t\tmsg := fmt.Sprintf(\"%s: %s\", errMsg, desc)\n\t\t\tlevel.Debug(p.logger).Log(\"msg\", msg)\n\t\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tqueryCode := r.URL.Query().Get(\"code\")\n\t\tif queryCode == \"\" {\n\t\t\tconst msg = \"no code in request\"\n\t\t\tlevel.Debug(p.logger).Log(\"msg\", msg)\n\t\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tqueryState := r.URL.Query().Get(\"state\")\n\t\tif queryState != state {\n\t\t\tconst msg = \"incorrect state in request\"\n\t\t\tlevel.Debug(p.logger).Log(\"msg\", msg)\n\t\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\ttoken, err := p.oauth2Config.Exchange(ctx, queryCode)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\"failed to get token: %v\", err)\n\t\t\tlevel.Warn(p.logger).Log(\"msg\", msg, \"err\", err)\n\t\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\trawIDToken, ok := token.Extra(\"id_token\").(string)\n\t\tif !ok {\n\t\t\tconst msg = \"no id_token in token response\"\n\t\t\tlevel.Warn(p.logger).Log(\"msg\", msg)\n\t\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\t_, err = p.verifier.Verify(ctx, rawIDToken)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\"failed to verify ID token: %v\", err)\n\t\t\tlevel.Warn(p.logger).Log(\"msg\", msg)\n\t\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\thttp.SetCookie(w, &http.Cookie{\n\t\t\tName: p.cookieName,\n\t\t\tValue: rawIDToken,\n\t\t\tPath: \"/\",\n\t\t\tExpires: token.Expiry,\n\t\t})\n\n\t\thttp.Redirect(w, r, p.redirectURL, http.StatusFound)\n\t})\n}", "func (j *AuthMux) Callback() http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlog := j.Logger.\n\t\t\tWithField(\"component\", \"auth\").\n\t\t\tWithField(\"remote_addr\", r.RemoteAddr).\n\t\t\tWithField(\"method\", r.Method).\n\t\t\tWithField(\"url\", r.URL)\n\n\t\tstate := r.FormValue(\"state\")\n\t\t// Check if the OAuth state token is valid to prevent CSRF\n\t\t// The state variable we set is actually a token. We'll check\n\t\t// if the token is valid. 
We don't need to know anything\n\t\t// about the contents of the principal only that it hasn't expired.\n\t\tif _, err := j.Tokens.ValidPrincipal(r.Context(), Token(state), TenMinutes); err != nil {\n\t\t\tlog.Error(\"Invalid OAuth state received: \", err.Error())\n\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\treturn\n\t\t}\n\n\t\t// Exchange the code back with the provider to the the token\n\t\tconf := j.Provider.Config()\n\t\tcode := r.FormValue(\"code\")\n\t\ttoken, err := conf.Exchange(r.Context(), code)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Unable to exchange code for token \", err.Error())\n\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\treturn\n\t\t}\n\n\t\tif token.Extra(\"id_token\") != nil && !j.UseIDToken {\n\t\t\tlog.Info(\"found an extra id_token, but option --useidtoken is not set\")\n\t\t}\n\n\t\t// if we received an extra id_token, inspect it\n\t\tvar id string\n\t\tvar group string\n\t\tif j.UseIDToken && token.Extra(\"id_token\") != nil && token.Extra(\"id_token\") != \"\" {\n\t\t\tlog.Debug(\"found an extra id_token\")\n\t\t\tif provider, ok := j.Provider.(ExtendedProvider); ok {\n\t\t\t\tlog.Debug(\"provider implements PrincipalIDFromClaims()\")\n\t\t\t\ttokenString, ok := token.Extra(\"id_token\").(string)\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.Error(\"cannot cast id_token as string\")\n\t\t\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tclaims, err := j.Tokens.GetClaims(tokenString)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(\"parsing extra id_token failed:\", err)\n\t\t\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tlog.Debug(\"found claims: \", claims)\n\t\t\t\tid, err = provider.PrincipalIDFromClaims(claims)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(\"requested claim not found in id_token:\", err)\n\t\t\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tgroup, err = provider.GroupFromClaims(claims)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(\"requested claim not found in id_token:\", err)\n\t\t\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Debug(\"provider does not implement PrincipalIDFromClaims()\")\n\t\t\t}\n\t\t} else {\n\t\t\t// otherwise perform an additional lookup\n\t\t\toauthClient := conf.Client(r.Context(), token)\n\t\t\t// Using the token get the principal identifier from the provider\n\t\t\tid, err = j.Provider.PrincipalID(oauthClient)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"Unable to get principal identifier \", err.Error())\n\t\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tgroup, err = j.Provider.Group(oauthClient)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"Unable to get OAuth Group\", err.Error())\n\t\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tp := Principal{\n\t\t\tSubject: id,\n\t\t\tIssuer: j.Provider.Name(),\n\t\t\tGroup: group,\n\t\t}\n\t\tctx := r.Context()\n\t\terr = j.Auth.Authorize(ctx, w, p)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Unable to get add session to response \", err.Error())\n\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\treturn\n\t\t}\n\t\tlog.Info(\"User \", id, \" is authenticated\")\n\t\thttp.Redirect(w, r, j.SuccessURL, 
http.StatusTemporaryRedirect)\n\t})\n}", "func OAUTHGETHandler(c *fiber.Ctx) error {\n\tmodels.SYSLOG.Trace(\"entering OAUTHGETHandler\")\n\tdefer models.SYSLOG.Trace(\"exiting OAUTHGETHandler\")\n\treturn c.Render(\"protected\", fiber.Map{})\n}", "func AuthorizeCallback(w http.ResponseWriter, r *http.Request, authorizer Authorizer) {\n\tparams := mux.Vars(r)\n\tid := params[\"id\"]\n\n\tauthReq, err := authorizer.Storage().AuthRequestByID(r.Context(), id)\n\tif err != nil {\n\t\tAuthRequestError(w, r, nil, err, authorizer.Encoder())\n\t\treturn\n\t}\n\tif !authReq.Done() {\n\t\tAuthRequestError(w, r, authReq,\n\t\t\toidc.ErrInteractionRequired().WithDescription(\"Unfortunately, the user may not be logged in and/or additional interaction is required.\"),\n\t\t\tauthorizer.Encoder())\n\t\treturn\n\t}\n\tAuthResponse(authReq, authorizer, w, r)\n}", "func HandleOAuth2Callback(w http.ResponseWriter, r *http.Request) (err error) {\n\tsession, err := cookieStore.Get(r, oauthSessionName)\n\tif err != nil {\n\t\tlog.Printf(\"corrupted session %s -- generated new\", err)\n\t\terr = nil\n\t}\n\n\t// ensure we flush the csrf challenge even if the request is ultimately unsuccessful\n\tdefer func() {\n\t\tif err := session.Save(r, w); err != nil {\n\t\t\tlog.Printf(\"error saving session: %s\", err)\n\t\t}\n\t}()\n\n\tswitch stateChallenge, state := session.Flashes(stateCallbackKey), r.FormValue(\"state\"); {\n\tcase state == \"\", len(stateChallenge) < 1:\n\t\terr = errors.New(\"missing state challenge\")\n\tcase state != stateChallenge[0]:\n\t\terr = fmt.Errorf(\"invalid oauth state, expected '%s', got '%s'\", state, stateChallenge[0])\n\t}\n\n\tif err != nil {\n\t\treturn AnnotateError(\n\t\t\terr,\n\t\t\t\"Couldn't verify your confirmation, please try again.\",\n\t\t\thttp.StatusBadRequest,\n\t\t)\n\t}\n\n\ttoken, err := oauth2Config.Exchange(context.Background(), r.FormValue(\"code\"))\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// add the oauth token to session\n\tsession.Values[oauthTokenKey] = token\n\n\tfmt.Printf(\"Access token: %s\\n\", token.AccessToken)\n\n\trawIDToken, ok := token.Extra(\"id_token\").(string)\n\tif !ok {\n\t\treturn AnnotateError(\n\t\t\tfmt.Errorf(\"can't extract id token from access token\"),\n\t\t\t\"Couldn't verify your confirmation, please try again.\",\n\t\t\thttp.StatusBadRequest,\n\t\t)\n\t}\n\n\tidToken, err := oidcVerifier.Verify(context.Background(), rawIDToken)\n\tif err != nil {\n\t\treturn AnnotateError(\n\t\t\terr,\n\t\t\t\"Couldn't verify your confirmation, please try again.\",\n\t\t\thttp.StatusBadRequest,\n\t\t)\n\t}\n\n\tvar claims struct {\n\t\tIss string `json:\"iss\"`\n\t\tSub string `json:\"sub\"`\n\t\tAud string `json:\"aud\"`\n\t\tExp int32 `json:\"exp\"`\n\t\tIat int32 `json:\"iat\"`\n\t\tNonce string `json:\"nonce\"`\n\t\tEmail string `json:\"email\"`\n\t}\n\n\tif err := idToken.Claims(&claims); err != nil {\n\t\treturn AnnotateError(\n\t\t\terr,\n\t\t\t\"Couldn't verify your confirmation, please try again.\",\n\t\t\thttp.StatusBadRequest,\n\t\t)\n\t}\n\n\tfmt.Printf(\"Email: %s\\n\", claims.Email)\n\n\thttp.Redirect(w, r, \"/\", http.StatusTemporaryRedirect)\n\n\treturn\n}", "func OAUTHDisconnect(c *fiber.Ctx) error {\n\tmodels.SYSLOG.Tracef(\"entering OAUTHDisconnect; original URL: %v\", c.OriginalURL())\n\tdefer models.SYSLOG.Trace(\"exiting OAUTHDisconnect\")\n\tsessData, err := models.MySessionStore.Get(c)\n\tif err != nil {\n\t\tmodels.SYSLOG.Errorf(\"session exception %v\", err)\n\t\tpanic(err)\n\t}\n\n\t// for debug purposes - inspect the 
session variables\n\tmodels.SYSLOG.Tracef(\"session id fresh ? %v\", sessData.Fresh())\n\n\tmodels.SYSLOG.Trace(\"trying to get 'oauth-scope' value\")\n\ttk := sessData.Get(\"oauth-scope\")\n\tmodels.SYSLOG.Tracef(\"session stored 'oauth-scope' is %v\", tk)\n\n\tmodels.SYSLOG.Trace(\"trying to get 'oauth-token-type' value\")\n\ttk = sessData.Get(\"oauth-token-type\")\n\tmodels.SYSLOG.Tracef(\"session stored 'oauth-token-type' is %v\", tk)\n\n\ttk = sessData.Get(\"oauth-token\")\n\tmodels.SYSLOG.Tracef(\"session stored 'oauth-token' is %v\", tk)\n\n\tsessData.Destroy()\n\n\treturn c.Redirect(\"/index.html\", fiber.StatusTemporaryRedirect)\n}", "func (s *Services) authorize(handler func(wr http.ResponseWriter, req *http.Request, uid uint64)) http.HandlerFunc {\n\treturn func(wr http.ResponseWriter, req *http.Request) {\n\t\t// TODO: Save the requested url in a cookie that can be redirected to after logging in successfully\n\t\tuid, err := s.auth.Authorize(wr, req)\n\t\tif err != nil {\n\t\t\tlog.Print(err)\n\t\t\thttp.Redirect(wr, req, \"/login\", 302)\n\t\t\treturn\n\t\t}\n\n\t\thandler(wr, req, uid)\n\t}\n}", "func redirectToCognitoLogout(ctx context.Context, d *aegis.HandlerDependencies, req *aegis.APIGatewayProxyRequest, res *aegis.APIGatewayProxyResponse, params url.Values) error {\n\thost := req.GetHeader(\"Host\")\n\tres.SetHeader(\"Set-Cookie\", \"access_token=; Domain=\"+host+\"; Secure; HttpOnly\")\n\tres.Redirect(301, d.Services.Cognito.HostedLogoutURL)\n\treturn nil\n}", "func loginRedirectHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tprovider, err := strconv.Atoi(vars[\"provider\"])\n\tif err != nil {\n\t\thttp.Error(w, \"Invalid identity provider\", http.StatusInternalServerError)\n\t\treturn\n\t} else {\n\t\tidp.Authenticate(provider, w, r)\n\t}\n}", "func authenticated(f authenticatedHandler) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvar a idp.Auth\n\t\terr := getCookie(r, &a)\n\t\tif (err != nil) || (a.IDToken == \"\") {\n\t\t\turl := \"/login?page=\" + url.QueryEscape(r.URL.String())\n\t\t\thttp.Redirect(w, r, url, http.StatusFound)\n\t\t} else {\n\t\t\tf(w, r, &a)\n\t\t}\n\t}\n}", "func (application *Application) ApplyAuth(c *web.C, h http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\t// Get session ID and nonce from the cookie\n\t\tsession := c.Env[\"Session\"].(*sessions.Session)\n\t\tif sessionID, ok := session.Values[\"SessionID\"]; ok {\n\t\t\tif sessionNonce, ok := session.Values[\"SessionNonce\"]; ok {\n\t\t\t\tif sessionID == nil || sessionNonce == nil {\n\t\t\t\t\t// No session values so get out of here\n\t\t\t\t\tc.Env[\"User\"] = nil\n\t\t\t\t\tsession.Values[\"SessionID\"] = nil\n\t\t\t\t\tsession.Values[\"SessionNonce\"] = nil\n\t\t\t\t} else {\n\n\t\t\t\t\t// Sanity check it is the correct form\n\t\t\t\t\tsessionNonceBytes, ok := sessionNonce.([]byte)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tlog.Warningf(\"Session nonce was not a byte array... 
that should not happen\")\n\t\t\t\t\t\tc.Env[\"User\"] = nil\n\t\t\t\t\t\tsession.Values[\"SessionID\"] = nil\n\t\t\t\t\t\tsession.Values[\"SessionNonce\"] = nil\n\t\t\t\t\t} else {\n\n\t\t\t\t\t\thasher := sha256.New()\n\t\t\t\t\t\thasher.Write(sessionNonceBytes)\n\t\t\t\t\t\tsessionNonceHash := hasher.Sum(nil)\n\n\t\t\t\t\t\t// Look for the session ID in the database\n\t\t\t\t\t\tdb := application.DBSession\n\t\t\t\t\t\tvar browserSession models.BrowserSession\n\t\t\t\t\t\terr := db.SelectOne(&browserSession, \"select * from browserSessions where ID=:id\",\n\t\t\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\t\t\"id\": sessionID,\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Warningf(\"Session values not found\")\n\t\t\t\t\t\t\tc.Env[\"User\"] = nil\n\t\t\t\t\t\t\tsession.Values[\"SessionID\"] = nil\n\t\t\t\t\t\t\tsession.Values[\"SessionNonce\"] = nil\n\t\t\t\t\t\t} else {\n\n\t\t\t\t\t\t\t// Check our nonce matches\n\t\t\t\t\t\t\tif subtle.ConstantTimeCompare(browserSession.NonceHash, sessionNonceHash) != 1 {\n\t\t\t\t\t\t\t\tlog.Errorf(\"Nonce does not match! Possible attack!?\")\n\t\t\t\t\t\t\t\tc.Env[\"User\"] = nil\n\t\t\t\t\t\t\t\tsession.Values[\"SessionID\"] = nil\n\t\t\t\t\t\t\t\tsession.Values[\"SessionNonce\"] = nil\n\t\t\t\t\t\t\t} else {\n\n\t\t\t\t\t\t\t\t// Ensure this session is not stale\n\t\t\t\t\t\t\t\tvar timeout int64 = 86400 // Seconds in a day\n\t\t\t\t\t\t\t\tif utils.DBTimeNow()-browserSession.CreationDate > timeout {\n\t\t\t\t\t\t\t\t\tlog.Warningf(\"Session expired\")\n\t\t\t\t\t\t\t\t\tc.Env[\"User\"] = nil\n\t\t\t\t\t\t\t\t\tsession.Values[\"SessionID\"] = nil\n\t\t\t\t\t\t\t\t\tsession.Values[\"SessionNonce\"] = nil\n\t\t\t\t\t\t\t\t} else {\n\n\t\t\t\t\t\t\t\t\t// Valid browser session, so look for the user\n\t\t\t\t\t\t\t\t\tvar user models.User\n\t\t\t\t\t\t\t\t\terr = db.SelectOne(&user, \"select * from users where ID=:id\",\n\t\t\t\t\t\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\t\t\t\t\t\"id\": browserSession.UserID,\n\t\t\t\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\t\t\tlog.Warningf(\"Problem finding the user: %v\", err)\n\t\t\t\t\t\t\t\t\t\tc.Env[\"User\"] = nil\n\t\t\t\t\t\t\t\t\t\tsession.Values[\"SessionID\"] = nil\n\t\t\t\t\t\t\t\t\t\tsession.Values[\"SessionNonce\"] = nil\n\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\tc.Env[\"User\"] = user\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\th.ServeHTTP(w, r)\n\t}\n\treturn http.HandlerFunc(fn)\n}", "func HandleAuthenticateDispenser(w http.ResponseWriter, r *http.Request) {\n\t// Read auth token from request\n\tvar auth DispenserAuth\n\n\terr := utils.ReadJSONFromRequest(r, &auth)\n\n\tif err != nil {\n\t\tutils.WriteError(w, utils.BadRequestError(err))\n\t\treturn\n\t}\n\n\t// Try to authenticate the dispenser\n\ttoken, err := AuthenticateDispenser(auth)\n\n\tif err != nil {\n\t\tutils.WriteError(w, err)\n\t\treturn\n\t}\n\n\t// Return session token to user\n\tutils.WriteJSON(w, token)\n}", "func authHandler(w http.ResponseWriter, r *http.Request) {\n\turl := config(r.Host).AuthCodeURL(r.URL.RawQuery)\n\thttp.Redirect(w, r, url, http.StatusFound)\n}", "func (uh *UserHandler) HandleGoogleLogin(w http.ResponseWriter, r *http.Request) {\n\tOauthStateString = stringTools.RandomStringGN(20)\n\turl := googleOauthConfig.AuthCodeURL(OauthStateString)\n\thttp.Redirect(w, r, url, http.StatusSeeOther)\n}", "func Callback(state string, codeVerifier string, redirectURL string, oauth2config 
oauth2.Config, targetLabel string, finish chan bool) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tdefer func() {\n\t\t\tfinish <- true\n\t\t}()\n\t\tvar msg string\n\t\tvar page string\n\n\t\t// checking state\n\t\tif state != r.URL.Query().Get(\"state\") {\n\t\t\tmsg = fmt.Sprintf(errorMarkup, \"Invalid state\")\n\t\t\tpage = fmt.Sprintf(callbackPage, msg)\n\t\t} else {\n\t\t\t// State OK, continue OpenID Connect Flow\n\t\t\tcode := r.URL.Query().Get(\"code\")\n\t\t\tctx := context.Background()\n\t\t\toauth2Token, err := oauth2config.Exchange(ctx, code, oauth2.SetAuthURLParam(\"code_verifier\", codeVerifier))\n\t\t\tif err != nil {\n\t\t\t\t// Exchange error\n\t\t\t\tmsg = fmt.Sprintf(errorMarkup, err.Error())\n\t\t\t\tpage = fmt.Sprintf(callbackPage, msg)\n\t\t\t} else {\n\t\t\t\t// Exchange success\n\t\t\t\tpage = fmt.Sprintf(callbackPage, successMarkup)\n\n\t\t\t\t// Storing tokens on current target\n\t\t\t\toauth2Token.AccessToken = oauth2Token.Extra(\"id_token\").(string)\n\t\t\t\terr = StorageTokens(targetLabel, *oauth2Token)\n\t\t\t\tif err != nil {\n\t\t\t\t\t// Storage error\n\t\t\t\t\tmsg = fmt.Sprintf(errorMarkup, err.Error())\n\t\t\t\t\tpage = fmt.Sprintf(callbackPage, msg)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tw.Header().Add(\"Content-Type\", \"text/html\")\n\t\t_, err := w.Write([]byte(page))\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Client error writing callback page: (%s)\\n\", err.Error())\n\t\t}\n\t}\n}", "func CanvasOAuth2ResponseHandler(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tcode := r.URL.Query().Get(\"code\")\n\tstate := unmarshalCanvasState(r.URL.Query().Get(\"state\"))\n\n\tif len(code) < 1 || len(state.Intent) < 1 || len(state.State) < 1 {\n\t\tif len(state.Intent) < 1 || len(state.State) < 1 {\n\t\t\tr.URL.Query().Set(\"proxy_error\", \"malformed state\")\n\t\t}\n\n\t\t// an error occurred, just redirect\n\t\tr.URL.Query().Set(\"error_source\", \"canvas\")\n\t\tutil.SendRedirect(\n\t\t\tw,\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"%s?%s\",\n\t\t\t\tenv.CanvasOAuth2SuccessURI,\n\t\t\t\tr.URL.Query().Encode(),\n\t\t\t),\n\t\t)\n\t\treturn\n\t}\n\n\tgrantResp, err := getTokenFromAuthorizationCode(code)\n\tif err != nil {\n\t\thandleISE(w, fmt.Errorf(\"error getting token from authorization code: %w\", err))\n\t\treturn\n\t}\n\n\tif state.Intent == \"reauth\" {\n\t\tprofiles, err := users.List(db, &users.ListRequest{CanvasUserID: grantResp.User.ID})\n\t\tif err != nil {\n\t\t\thandleISE(w, fmt.Errorf(\"error listing users in canvas oauth2 response handler (reauth): %w\", err))\n\t\t\treturn\n\t\t}\n\n\t\tif len(*profiles) < 1 {\n\t\t\t// this user is trying to reauth without authing first?\n\t\t\tutil.SendRedirect(w, getCanvasOAuth2AuthURI(\"auth\", \"\"))\n\t\t\treturn\n\t\t}\n\n\t\t// this user just wants a new session\n\t\tss, err := sessions.Generate(db, &sessions.GenerateRequest{\n\t\t\tCanvasUserID: grantResp.User.ID,\n\t\t})\n\t\tif err != nil {\n\t\t\thandleISE(w, fmt.Errorf(\"error generating session in canvas oauth2 response handler (reauth): %w\", err))\n\t\t\treturn\n\t\t}\n\n\t\tutil.AddSessionToResponse(w, *ss)\n\n\t\tutil.SendRedirect(w, getCanvasOAuth2SuccessURI(grantResp.User.Name, state.Intent, state.Dest))\n\t\treturn\n\t}\n\n\trd := requestDetails{\n\t\tToken: grantResp.AccessToken,\n\t\tRefreshToken: grantResp.RefreshToken,\n\t}\n\n\tprofile, err := getCanvasProfile(rd, \"self\")\n\tif err != nil {\n\t\thandleISE(w, fmt.Errorf(\"error getting canvas self profile in canvas oauth2 response handler: %w\", 
err))\n\t\treturn\n\t}\n\n\t// doing this synchronously so that we can generate a session\n\tprofileResp, err := users.UpsertProfile(db, &users.UpsertRequest{\n\t\tName: profile.Name,\n\t\tEmail: profile.PrimaryEmail,\n\t\tLTIUserID: profile.LtiUserID,\n\t\tCanvasUserID: int64(profile.ID),\n\t}, true)\n\tif err != nil {\n\t\thandleISE(w, fmt.Errorf(\"error upserting profile in canvas oauth2 response handler: %w\", err))\n\t\treturn\n\t}\n\n\tif profileResp.InsertedAt.Add(time.Second * 30).After(time.Now()) {\n\t\tgo email.SendWelcome(profile.PrimaryEmail, profile.Name)\n\t}\n\n\t// this one can be done async\n\tgo saveCanvasOAuth2GrantToDB(grantResp)\n\n\tss, err := sessions.Generate(db, &sessions.GenerateRequest{CanvasUserID: profile.ID})\n\tif err != nil {\n\t\thandleISE(w, fmt.Errorf(\"error generating session in canvas oauth2 response handler: %w\", err))\n\t\treturn\n\t}\n\n\tutil.AddSessionToResponse(w, *ss)\n\n\tutil.SendRedirect(w, getCanvasOAuth2SuccessURI(profile.Name, state.Intent, state.Dest))\n\n\treturn\n}", "func (a *apiServer) handleSAMLResponse(w http.ResponseWriter, req *http.Request) {\n\tvar subject, authCode string\n\tvar err *errutil.HTTPError\n\n\tlogRequest := \"SAML login request\"\n\ta.LogReq(logRequest)\n\tdefer func(start time.Time) {\n\t\tif subject != \"\" {\n\t\t\tlogRequest = fmt.Sprintf(\"SAML login request for %s\", subject)\n\t\t}\n\t\ta.LogResp(logRequest, errutil.PrettyPrintCode(err), err, time.Since(start))\n\t}(time.Now())\n\n\tsubject, authCode, err = a.handleSAMLResponseInternal(req)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), err.Code())\n\t\treturn\n\t}\n\n\t// Redirect caller back to dash with auth code\n\tu := *defaultDashRedirectURL\n\tif a.redirectAddress != nil {\n\t\tu = *a.redirectAddress\n\t}\n\tu.RawQuery = url.Values{\"auth_code\": []string{authCode}}.Encode()\n\tw.Header().Set(\"Location\", u.String())\n\tw.WriteHeader(http.StatusFound) // Send redirect\n}", "func AuthCallback(w http.ResponseWriter, r *http.Request) {\n\tcode := r.FormValue(\"code\")\n\tcallbackState := r.FormValue(\"state\")\n\n\tclientID, err := state.DecryptState(callbackState, os.Getenv(\"SECRET\"))\n\tif err != nil {\n\t\thttp.Error(w, \"State could not be verified\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tconfigValue, err := config.ReadConfigFromEnv(clientID)\n\tif err != nil {\n\t\tlog.Printf(\"Error while verifying state: %v\", err)\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\ttoken, err := github.Exchange(configValue.ClientID, configValue.ClientSecretID, code)\n\tif err != nil {\n\t\tlog.Printf(\"Error while exchanging code %s for client %s with Github: %v\", code, configValue.ClientID, err)\n\t\thttp.Error(w, fmt.Sprintf(\"Code %s for client %s was not accepted by the OAuth provider\", code, configValue.ClientID), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tredirectURLWithToken := fmt.Sprintf(\"%s?token=%s\", configValue.RedirectURL, token)\n\n\tw.Header().Set(\"Location\", redirectURLWithToken)\n\tw.WriteHeader(http.StatusTemporaryRedirect)\n}", "func (this *IdentityProvider) ServeIDPInitiated(w http.ResponseWriter, r *http.Request, serviceProviderID string, relayState string) {\n\treq := &IdpAuthnRequest{\n\t\tIDP: this,\n\t\tHTTPRequest: r,\n\t\tRelayState: relayState,\n\t\tNow: TimeNow(),\n\t}\n\n\tsession := this.SessionProvider.GetSession(w, req)\n\tif session == nil {\n\t\t// If GetSession returns nil, it must have written an HTTP response, per the interface\n\t\t// (this is probably because it drew a login 
form or something)\n\t\treturn\n\t}\n\n\tvar err error\n\tsp, err := this.ServiceProviderProvider.GetServiceProvider(req, serviceProviderID)\n\tif err == os.ErrNotExist {\n\t\tthis.Logger.Printf(\"cannot find service provider: %s\", serviceProviderID)\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t} else if err != nil {\n\t\tthis.Logger.Printf(\"cannot find service provider %s: %v\", serviceProviderID, err)\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\treq.ServiceProviderMetadata = sp.Metadata()\n\n\t// find an ACS endpoint that we can use\n\tfor _, spssoDescriptor := range req.ServiceProviderMetadata.SPSSODescriptors {\n\t\tfor _, endpoint := range spssoDescriptor.AssertionConsumerServices {\n\t\t\tif endpoint.Binding == HTTPPostBinding {\n\t\t\t\treq.ACSEndpoint = &endpoint\n\t\t\t\treq.SPSSODescriptor = &spssoDescriptor\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif req.ACSEndpoint != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\tif req.ACSEndpoint == nil {\n\t\tthis.Logger.Printf(\"saml metadata does not contain an Assertion Consumer Service url\")\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif err = this.AttributesProviders.Provides(req, session); err != nil {\n\t\tthis.Logger.Printf(\"provides session attributes failed: %v\", err)\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif len(req.Response) == 0 {\n\t\tassertionMaker := sp.AssertionMaker()\n\t\tif assertionMaker == nil {\n\t\t\tassertionMaker = this.AssertionMaker\n\t\t}\n\t\tif assertionMaker == nil {\n\t\t\tassertionMaker = DefaultAssertionMaker{}\n\t\t}\n\t\tif err := assertionMaker.MakeAssertion(req, session); err != nil {\n\t\t\tthis.Logger.Printf(\"failed to make assertion: %s\", err)\n\t\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif err = this.IdpAuthnResponseFactory.Factory(req, session, w); err != nil {\n\t\tthis.Logger.Printf(\"failed to create response: %s\", err)\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif err := req.WriteResponse(w); err != nil {\n\t\tthis.Logger.Printf(\"failed to write response: %s\", err)\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func (a *GoogleAuth) GoogleLoginHandler(w http.ResponseWriter, r *http.Request) {\n\tstate := a.NewAuthState(r)\n\turl := a.config.AuthCodeURL(state)\n\thttp.Redirect(w, r, url, http.StatusTemporaryRedirect)\n}", "func HandleLoginResponse(r *http.Request, w http.ResponseWriter, cfg *setting.Cfg, identity *Identity, validator RedirectValidator) *response.NormalResponse {\n\tresult := map[string]interface{}{\"message\": \"Logged in\"}\n\tif redirectURL := handleLogin(r, w, cfg, identity, validator); redirectURL != cfg.AppSubURL+\"/\" {\n\t\tresult[\"redirectUrl\"] = redirectURL\n\t}\n\treturn response.JSON(http.StatusOK, result)\n}", "func (uh *UserHandler) HandleLinkedInLogin(w http.ResponseWriter, r *http.Request) {\n\tOauthStateString = stringTools.RandomStringGN(20)\n\turl := linkedinOauthConfig.AuthCodeURL(OauthStateString)\n\thttp.Redirect(w, r, url, http.StatusSeeOther)\n}", "func authLogoutHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) 
{\n\tauth.ClearCookie(ctx, w)\n\thttp.Redirect(w, r, rootPath, http.StatusTemporaryRedirect)\n}", "func authCallbackHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {\n\tghu, err := authWithGithubCode(ctx, r.FormValue(\"code\"))\n\tif err != nil {\n\t\trenderError(w, err, http.StatusInternalServerError, \"GitHub login failed\")\n\t\treturn\n\t}\n\n\tu, err := findOrCreateUser(ghu)\n\tif err != nil {\n\t\trenderError(w, err, http.StatusInternalServerError, \"Failed to find a user using GitHub profile\")\n\t\treturn\n\t}\n\n\tsess := db.NewSession(u.ID)\n\tif err := sess.Create(); err != nil {\n\t\trenderError(w, err, http.StatusInternalServerError, \"Failed to create a session\")\n\t\treturn\n\t}\n\n\tctx = auth.ContextWithSession(ctx, sess)\n\tauth.AuthorizeResponse(ctx, w)\n\tauth.CacheSession(sess)\n\n\thttp.Redirect(w, r, rootPath, http.StatusTemporaryRedirect)\n}", "func OAUTHProtected(c *fiber.Ctx) error {\n\tmodels.SYSLOG.Tracef(\"entering OAUTHProtected; original URL: %v\", c.OriginalURL())\n\tdefer models.SYSLOG.Trace(\"exiting OAUTHProtected\")\n\n\tsessData, err := models.MySessionStore.Get(c)\n\tif err != nil {\n\t\tmodels.SYSLOG.Errorf(\"session exception %v\", err)\n\t\tpanic(err)\n\t}\n\n\t// for debug purposes - inspect the session variables\n\tmodels.SYSLOG.Tracef(\"session id fresh ? %v\", sessData.Fresh())\n\n\tmodels.SYSLOG.Trace(\"trying to get 'oauth-scope' value\")\n\ttk := sessData.Get(\"oauth-scope\")\n\tmodels.SYSLOG.Tracef(\"session stored 'oauth-scope' is %v\", tk)\n\n\tmodels.SYSLOG.Trace(\"trying to get 'oauth-token-type' value\")\n\ttk = sessData.Get(\"oauth-token-type\")\n\tmodels.SYSLOG.Tracef(\"session stored 'oauth-token-type' is %v\", tk)\n\n\ttk = sessData.Get(\"oauth-token\")\n\tmodels.SYSLOG.Tracef(\"session stored 'oauth-token' is %v\", tk)\n\n\tif tk == nil {\n\t\tsessData.Destroy()\n\t\tmodels.SYSLOG.Tracef(\"token is NULL\")\n\t\treturn c.Redirect(\"/index.html\", fiber.StatusTemporaryRedirect)\n\t}\n\n\treturn c.Next()\n}", "func (handler *AuthHandler) Callback(c *gin.Context) {\n\tstate := c.Query(\"state\")\n\tcode := c.Query(\"code\")\n\n\ttoken, err := handler.GetAccessToken(state, code)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\tc.AbortWithError(400, err)\n\t\treturn\n\t}\n\n\tmarshalledToken, err := json.Marshal(token)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\tc.AbortWithError(400, err)\n\t\treturn\n\t}\n\n\ttokenString := base64.StdEncoding.EncodeToString(marshalledToken)\n\n\tc.SetCookie(\"token\", tokenString, 15*60*60, \"/\", \"\", true, true)\n\n\tc.Redirect(http.StatusMovedPermanently, \"/index\")\n\tc.Abort()\n}", "func OAUTHRedirect(ctx *fiber.Ctx) error {\n\n\tmodels.SYSLOG.Tracef(\"entering OAUTHRedirect; original URL: %v\", ctx.OriginalURL())\n\tdefer models.SYSLOG.Trace(\"exiting OAUTHRedirect\")\n\n\t// First, we need to get the value of the `code` query param\n\tcode := ctx.Query(\"code\", \"\")\n\tif len(code) < 1 {\n\t\treturn ctx.SendStatus(fiber.StatusBadRequest)\n\t}\n\n\t// Next, let's form the HTTP request to call the GitHub OAuth endpoint to get our access token\n\n\ta := fiber.AcquireAgent()\n\treq := a.Request()\n\treq.Header.SetMethod(fiber.MethodPost)\n\treq.Header.Set(\"accept\", \"application/json\")\n\treq.SetRequestURI(fmt.Sprintf(\"https://github.com/login/oauth/access_token?client_id=%s&client_secret=%s&code=%s\", models.ClientID, models.ClientSecret, code))\n\tif err := a.Parse(); err != nil {\n\t\tmodels.SYSLOG.Errorf(\"could not create HTTP request: %v\", err)\n\t}\n\n\tvar retCode 
int\n\tvar retBody []byte\n\tvar errs []error\n\t// Send out the HTTP request\n\tvar t *models.OAuthAccessResponse\n\n\tif retCode, retBody, errs = a.Struct(&t); len(errs) > 0 {\n\t\tmodels.SYSLOG.Tracef(\"received: %v\", string(retBody))\n\t\tmodels.SYSLOG.Errorf(\"could not send HTTP request: %v\", errs)\n\t\treturn ctx.SendStatus(fiber.StatusInternalServerError)\n\t}\n\tmodels.SYSLOG.Tracef(\"received : %v %v %v\", retCode, string(retBody), errs)\n\n\tvar sess *session.Session\n\tvar err error\n\t// Finally, send a response to redirect the user to the \"welcome\" page with the access token\n\tif sess, err = models.MySessionStore.Get(ctx); err == nil {\n\t\tsess.Set(\"token\", t.AccessToken)\n\t\tmodels.SYSLOG.Tracef(\"setting session token %v\", t.AccessToken)\n\t\tsessData, _ := models.MySessionStore.Get(ctx)\n\t\tdefer sessData.Save()\n\t\t//models.MySessionStore.RegisterType(models.OAuthAccessResponse)\n\t\tsessData.Set(\"oauth-scope\", t.Scope)\n\t\tsessData.Set(\"oauth-token-type\", t.TokenType)\n\t\tsessData.Set(\"oauth-token\", t.AccessToken)\n\n\t\tif err != nil {\n\t\t\tmodels.SYSLOG.Errorf(\"session saving exception %v\", err)\n\t\t}\n\t\tmodels.SYSLOG.Tracef(\"redirecting to /welcome.html?access_token=%v\", t.AccessToken)\n\t\t//\t\treturn ctx.Redirect(\"/welcome.html?access_token=\"+t.AccessToken, fiber.StatusFound)\n\t\treturn ctx.Redirect(\"/welcome.html\", fiber.StatusFound)\n\t}\n\n\tmodels.SYSLOG.Tracef(\"redirecting to /\")\n\treturn ctx.Redirect(\"/\", fiber.StatusTemporaryRedirect)\n}", "func processLogoutRequest(w http.ResponseWriter, r *http.Request) {\n\t// grab the \"ID\" cookie, erase from map if it is found\n\tid, err := r.Cookie(\"id\")\n\tif err != http.ErrNoCookie {\n\t\tcookieStore.Lock()\n\t\tdelete(cookieStore.m, id.Value)\n\t\tcookieStore.Unlock()\n\t\t// create a log-out (info) flash\n\t\tflash := http.Cookie{Name: \"iflash\", Value: \"Logout successful\", Path: \"/\"}\n\t\thttp.SetCookie(w, &flash)\n\t}\n\n\t// clear the cookies on the client\n\tclearID := http.Cookie{Name: \"id\", Value: \"-1\", Expires: time.Now(), Path: \"/\"}\n\thttp.SetCookie(w, &clearID)\n\tclearVal := http.Cookie{Name: \"key\", Value: \"-1\", Expires: time.Now(), Path: \"/\"}\n\thttp.SetCookie(w, &clearVal)\n}", "func (c *LoggedInContext) DoSignOutRequestHandler(rw web.ResponseWriter, req *web.Request) {\n\tsession, _ := c.Store.Get(req.Request, \"session-security\")\n\tsession.Values[\"sessionId\"] = nil\n\tc.SetNotificationMessage(rw, req, \"Goodbye!\")\n\n\tc.Account.ExpireSession(c.Storage)\n\n\tsession.Save(req.Request, rw)\n\thttp.Redirect(rw, req.Request, HomeUrl.Make(), http.StatusFound)\n}", "func (a *Authenticator) AuthHandler() khttp.FuncHandler {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t_, handled, err := a.PerformAuth(w, r)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"your lack of authentication cookie is impressive - something went wrong\", http.StatusInternalServerError)\n\t\t\tlog.Printf(\"ERROR - could not complete authentication - %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif !handled {\n\t\t\thttp.Redirect(w, r, \"/\", http.StatusTemporaryRedirect)\n\t\t}\n\t}\n}", "func AuthorizeAndSetRedirect(c *fiber.Ctx) {\n\t// Create cookie\n\tcookie := new(fiber.Cookie)\n\tcookie.Name = \"authorize_redirect\"\n\tcookie.Value = c.Path()\n\tcookie.Expires = time.Now().Add(24 * time.Hour)\n\tc.Cookie(cookie)\n\n\tAuthorize(c)\n}", "func Authentication(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) 
{\n\t\tlog.Printf(\"Authentication: checking for existing authenticated session\\n\")\n\t\tauthenticated, ok := r.Context().Value(session.AuthenticatedKey).(bool)\n\t\tlog.Printf(\"Authentication: authenticated?: %b\\n\", authenticated)\n\t\tif (ok == false || authenticated == false) {\n\t\t\tstate := r.Context().Value(session.StateKey).(string)\n\t\t\tlog.Printf(\"Authentication: using state: %v\\n\", state)\n\t\t\tauthorizeURL := oauth2Config.AuthCodeURL(state, oauth2.AccessTypeOnline)\n\t\t\tlog.Printf(\"Authentication: redirecting to %s\\n\", authorizeURL)\n\t\t\thttp.Redirect(w, r, authorizeURL, http.StatusFound)\n\t\t\treturn\n\t\t} else { // authenticated == true\n log.Printf(\"Authentication: user is authenticated, done\\n\")\n next.ServeHTTP(w, r)\n }\n\t})\n}", "func TestHandler_Authorize(t *testing.T) {\n\t// Create the mock session store.\n\tvar saved bool\n\tstore := NewTestStore()\n\tsession := sessions.NewSession(store, \"\")\n\tstore.GetFunc = func(r *http.Request, name string) (*sessions.Session, error) {\n\t\treturn session, nil\n\t}\n\tstore.SaveFunc = func(r *http.Request, w http.ResponseWriter, session *sessions.Session) error {\n\t\tsaved = true\n\t\treturn nil\n\t}\n\n\t// Setup handler.\n\th := NewTestHandler()\n\th.Handler.Store = store\n\tdefer h.Close()\n\n\t// Create non-redirecting client.\n\tvar redirectURL *url.URL\n\tclient := &http.Client{\n\t\tCheckRedirect: func(req *http.Request, via []*http.Request) error {\n\t\t\tredirectURL = req.URL\n\t\t\treturn errors.New(\"no redirects\")\n\t\t},\n\t}\n\n\t// Retrieve authorize redirect.\n\t// We should be redirected to GitHub's OAuth URL.\n\t// We should save the auth state to the session so it can be check on callback.\n\tresp, _ := client.Get(h.Server.URL + \"/_/login\")\n\tresp.Body.Close()\n\tequals(t, \"https\", redirectURL.Scheme)\n\tequals(t, \"github.com\", redirectURL.Host)\n\tequals(t, \"/login/oauth/authorize\", redirectURL.Path)\n\tequals(t, 32, len(redirectURL.Query().Get(\"state\")))\n\n\tassert(t, saved, \"expected session save\")\n\tequals(t, redirectURL.Query().Get(\"state\"), session.Values[\"AuthState\"])\n}", "func (s *Provider) HandleSamlLogin(w http.ResponseWriter, r *http.Request) (string, error) {\n\tserviceProvider := s.serviceProvider\n\tif r.URL.Path == serviceProvider.AcsURL.Path {\n\t\treturn \"\", fmt.Errorf(\"don't wrap Middleware with RequireAccount\")\n\t}\n\tlog.Debugf(\"SAML [HandleSamlLogin]: Creating authentication request for %v\", s.name)\n\tbinding := saml.HTTPRedirectBinding\n\tbindingLocation := serviceProvider.GetSSOBindingLocation(binding)\n\n\treq, err := serviceProvider.MakeAuthenticationRequest(bindingLocation, binding, saml.HTTPPostBinding)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn \"\", err\n\t}\n\t// relayState is limited to 80 bytes but also must be integrity protected.\n\t// this means that we cannot use a JWT because it is way too long. 
Instead\n\t// we set a cookie that corresponds to the state\n\trelayState := base64.URLEncoding.EncodeToString(randomBytes(42))\n\n\tsecretBlock := x509.MarshalPKCS1PrivateKey(serviceProvider.Key)\n\tstate := jwt.New(jwt.SigningMethodHS256)\n\tclaims := state.Claims.(jwt.MapClaims)\n\tclaims[\"id\"] = req.ID\n\tclaims[\"uri\"] = r.URL.String()\n\tsignedState, err := state.SignedString(secretBlock)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn \"\", err\n\t}\n\n\ts.clientState.SetState(w, r, relayState, signedState)\n\n\tredirectURL, err := req.Redirect(relayState, serviceProvider)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn redirectURL.String(), nil\n}", "func getRedirectHandler (w http.ResponseWriter, r *http.Request) {\n responseCode := 200\n\n r.ParseForm()\n cookieName := \"\"\n cookieUUID := r.FormValue(\"cookie\")\n if cookieUUID == \"\" { \n\tresponseCode = 400 // set response code to 400, malformed request\n } else {\n\tresponseCode = 200 // set response code to 200, request processed\n }\n \n //Attempt to retrieve user name from cookie map based on UUID\n foundCookie := false\n\n mutex.Lock()\n cookieLookup := cookieMap[cookieUUID]\n mutex.Unlock()\n\n if cookieLookup.Name != \"\" {\n\tfoundCookie = true\n\tcookieName = cookieLookup.Value\n }\n\n if !foundCookie {\n\tresponseCode = 400 // set response code to 400, malformed request\n }\n \n w.WriteHeader(responseCode)\n w.Write([]byte(cookieName))\n // timeserver will need to use r.ParseForm() and http.get(URL (i.e. authhost:9090/get) to retrieve data\n}", "func handleSignIn(ctx *macaron.Context, sess session.Store, user *models.User) {\n\t_ = sess.Delete(\"openid_verified_uri\")\n\t_ = sess.Delete(\"openid_signin_remember\")\n\t_ = sess.Delete(\"openid_determined_email\")\n\t_ = sess.Delete(\"openid_determined_username\")\n\t_ = sess.Delete(\"twofaUid\")\n\t_ = sess.Delete(\"twofaRemember\")\n\t_ = sess.Delete(\"u2fChallenge\")\n\t_ = sess.Delete(\"linkAccount\")\n\terr := sess.Set(\"uid\", user.ID)\n\tif err != nil {\n\t\tlog.Error(fmt.Sprintf(\"Error setting session: %v\", err))\n\t}\n\terr = sess.Set(\"uname\", user.Name)\n\tif err != nil {\n\t\tlog.Error(fmt.Sprintf(\"Error setting session: %v\", err))\n\t}\n\n\t// Language setting of the user overwrites the one previously set\n\t// If the user does not have a locale set, we save the current one.\n\tif len(user.Language) == 0 {\n\t\tuser.Language = ctx.Locale.Language()\n\t\tif err := models.UpdateUserCols(user, \"language\"); err != nil {\n\t\t\tlog.Error(fmt.Sprintf(\"Error updating user language [user: %d, locale: %s]\", user.ID, user.Language))\n\t\t\treturn\n\t\t}\n\t}\n\n\tctx.SetCookie(\"lang\", user.Language, nil, setting.AppSubURL, setting.SessionConfig.Domain, setting.SessionConfig.Secure, true)\n\n\t// Clear whatever CSRF has right now, force to generate a new one\n\tctx.SetCookie(setting.CSRFCookieName, \"\", -1, setting.AppSubURL, setting.SessionConfig.Domain, setting.SessionConfig.Secure, true)\n}", "func AuthzCodeHandler(w http.ResponseWriter, r *http.Request) {\n\tlog.Printf(\"AuthzCodeHandler: extracting code and checking state\\n\")\n\tstate, ok := r.Context().Value(session.StateKey).(string)\n if ok == false {\n\t\thttp.Error(w, \"AuthzCodeHandler: could not find state\\n\",\n\t\t\thttp.StatusInternalServerError)\n\t\treturn\n\t}\n\tif state != r.FormValue(\"state\") {\n\t\tlog.Printf(\"AuthzCodeHandler: state mismatch: have: %s; got: %s\\n\",\n\t\t\tstate, 
r.FormValue(\"state\"))\n\t\thttp.Error(w, \"AuthzCodeHandler: state doesn't match session's state, rejecting\",\n\t\t\thttp.StatusNotAcceptable)\n\t\treturn\n\t}\n\n\tcode := r.FormValue(\"code\")\n\tlog.Printf(\"AuthzCodeHandler: going to request access token with code: %s\\n\", code)\n\ttoken, err := oauth2Config.Exchange(context.Background(), code)\n\tif err != nil {\n\t\tlog.Printf(\"AuthzCodeHandler: failed to exchange authz code: %v\\n\", err)\n\t\thttp.Error(w, \"failed to get access token with authz code\",\n\t\t\thttp.StatusInternalServerError)\n\t\treturn\n\t}\n\tlog.Printf(\"AuthzCodeHandler: got token: %+v\\n\", token)\n\n\tidToken, ok := token.Extra(\"id_token\").(string)\n\tif ok == false {\n\t\tlog.Printf(\"AuthzCodeHandler: but didn't find id_token\\n\")\n\t\thttp.Error(w, \"didn't receive id_token\", http.StatusInternalServerError)\n\t\treturn\n\t} else {\n\t\tlog.Printf(\"AuthzCodeHandler: and id_token: %+v\\n\\n\", idToken)\n\t}\n\n\tlog.Printf(\"AuthzCodeHandler: building rider via id_token: %+v\\n\", idToken)\n\trider, err := riderFromJwt(idToken)\n\tif err != nil {\n\t\tlog.Printf(\"AuthzCodeHandler: failed to build rider from jwt: %s\\n\", err)\n\t\thttp.Error(w, \"failed to build rider from jwt\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tlog.Printf(\"AuthzCodeHandler: setting state with rider: %v\\n\", rider)\n\tsession.SetSession(rider, w, r)\n\tlog.Printf(\"AuthzCodeHandler: done, redirecting to SPA\\n\")\n\thttp.Redirect(w, r, \"/web/\", http.StatusFound)\n}", "func HandleLoginRedirectResponse(r *http.Request, w http.ResponseWriter, cfg *setting.Cfg, identity *Identity, validator RedirectValidator) *response.RedirectResponse {\n\treturn response.Redirect(handleLogin(r, w, cfg, identity, validator))\n}", "func OIDCAuth(optionSetters ...Option) func(next http.Handler) http.Handler {\n\toptions := newOptions(optionSetters...)\n\ttokenCache := sync.NewCache(options.UserinfoCacheSize)\n\n\th := oidcAuth{\n\t\tlogger: options.Logger,\n\t\tproviderFunc: options.OIDCProviderFunc,\n\t\thttpClient: options.HTTPClient,\n\t\toidcIss: options.OIDCIss,\n\t\tTokenManagerConfig: options.TokenManagerConfig,\n\t\ttokenCache: &tokenCache,\n\t\ttokenCacheTTL: options.UserinfoCacheTTL,\n\t}\n\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\t\t// there is no bearer token on the request,\n\t\t\tif !h.shouldServe(req) {\n\t\t\t\t// oidc supported but token not present, add header and handover to the next middleware.\n\t\t\t\tuserAgentAuthenticateLockIn(w, req, options.CredentialsByUserAgent, \"bearer\")\n\t\t\t\tnext.ServeHTTP(w, req)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif h.getProvider() == nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttoken := strings.TrimPrefix(req.Header.Get(\"Authorization\"), \"Bearer \")\n\n\t\t\tclaims, status := h.getClaims(token, req)\n\t\t\tif status != 0 {\n\t\t\t\tw.WriteHeader(status)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// inject claims to the request context for the account_uuid middleware.\n\t\t\treq = req.WithContext(oidc.NewContext(req.Context(), claims))\n\n\t\t\t// store claims in context\n\t\t\t// uses the original context, not the one with probably reduced security\n\t\t\tnext.ServeHTTP(w, req.WithContext(oidc.NewContext(req.Context(), claims)))\n\t\t})\n\t}\n}", "func (l *RemoteProvider) InitiateLogin(w http.ResponseWriter, r *http.Request, _ bool) {\n\ttu := 
viper.GetString(\"MESHERY_SERVER_CALLBACK_URL\")\n\tif tu == \"\" {\n\t\ttu = \"http://\" + r.Host + \"/api/user/token\" // Hard coding the path because this is what meshery expects\n\t}\n\n\t_, err := r.Cookie(tokenName)\n\t// logrus.Debugf(\"url token: %v %v\", token, err)\n\tif err != nil {\n\t\thttp.SetCookie(w, &http.Cookie{\n\t\t\tName: l.RefCookieName,\n\t\t\tValue: \"/\",\n\t\t\tExpires: time.Now().Add(l.LoginCookieDuration),\n\t\t\tPath: \"/\",\n\t\t\tHttpOnly: true,\n\t\t})\n\t\thttp.Redirect(w, r, l.RemoteProviderURL+\"?source=\"+base64.RawURLEncoding.EncodeToString([]byte(tu))+\"&provider_version=\"+l.ProviderVersion, http.StatusFound)\n\t\treturn\n\t}\n\n\t// TODO: go to ref cookie\n\thttp.Redirect(w, r, \"/\", http.StatusFound)\n}", "func Authorize(s Server) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tswitch rtype := r.FormValue(\"response_type\"); rtype {\n\t\tcase \"code\":\n\t\t\turi, params := authorizeCode(s, w, r)\n\t\t\tif uri != \"\" || params != nil {\n\t\t\t\tRedirect(w, uri, params)\n\t\t\t}\n\t\tdefault:\n\t\t\tclient, uri := clientRedirectURI(s, w, r)\n\t\t\tif client != nil && uri != \"\" {\n\t\t\t\tRedirect(w, uri, Params{\n\t\t\t\t\t\"state\": r.FormValue(\"state\"),\n\t\t\t\t\t\"error\": \"unsupported_response_type\",\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t})\n}", "func (s *Server) handleCallback(w http.ResponseWriter, req *http.Request) error {\n\tloginSession, err := s.cookieStore.Get(req, LoginSessionName)\n\tif err != nil {\n\t\treturn err\n\t}\n\toauthState := loginSession.Values[\"oauth_state\"]\n\treq.ParseForm()\n\tstate := req.FormValue(\"state\")\n\tif state != oauthState {\n\t\treturn fmt.Errorf(\"invalid oauth state\")\n\t}\n\tloginSession.Options.MaxAge = -1\n\terr = loginSession.Save(req, w)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error saving session: %s\", err)\n\t}\n\tb, err := s.getUserInfo(req.FormValue(\"code\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar profile UserProfile\n\t//fmt.Printf(\"%s\\n\", b)\n\terr = json.Unmarshal(b, &profile)\n\tw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\tsession, err := s.cookieStore.Get(req, UserSessionName)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn nil\n\t}\n\tif len(s.opts.AllowedUsers) > 0 && !s.opts.AllowedUsers[profile.Email] {\n\t\tsession.Options.MaxAge = -1\n\t\tdelete(session.Values, \"hash\")\n\t\tdelete(session.Values, \"email\")\n\t\terr = session.Save(req, w)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error saving session: %s\", err)\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\tw.Write([]byte(fmt.Sprintf(\"<title>Forbidden</title>User %s is not authorized.<br>\"+\n\t\t\t\"Try a different Google account. 
<a href='https://mail.google.com/mail/logout'>Log out of Google</a>.\", profile.Email)))\n\t\treturn nil\n\t}\n\t// Restrict the cookie by 1h, HttpOnly and Secure (if configured).\n\tsession.Options = &sessions.Options{\n\t\tMaxAge: 3600,\n\t\tHttpOnly: true,\n\t\tSecure: s.opts.SecureCookie,\n\t\tSameSite: http.SameSiteNoneMode,\n\t}\n\t// Instead of email, we store a salted cryptographic hash (pseudonymous id).\n\tsession.Values[\"hash\"] = s.hashId(profile.Email)\n\tsession.Values[\"email\"] = profile.Email\n\terr = session.Save(req, w)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error saving session: %s\", err)\n\t}\n\thttp.Redirect(w, req, \"/token\", http.StatusTemporaryRedirect)\n\treturn nil\n}", "func LoginSuccess(r *http.Request, client *http.Client, hydraAdminURL, challenge, subject, stateID string, extra map[string]interface{}) (string, error) {\n\treq := &hydraapi.HandledLoginRequest{\n\t\tSubject: &subject,\n\t\tContext: map[string]interface{}{},\n\t}\n\n\tif len(stateID) > 0 {\n\t\treq.Context[StateIDKey] = stateID\n\t}\n\n\tfor k, v := range extra {\n\t\treq.Context[k] = v\n\t}\n\n\tresp, err := AcceptLogin(client, hydraAdminURL, challenge, req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn resp.RedirectTo, nil\n}", "func oauthCallbackHandler(w http.ResponseWriter, r *http.Request) {\n\ttransport := &oauth.Transport{Config: &oauthProviderConfig.oauthConfig}\n\ttransport.Exchange(r.FormValue(\"code\"))\n\tclient := transport.Client()\n\tresponse, err := client.Get(oauthProviderConfig.UserInfoAPI)\n\tif err != nil {\n\t\tlog.Printf(\"Error while contacting '%s': %s\\n\", oauthProviderConfig.UserInfoAPI, err)\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\tbody, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Error while parsing response from '%s': %s\\n\", oauthProviderConfig.UserInfoAPI, err)\n\t\thttp.Error(w, http.StatusText(http.StatusBadGateway), http.StatusBadGateway)\n\t\treturn\n\t}\n\tresponse.Body.Close()\n\tauthorized, email := isAuthorized(body)\n\tif authorized {\n\t\tauthorizeEmail(email, w, r)\n\t\tlog.Println(\"User\", email, \"logged in\")\n\t\tsession, _ := store.Get(r, serverConfig.CookieName)\n\t\tif next, ok := session.Values[\"next\"]; ok {\n\t\t\thttp.Redirect(w, r, next.(string), http.StatusFound)\n\t\t}\n\t} else {\n\t\tlog.Println(\"Access Denied: Couldn't match an email address in the server response.\")\n\t\thttp.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)\n\t}\n}", "func unauthenticated(fn http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tif s, _ := Session.Get(r, \"s\"); s != nil && s.Values[\"Id\"] != nil {\n\t\t\thttp.Redirect(w, r, \"/main/\", 302)\n\t\t\treturn\n\t\t}\n\t\tfn(w, r)\n\t}\n}", "func (h *Handler) LogoutHandler(w http.ResponseWriter, r *http.Request) {\n\n\tchallenge, err := readURLChallangeParams(r, \"logout\")\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif r.Method == \"POST\" {\n\t\tvar err error\n\t\tif r.Form == nil {\n\t\t\tif err := r.ParseForm(); err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\taccept := r.Form.Get(\"accept\")\n\t\tlogoutChallenge := r.Form.Get(\"challenge\")\n\t\tvar redirectURL string\n\n\t\tif accept == \"true\" {\n\t\t\tredirectURL, err = h.LoginService.SendAcceptBody(\"logout\", logoutChallenge, nil)\n\n\t\t} 
else {\n\t\t\tredirectURL, err = h.LoginService.SendRejectBody(\"logout\", logoutChallenge, nil)\n\t\t}\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\thttp.Redirect(w, r, redirectURL, http.StatusFound)\n\t} else {\n\n\t\tchallengeBody, err := h.LoginService.ReadChallenge(challenge, \"logout\")\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\tif challengeBody.RpInitiated {\n\t\t\ttemplLogout := template.Must(template.ParseFiles(\"templates/logout.html\"))\n\t\t\tlogoutData := h.ConfigService.FetchLogoutConfig(challenge, challengeBody.Subject)\n\t\t\ttemplLogout.Execute(w, logoutData)\n\t\t} else {\n\t\t\tredirectURL, err := h.LoginService.SendAcceptBody(\"logout\", challenge, nil)\n\t\t\tif err != nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\thttp.Redirect(w, r, redirectURL, http.StatusFound)\n\t\t}\n\t}\n\n}", "func authenticated(fn AuthHandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\ts, err := Session.Get(r, \"s\")\n\t\tif s == nil || s.Values[\"Id\"] == nil || err != nil {\n\t\t\thttp.Redirect(w, r, \"/signin/?next=\"+r.URL.Path, 302)\n\t\t\treturn\n\t\t}\n\t\tfn(w, r, s)\n\t}\n}", "func (lh *AuthorizationCodeLocalhost) redirectUriHandler(w http.ResponseWriter, r *http.Request) {\n\tconst (\n\t\tcloseTab string = \". Please close this tab.\"\n\t)\n\n\trq := r.URL.RawQuery\n\turlValues, err := url.ParseQuery(rq)\n\tif err != nil {\n\t\terr := fmt.Sprintf(\"Unable to parse query: %v\", err)\n\n\t\tlh.AuthCodeReqStatus = AuthorizationCodeStatus{Status: FAILED, Details: err}\n\t\tlh.authCode = AuthorizationCode{}\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(lh.AuthCodeReqStatus.Details + closeTab))\n\t\treturn\n\t}\n\n\turlError := urlValues.Get(\"error\")\n\t// Authorization Code Error from consent page\n\tif urlError != \"\" {\n\t\terr := fmt.Sprintf(\"An error occurred when getting authorization code: %s\", urlError)\n\t\tlh.AuthCodeReqStatus = AuthorizationCodeStatus{Status: FAILED, Details: err}\n\t\tlh.authCode = AuthorizationCode{}\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(lh.AuthCodeReqStatus.Details + closeTab))\n\t\treturn\n\t}\n\n\turlCode := urlValues.Get(\"code\")\n\turlState := urlValues.Get(\"state\")\n\t// No Code, State, or Error is treated as unknown error\n\tif urlCode == \"\" && urlState == \"\" {\n\t\terr := \"Unknown error when getting authorization code\"\n\t\tlh.AuthCodeReqStatus = AuthorizationCodeStatus{Status: FAILED, Details: err}\n\n\t\tlh.authCode = AuthorizationCode{}\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(lh.AuthCodeReqStatus.Details + closeTab))\n\t\treturn\n\t}\n\n\t// Authorization code returned\n\tif urlCode != \"\" && urlState != \"\" {\n\t\tlh.authCode = AuthorizationCode{\n\t\t\tCode: urlCode,\n\t\t\tState: urlState,\n\t\t}\n\n\t\tlh.AuthCodeReqStatus = AuthorizationCodeStatus{\n\t\t\tStatus: GRANTED, Details: \"Authorization code granted\"}\n\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(lh.AuthCodeReqStatus.Details + closeTab))\n\t\treturn\n\t}\n\n\terr = fmt.Errorf(\"Authorization code missing code or state.\")\n\tlh.AuthCodeReqStatus = AuthorizationCodeStatus{Status: FAILED, Details: err.Error()}\n\n\tlh.authCode = AuthorizationCode{}\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(lh.AuthCodeReqStatus.Details + closeTab))\n\treturn\n}", "func (r *oauthProxy) oauthAuthorizationHandler(w http.ResponseWriter, req *http.Request) 
{\n\tctx, span, logger := r.traceSpan(req.Context(), \"authorization handler\")\n\tif span != nil {\n\t\tdefer span.End()\n\t}\n\n\tif r.config.SkipTokenVerification {\n\t\tr.errorResponse(w, req.WithContext(ctx), \"\", http.StatusNotAcceptable, nil)\n\t\treturn\n\t}\n\n\tclient, err := r.getOAuthClient(r.getRedirectionURL(w, req.WithContext(ctx)))\n\tif err != nil {\n\t\tr.errorResponse(w, req.WithContext(ctx), \"failed to retrieve the oauth client for authorization\", http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\t// step: set the access type of the session\n\tvar accessType string\n\tif containedIn(\"offline\", r.config.Scopes, false) {\n\t\taccessType = \"offline\"\n\t}\n\n\tauthURL := client.AuthCodeURL(req.URL.Query().Get(\"state\"), accessType, \"\")\n\tlogger.Debug(\"incoming authorization request from client address\",\n\t\tzap.String(\"access_type\", accessType),\n\t\tzap.String(\"auth_url\", authURL),\n\t\tzap.String(\"client_ip\", req.RemoteAddr))\n\n\t// step: if we have a custom sign in page, let's display that\n\tif r.config.hasCustomSignInPage() {\n\t\tmodel := make(map[string]string)\n\t\tmodel[\"redirect\"] = authURL\n\t\tw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_ = r.Render(w, path.Base(r.config.SignInPage), mergeMaps(model, r.config.Tags))\n\n\t\treturn\n\t}\n\n\tr.redirectToURL(authURL, w, req.WithContext(ctx), http.StatusTemporaryRedirect)\n}", "func authenticated(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\t// check if authenticated\n\t\t_, err := session(w, req)\n\t\tif err != nil {\n\t\t\t//http.Error(w, \"not logged in\", http.StatusUnauthorized)\n\t\t\tlogger.SetPrefix(\"WARNING \")\n\t\t\tlogger.Println(err, `Failed to get/verify cookie \"session\"`)\n\t\t\thttp.Redirect(w, req, \"/\", http.StatusSeeOther)\n\t\t\treturn // don't call original handler\n\t\t}\n\t\tnext.ServeHTTP(w, req)\n\t})\n}", "func loginHandler(rw http.ResponseWriter, req *http.Request) {\n\tsession, err := store.Get(req, CONFIG.SessionName)\n\tif err != nil {\n\t\tlog.Printf(\"Error reading session: %v\", err)\n\t\tif cookie, err := req.Cookie(CONFIG.SessionName); err != nil {\n\t\t\tlog.Printf(\"Error reading cookie: %v\", err)\n\t\t\thttp.Error(rw, err.Error(), http.StatusInternalServerError)\n\t\t} else {\n\t\t\tcookie.MaxAge = -1\n\t\t\thttp.SetCookie(rw, cookie)\n\t\t\thttp.Redirect(rw, req, \"/\", http.StatusFound)\n\t\t}\n\t\treturn\n\t}\n\tcode := req.FormValue(\"code\")\n\tclient := &http.Client{}\n\tresp, err := client.PostForm(\"https://cloud.digitalocean.com/v1/oauth/token\",\n\t\turl.Values{\n\t\t\t\"client_id\": {CONFIG.ClientId},\n\t\t\t\"client_secret\": {CONFIG.ClientSecret},\n\t\t\t\"code\": {code},\n\t\t\t\"grant_type\": {\"authorization_code\"},\n\t\t\t\"redirect_uri\": {CONFIG.CallbackUrl},\n\t\t})\n\tif err != nil {\n\t\tlog.Printf(\"Error exchanging code for token: %v\", err)\n\t\thttp.Error(rw, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Error reading response body: %v\", err)\n\t\thttp.Error(rw, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tvar credentials DigitalOceanResponse\n\terr = json.Unmarshal(body, &credentials)\n\tsession.Values[\"accesstoken\"] = credentials.AccessToken\n\tsession.Values[\"name\"] = credentials.Info.Name\n\tsession.Save(req, rw)\n\thttp.Redirect(rw, req, \"/\", http.StatusFound)\n}", "func 
AuthResponseToken(w http.ResponseWriter, r *http.Request, authReq AuthRequest, authorizer Authorizer, client Client) {\n\tcreateAccessToken := authReq.GetResponseType() != oidc.ResponseTypeIDTokenOnly\n\tresp, err := CreateTokenResponse(r.Context(), authReq, client, authorizer, createAccessToken, \"\", \"\")\n\tif err != nil {\n\t\tAuthRequestError(w, r, authReq, err, authorizer.Encoder())\n\t\treturn\n\t}\n\tcallback, err := AuthResponseURL(authReq.GetRedirectURI(), authReq.GetResponseType(), authReq.GetResponseMode(), resp, authorizer.Encoder())\n\tif err != nil {\n\t\tAuthRequestError(w, r, authReq, err, authorizer.Encoder())\n\t\treturn\n\t}\n\thttp.Redirect(w, r, callback, http.StatusFound)\n}", "func LogoutPostHandler(writer http.ResponseWriter, request *http.Request) {\n\tclearSession(writer)\n\thttp.Redirect(writer, request, \"/\", 302)\n}", "func (m *Minion) defaultUnauthorizedHandler(w http.ResponseWriter, r *http.Request) {\n\tsession, _ := m.Sessions.Get(r, m.SessionName)\n\tsession.Values[RedirectKey] = r.URL.String()\n\tsession.Save(r, w)\n\n\thttp.Redirect(w, r, m.UnauthorizedURL, http.StatusSeeOther)\n}", "func AuthResponse(authReq AuthRequest, authorizer Authorizer, w http.ResponseWriter, r *http.Request) {\n\tclient, err := authorizer.Storage().GetClientByClientID(r.Context(), authReq.GetClientID())\n\tif err != nil {\n\t\tAuthRequestError(w, r, authReq, err, authorizer.Encoder())\n\t\treturn\n\t}\n\tif authReq.GetResponseType() == oidc.ResponseTypeCode {\n\t\tAuthResponseCode(w, r, authReq, authorizer)\n\t\treturn\n\t}\n\tAuthResponseToken(w, r, authReq, authorizer, client)\n}", "func RedirectHandler(w http.ResponseWriter, r *http.Request) {\n\tlog.Print(\"I AM HERE REDIRECTED\")\n\terr := r.ParseForm()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stdout, \"could not parse query: %s\", err.Error())\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tcode := r.FormValue(\"code\")\n\n\treqURL := fmt.Sprintf(\"https://github.com/login/oauth/access_token?client_id=%s&client_secret=%s&code=%s\", ClientID, ClientSecret, code)\n\treq, err := http.NewRequest(http.MethodPost, reqURL, nil)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stdout, \"could not create http request: %s\", err.Error())\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\treq.Header.Set(http.CanonicalHeaderKey(\"accept\"), \"application/json\")\n\treq.Header.Set(\"X-OAuth-Scopes\", \"gists\")\n\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stdout, \"could not send HTTP request: %s\", err.Error())\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tdefer res.Body.Close()\n\t// Parse the response body into the `OAuthAccessResponse` struct\n\tvar t OAuthAccessResponse\n\tif err := json.NewDecoder(res.Body).Decode(&t); err != nil {\n\t\tfmt.Fprintf(os.Stdout, \"could not parse JSON response: %s\", err.Error())\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tSession.AccessToken = t.AccessToken\n\n\tw.WriteHeader(http.StatusFound)\n\tw.Write([]byte(\"OK\"))\n}", "func Authorize(c *gin.Context) {\n\n\tsess := models.NewSession(ginject.Deps(c))\n\n\tif err := c.Bind(sess); err != nil {\n\t\tc.AbortWithStatus(400)\n\t\treturn\n\t}\n\n\tsess.Store()\n\n\tc.Header(\"Cache-Control\", \"no-cache, no-store, must-revalidate\")\n\tc.Header(\"Pragma\", \"no-cache\")\n\tc.Header(\"Expires\", \"0\")\n\tc.Redirect(http.StatusTemporaryRedirect, \"/gui/login?sess=\"+sess.GetSessionID())\n}", "func signoutHandler(w http.ResponseWriter, r *http.Request) {\n\tuserId, err := userID(r)\n\tif err != nil 
|| userId == \"\" {\n\t\tw.WriteHeader(400)\n\t\tLogPrintf(\"signout: userid\")\n\t\treturn\n\t}\n\tt := authTransport(userId)\n\tif t == nil {\n\t\tw.WriteHeader(500)\n\t\tLogPrintf(\"signout: auth\")\n\t\treturn\n\t}\n\treq, err := http.NewRequest(\"GET\", fmt.Sprintf(revokeEndpointFmt, t.Token.RefreshToken), nil)\n\tresponse, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tw.WriteHeader(500)\n\t\tLogPrintf(\"signout: revoke\")\n\t\treturn\n\t}\n\tdefer response.Body.Close()\n\tstoreUserID(w, r, \"\")\n\tdeleteCredential(userId)\n\thttp.Redirect(w, r, fullUrl, http.StatusFound)\n}", "func AuthResponseCode(w http.ResponseWriter, r *http.Request, authReq AuthRequest, authorizer Authorizer) {\n\tcode, err := CreateAuthRequestCode(r.Context(), authReq, authorizer.Storage(), authorizer.Crypto())\n\tif err != nil {\n\t\tAuthRequestError(w, r, authReq, err, authorizer.Encoder())\n\t\treturn\n\t}\n\tcodeResponse := struct {\n\t\tcode string\n\t\tstate string\n\t}{\n\t\tcode: code,\n\t\tstate: authReq.GetState(),\n\t}\n\tcallback, err := AuthResponseURL(authReq.GetRedirectURI(), authReq.GetResponseType(), authReq.GetResponseMode(), &codeResponse, authorizer.Encoder())\n\tif err != nil {\n\t\tAuthRequestError(w, r, authReq, err, authorizer.Encoder())\n\t\treturn\n\t}\n\thttp.Redirect(w, r, callback, http.StatusFound)\n}", "func (c *Operation) callback(w http.ResponseWriter, r *http.Request) { //nolint: funlen,gocyclo\n\tif len(r.URL.Query()[\"error\"]) != 0 {\n\t\tif r.URL.Query()[\"error\"][0] == \"access_denied\" {\n\t\t\thttp.Redirect(w, r, c.homePage, http.StatusTemporaryRedirect)\n\t\t}\n\t}\n\n\ttk, err := c.tokenIssuer.Exchange(r)\n\tif err != nil {\n\t\tlogger.Errorf(\"failed to exchange code for token: %s\", err.Error())\n\t\tc.writeErrorResponse(w, http.StatusBadRequest,\n\t\t\tfmt.Sprintf(\"failed to exchange code for token: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\t// user info from token will be used for to retrieve data from cms\n\tinfo, err := c.tokenResolver.Resolve(tk.AccessToken)\n\tif err != nil {\n\t\tlogger.Errorf(\"failed to get token info: %s\", err.Error())\n\t\tc.writeErrorResponse(w, http.StatusBadRequest,\n\t\t\tfmt.Sprintf(\"failed to get token info: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\tuserID, subject, err := c.getCMSData(tk, \"email=\"+info.Subject, info.Scope)\n\tif err != nil {\n\t\tlogger.Errorf(\"failed to get cms data: %s\", err.Error())\n\t\tc.writeErrorResponse(w, http.StatusBadRequest,\n\t\t\tfmt.Sprintf(\"failed to get cms data: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\tcallbackURLCookie, err := r.Cookie(callbackURLCookie)\n\tif err != nil && !errors.Is(err, http.ErrNoCookie) {\n\t\tc.writeErrorResponse(w, http.StatusBadRequest,\n\t\t\tfmt.Sprintf(\"failed to get authMode cookie: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\tif callbackURLCookie != nil && callbackURLCookie.Value != \"\" {\n\t\ttxnID := uuid.NewString()\n\t\tdata := txnData{\n\t\t\tUserID: userID,\n\t\t\tScope: info.Scope,\n\t\t\tToken: tk.AccessToken,\n\t\t}\n\n\t\tdataBytes, mErr := json.Marshal(data)\n\t\tif mErr != nil {\n\t\t\tc.writeErrorResponse(w, http.StatusInternalServerError,\n\t\t\t\tfmt.Sprintf(\"failed to marshal txn data: %s\", mErr.Error()))\n\t\t\treturn\n\t\t}\n\n\t\terr = c.store.Put(txnID, dataBytes)\n\t\tif err != nil {\n\t\t\tc.writeErrorResponse(w, http.StatusInternalServerError,\n\t\t\t\tfmt.Sprintf(\"failed to save txn data: %s\", err.Error()))\n\n\t\t\treturn\n\t\t}\n\n\t\thttp.Redirect(w, r, callbackURLCookie.Value+\"?txnID=\"+txnID, 
http.StatusTemporaryRedirect)\n\n\t\treturn\n\t}\n\n\tvcsProfileCookie, err := r.Cookie(vcsProfileCookie)\n\tif err != nil {\n\t\tlogger.Errorf(\"failed to get cookie: %s\", err.Error())\n\t\tc.writeErrorResponse(w, http.StatusBadRequest,\n\t\t\tfmt.Sprintf(\"failed to get cookie: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\tcred, err := c.prepareCredential(subject, info.Scope, vcsProfileCookie.Value)\n\tif err != nil {\n\t\tlogger.Errorf(\"failed to create credential: %s\", err.Error())\n\t\tc.writeErrorResponse(w, http.StatusInternalServerError,\n\t\t\tfmt.Sprintf(\"failed to create credential: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\n\tt, err := template.ParseFiles(c.didAuthHTML)\n\tif err != nil {\n\t\tlogger.Errorf(err.Error())\n\t\tc.writeErrorResponse(w, http.StatusInternalServerError,\n\t\t\tfmt.Sprintf(\"unable to load html: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\tif err := t.Execute(w, map[string]interface{}{\n\t\t\"Path\": generate + \"?\" + \"profile=\" + vcsProfileCookie.Value,\n\t\t\"Cred\": string(cred),\n\t}); err != nil {\n\t\tlogger.Errorf(fmt.Sprintf(\"failed to execute qr html template: %s\", err.Error()))\n\t}\n}", "func HandleLoginRedirect(r *http.Request, w http.ResponseWriter, cfg *setting.Cfg, identity *Identity, validator RedirectValidator) {\n\tredirectURL := handleLogin(r, w, cfg, identity, validator)\n\thttp.Redirect(w, r, redirectURL, http.StatusFound)\n}", "func HandleSamlLogin(w http.ResponseWriter, r *http.Request) {\n\tvar redirectBackBaseValue string\n\ts := server.SamlServiceProvider\n\n\ts.XForwardedProto = r.Header.Get(\"X-Forwarded-Proto\")\n\n\tif r.URL.Query() != nil {\n\t\tredirectBackBaseValue = r.URL.Query().Get(redirectBackBase)\n\t\tif redirectBackBaseValue == \"\" {\n\t\t\tredirectBackBaseValue = server.GetRancherAPIHost()\n\t\t}\n\t} else {\n\t\tredirectBackBaseValue = server.GetRancherAPIHost()\n\t}\n\n\tif !isWhitelisted(redirectBackBaseValue, s.RedirectWhitelist) {\n\t\tlog.Errorf(\"Cannot redirect to anything other than whitelisted domains and rancher api host\")\n\t\tredirectBackPathValue := r.URL.Query().Get(redirectBackPath)\n\t\tredirectURL := server.GetSamlRedirectURL(server.GetRancherAPIHost(), redirectBackPathValue)\n\t\tredirectURL = addErrorToRedirect(redirectURL, \"422\")\n\t\thttp.Redirect(w, r, redirectURL, http.StatusFound)\n\t\treturn\n\t}\n\n\tserviceProvider := s.ServiceProvider\n\tif r.URL.Path == serviceProvider.AcsURL.Path {\n\t\treturn\n\t}\n\n\tbinding := saml.HTTPRedirectBinding\n\tbindingLocation := serviceProvider.GetSSOBindingLocation(binding)\n\tif bindingLocation == \"\" {\n\t\tbinding = saml.HTTPPostBinding\n\t\tbindingLocation = serviceProvider.GetSSOBindingLocation(binding)\n\t}\n\n\treq, err := serviceProvider.MakeAuthenticationRequest(bindingLocation)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\t// relayState is limited to 80 bytes but also must be integrity protected.\n\t// this means that we cannot use a JWT because it is way too long. 
Instead\n\t// we set a cookie that corresponds to the state\n\trelayState := base64.URLEncoding.EncodeToString(randomBytes(42))\n\n\tsecretBlock := x509.MarshalPKCS1PrivateKey(serviceProvider.Key)\n\tstate := jwt.New(jwt.SigningMethodHS256)\n\tclaims := state.Claims.(jwt.MapClaims)\n\tclaims[\"id\"] = req.ID\n\tclaims[\"uri\"] = r.URL.String()\n\tsignedState, err := state.SignedString(secretBlock)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\ts.ClientState.SetState(w, r, relayState, signedState)\n\n\tif binding == saml.HTTPRedirectBinding {\n\t\tredirectURL := req.Redirect(relayState)\n\t\tw.Header().Add(\"Location\", redirectURL.String())\n\t\tw.WriteHeader(http.StatusFound)\n\t\treturn\n\t}\n\tif binding == saml.HTTPPostBinding {\n\t\tw.Header().Add(\"Content-Security-Policy\", \"\"+\n\t\t\t\"default-src; \"+\n\t\t\t\"script-src 'sha256-AjPdJSbZmeWHnEc5ykvJFay8FTWeTeRbs9dutfZ0HqE='; \"+\n\t\t\t\"reflected-xss block; referrer no-referrer;\")\n\t\tw.Header().Add(\"Content-type\", \"text/html\")\n\t\tw.Write([]byte(`<!DOCTYPE html><html><body>`))\n\t\tw.Write(req.Post(relayState))\n\t\tw.Write([]byte(`</body></html>`))\n\t\treturn\n\t}\n}", "func (s *Server) handleLogout(w http.ResponseWriter, req *http.Request) error {\n\t// Intentionally ignore errors that may be caused by the stale session.\n\tsession, _ := s.cookieStore.Get(req, UserSessionName)\n\tsession.Options.MaxAge = -1\n\tdelete(session.Values, \"hash\")\n\tdelete(session.Values, \"email\")\n\t_ = session.Save(req, w)\n\tfmt.Fprintf(w, `<!DOCTYPE html><a href='/login'>Log in</a>`)\n\treturn nil\n}", "func (p *Proxy) OAuthCallback(w http.ResponseWriter, r *http.Request) {\n\terr := r.ParseForm()\n\tif err != nil {\n\t\tlog.FromRequest(r).Error().Err(err).Msg(\"proxy: failed parsing request form\")\n\t\thttputil.ErrorResponse(w, r, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\terrorString := r.Form.Get(\"error\")\n\tif errorString != \"\" {\n\t\thttputil.ErrorResponse(w, r, errorString, http.StatusForbidden)\n\t\treturn\n\t}\n\t// We begin the process of redeeming the code for an access token.\n\tsession, err := p.AuthenticateClient.Redeem(r.Context(), r.Form.Get(\"code\"))\n\tif err != nil {\n\t\tlog.FromRequest(r).Error().Err(err).Msg(\"proxy: error redeeming authorization code\")\n\t\thttputil.ErrorResponse(w, r, \"Internal error\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tencryptedState := r.Form.Get(\"state\")\n\tstateParameter := &StateParameter{}\n\terr = p.cipher.Unmarshal(encryptedState, stateParameter)\n\tif err != nil {\n\t\tlog.FromRequest(r).Error().Err(err).Msg(\"proxy: could not unmarshal state\")\n\t\thttputil.ErrorResponse(w, r, \"Internal error\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tc, err := p.csrfStore.GetCSRF(r)\n\tif err != nil {\n\t\tlog.FromRequest(r).Error().Err(err).Msg(\"proxy: failed parsing csrf cookie\")\n\t\thttputil.ErrorResponse(w, r, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\tp.csrfStore.ClearCSRF(w, r)\n\n\tencryptedCSRF := c.Value\n\tcsrfParameter := &StateParameter{}\n\terr = p.cipher.Unmarshal(encryptedCSRF, csrfParameter)\n\tif err != nil {\n\t\tlog.FromRequest(r).Error().Err(err).Msg(\"proxy: couldn't unmarshal CSRF\")\n\t\thttputil.ErrorResponse(w, r, \"Internal error\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif encryptedState == encryptedCSRF {\n\t\tlog.FromRequest(r).Error().Msg(\"encrypted state and CSRF should not be 
equal\")\n\t\thttputil.ErrorResponse(w, r, \"Bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tif !reflect.DeepEqual(stateParameter, csrfParameter) {\n\t\tlog.FromRequest(r).Error().Msg(\"state and CSRF should be equal\")\n\t\thttputil.ErrorResponse(w, r, \"Bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// We store the session in a cookie and redirect the user back to the application\n\terr = p.sessionStore.SaveSession(w, r, session)\n\tif err != nil {\n\t\tlog.FromRequest(r).Error().Msg(\"error saving session\")\n\t\thttputil.ErrorResponse(w, r, \"Error saving session\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tlog.FromRequest(r).Debug().\n\t\tStr(\"code\", r.Form.Get(\"code\")).\n\t\tStr(\"state\", r.Form.Get(\"state\")).\n\t\tStr(\"RefreshToken\", session.RefreshToken).\n\t\tStr(\"session\", session.AccessToken).\n\t\tStr(\"RedirectURI\", stateParameter.RedirectURI).\n\t\tMsg(\"session\")\n\n\t// This is the redirect back to the original requested application\n\thttp.Redirect(w, r, stateParameter.RedirectURI, http.StatusFound)\n}", "func logoutHandler(w http.ResponseWriter, r *http.Request) {\n\tlog.Print(\"logoutHandler: process form\")\n\tctx := context.Background()\n\tif b.authenticator == nil {\n\t\tvar err error\n\t\tb.authenticator, err = initAuth(ctx)\n\t\tif err != nil {\n\t\t\tlog.Print(\"logoutHandler authenticator could not be initialized\")\n\t\t\thttp.Error(w, \"Server error\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\tcookie, err := r.Cookie(\"session\")\n\tif err != nil {\n\t\t// OK, just don't show the contents that require a login\n\t\tlog.Println(\"logoutHandler: no cookie\")\n\t} else {\n\t\tb.authenticator.Logout(ctx, cookie.Value)\n\t\tcookie.MaxAge = -1\n\t\thttp.SetCookie(w, cookie)\n\t}\n\n\t// Return HTML if method is post\n\tif httphandling.AcceptHTML(r) {\n\t\ttitle := b.webConfig.GetVarWithDefault(\"Title\", defTitle)\n\t\tcontent := htmlContent{\n\t\t\tTitle: title,\n\t\t}\n\t\tb.pageDisplayer.DisplayPage(w, \"logged_out.html\", content)\n\t\treturn\n\t}\n\n\tmessage := \"Please come back again\"\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tfmt.Fprintf(w, \"{\\\"message\\\" :\\\"%s\\\"}\", message)\n}", "func redirectToCognitoLogin(ctx context.Context, d *aegis.HandlerDependencies, req *aegis.APIGatewayProxyRequest, res *aegis.APIGatewayProxyResponse, params url.Values) error {\n\tres.Redirect(301, d.Services.Cognito.HostedLoginURL)\n\treturn nil\n}", "func (ths *ReceiveBackEnd) handleLogout(w http.ResponseWriter, r *http.Request) {\n\tvar sessionId = r.URL.Query().Get(\"session\");\n\tths.log.Println(\"Handle logout... 
\");\n\tths.log.Println(\"Session: \" + sessionId + \" is terminating.\");\n\tths.store.GetJSonBlobs(map[string]string{\"SessionId\": sessionId});\n\tr.Body.Close();\n\treturn;\n}", "func cognitoCallback(ctx context.Context, d *aegis.HandlerDependencies, req *aegis.APIGatewayProxyRequest, res *aegis.APIGatewayProxyResponse, params url.Values) error {\n\t// Exchange code for token\n\ttokens, err := d.Services.Cognito.GetTokens(req.QueryStringParameters[\"code\"], []string{})\n\t// Depending on Cognito configuration, there could be an error here.\n\t// This service is for an OAuth2 with an authorization code flow.\n\t// NOTE: tokens.AccessToken is generally used.\n\t// If using an openid grant, you may also use tokens.IDToken with ParseAndVerifyJWT() below.\n\tif tokens.Error != \"\" {\n\t\terr = errors.New(tokens.Error)\n\t}\n\tif err != nil {\n\t\tlog.Println(\"Couldn't get access token\", err)\n\t\tres.JSONError(500, err)\n\t} else {\n\t\t// verify the token\n\t\t_, err := d.Services.Cognito.ParseAndVerifyJWT(tokens.AccessToken)\n\t\tif err == nil {\n\t\t\thost := req.GetHeader(\"Host\")\n\t\t\tstage := req.RequestContext.Stage\n\t\t\tres.SetHeader(\"Set-Cookie\", \"access_token=\"+tokens.AccessToken+\"; Domain=\"+host+\"; Secure; HttpOnly\")\n\t\t\tres.Redirect(301, \"https://\"+host+\"/\"+stage+\"/protected\")\n\t\t} else {\n\t\t\tres.JSONError(401, errors.New(\"unauthorized, invalid token\"))\n\t\t}\n\t}\n\treturn nil\n}", "func (c *clientInfo) CompleteAuthorizeURI(ctx context.Context, challengeURI string) error {\n\tchallenge, err := c.client.GetChallenge(ctx, challengeURI)\n\tif err != nil {\n\t\treturn logger.Errore(err)\n\t}\n\t_, err = c.client.Accept(ctx, challenge)\n\tif err != nil {\n\t\treturn logger.Errore(err)\n\t}\n\t_, err = c.client.WaitAuthorization(ctx, challenge.URI)\n\tif err != nil {\n\t\treturn logger.Errore(err)\n\t}\n\treturn nil\n}", "func completeAuth(w http.ResponseWriter, r *http.Request) {\n\tuser, err := gothic.CompleteUserAuth(w, r)\n\tif err != nil {\n\t\tfmt.Fprintln(w, err)\n\t\treturn\n\t}\n\tuserSession.Values[\"user\"] = user\n\thttp.Redirect(w, r, \"/\", 301)\n}", "func (o *oauth) authorizeHandler(w http.ResponseWriter, r *http.Request) {\n\t// We aren't using HandleAuthorizeRequest here because that assumes redirect_uri\n\t// exists on the request. We're just checking for a valid token.\n\tti, err := o.server.ValidationBearerToken(r)\n\tif err != nil {\n\t\tauthFailures.With(\"method\", \"oauth2\").Add(1)\n\t\tencodeError(w, err)\n\t\treturn\n\t}\n\tif ti.GetClientID() == \"\" {\n\t\tauthFailures.With(\"method\", \"oauth2\").Add(1)\n\t\tencodeError(w, fmt.Errorf(\"missing client_id\"))\n\t\treturn\n\t}\n\n\t// Passed token check, return \"200 OK\"\n\tauthSuccesses.With(\"method\", \"oauth2\").Add(1)\n\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\tw.WriteHeader(http.StatusOK)\n}", "func (api *CoreHandler) AuthorizeCallback(w http.ResponseWriter, r *http.Request) {\n\tlog.Println(\"Reaceived callback from Instagram oauth\")\n\n\t// Get the query string\n\tvals := r.URL.Query()\n\n\t// If \"error\" is not an empty string we have not received our access code\n\t// This is error param is specified by the Reddit API\n\tif val, ok := vals[\"error\"]; ok {\n\t\tif len(val) != 0 {\n\t\t\tlog.Printf(\"Did not receive authorization. 
Error: %v\\n\", vals[\"error\"][0])\n\t\t\t// This is the case where the user likely denied us access\n\t\t\t// TODO: should redirect back to appropriate page in front-end\n\t\t\treturn\n\t\t}\n\t}\n\n\tvar instaAuth *InstagramAuthResponse\n\tvar err error\n\t// Make sure the code exists\n\tif len(vals[\"code\"]) > 0 {\n\t\t// Now request bearer token using the code we received\n\t\tinstaAuth, err = api.requestToken(vals[\"code\"][0])\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Unable to receive bearer token: %v\\n\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tlog.Printf(\"Received the following auth from instagram: %+v\", *instaAuth)\n\n\t// Post code back to core async as the rest is not dependant on this -- vals[\"state\"] should be userID\n\tgo api.postInstaAuth(instaAuth, vals[\"state\"][0])\n\n\t// Redirect to frontend\n\thttp.Redirect(w, r, api.conf.FrontendURL, http.StatusMovedPermanently)\n}", "func Authorize(redirect string) (string, error) {\n\tstate := uuid.New().String()\n\tctx := context.Background()\n\n\terr := storage.Redis.Set(ctx, wrapStateKey(state), redirect, time.Duration(300*time.Second)).Err()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn cli.config.AuthCodeURL(state), nil\n}", "func afterLogin(ctx *context.Context, u *models.User, remember bool) {\n\tif remember {\n\t\tdays := 86400 * setting.LoginRememberDays\n\t\tctx.SetCookie(setting.CookieUserName, u.UserName, days, setting.AppSubURL, \"\", setting.CookieSecure, true)\n\t\tctx.SetSuperSecureCookie(u.Rands+u.Password, setting.CookieRememberName, u.UserName, days, setting.AppSubURL, \"\", setting.CookieSecure, true)\n\t}\n\n\tctx.Session.Set(\"uid\", u.ID)\n\tctx.Session.Set(\"uname\", u.UserName)\n\n\t// Clear CSRF and force regenerate one\n\tctx.SetCookie(setting.CSRFCookieName, \"\", -1, setting.AppSubURL)\n\tif setting.EnableLoginStatusCookie {\n\t\tctx.SetCookie(setting.LoginStatusCookieName, \"true\", 0, setting.AppSubURL)\n\t}\n\n\tredirectTo, _ := url.QueryUnescape(ctx.GetCookie(\"redirect_to\"))\n\tctx.SetCookie(\"redirect_to\", \"\", -1, setting.AppSubURL)\n\tif isValidRedirect(redirectTo) {\n\t\tctx.Redirect(redirectTo)\n\t\treturn\n\t}\n\n\tctx.Redirect(setting.AppSubURL + \"/\")\n}", "func setRedirectHandler (w http.ResponseWriter, r *http.Request) {\n r.ParseForm()\n cookieTemp := r.FormValue(\"cookie\")\n cookieName := r.FormValue(\"name\") // UUID (used as cookieMap's cookie name)\n\n if cookieTemp == \"\" || cookieName == \"\" {\n\tw.WriteHeader(400) // set response code to 400, request malformed\n\treturn\n } else {\n w.WriteHeader(200) // set response code to 200, request processed\n }\n\n // attempt to add cookie to internal cookie map\n var newCookie http.Cookie\n err1 := json.Unmarshal([]byte(cookieTemp), &newCookie)\n if err1 != nil {\n fmt.Println(\"Error unmarshalling new cookie\")\n\n if printToFile == 1 {\n\t defer Log.Flush()\n\t Log.Error(\"Error unmarshalling new cookie\")\n\t return\n\t}\n }\n \n mutex.Lock()\n cookieMap[cookieName] = newCookie\n mutex.Unlock()\n}", "func (app *appVars) oauthRedirect(w http.ResponseWriter, r *http.Request) {\n\n\t// get and compare state to prevent Cross-Site Request Forgery\n\tstate := r.FormValue(\"state\")\n\tif state != app.state {\n\t\tlog.Fatalln(\"state is not the same (CSRF?)\")\n\t}\n\n\t// get authorization code\n\tcode := r.FormValue(\"code\")\n\n\t// exchange authorization code for token\n\ttoken, err := app.conf.Exchange(app.ctx, code)\n\tif err != nil {\n\t\tlog.Println(\"conf.Exchange\", err)\n\t\t// signal that authorization was not 
successful\n\t\tapp.authChan <- false\n\t\treturn\n\t}\n\n\t// update HTTP client with token\n\tapp.client = app.conf.Client(app.ctx, token)\n\n\t// TODO\n\tapp.token = token\n\n\tconst tpl = `\n<!DOCTYPE html>\n<html>\n\t<head>\n\t\t<meta charset=\"UTF-8\">\n\t\t<title>{{.Title}}</title>\n\t</head>\n\t<body>\n\t<p>Authorization successful\n\t<p><a href=\"{{.BaseUrl}}/listNotebooks\">List Notebooks</a> \n\t<p><a href=\"{{.BaseUrl}}/listPages\">List Pages</a> \n\t</body>\n</html>`\n\n\tt, err := template.New(\"authorized\").Parse(tpl)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdata := struct {\n\t\tTitle string\n\t\tBaseUrl string\n\t}{}\n\n\tdata.Title = \"Authorized\"\n\tdata.BaseUrl = \"http://localhost:9999\"\n\n\terr = t.Execute(w, data)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn\n}", "func Authorize(redirect string) (string, error) {\n\tstate := &models.AuthState{\n\t\tState: uuid.New().String(),\n\t\tTyp: \"OAuth2.CODE\",\n\t\tRedirect: redirect,\n\t\tExpiresAt: time.Now().Unix() + cli.stateExpiresIn,\n\t}\n\n\tif err := state.Save(); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// log.Printf(\"add state %s\", state)\n\treturn cli.config.AuthCodeURL(state.State), nil\n}", "func clearRedirectHandler (w http.ResponseWriter, r *http.Request) {\n r.ParseForm()\n cookieName := r.FormValue(\"cookie\") // UUID (used as cookieMap's cookie name)\n if cookieName == \"\"{\n\tw.WriteHeader(400) // set response code to 400, request malformed\n\treturn\n } else {\n delete(cookieMap, cookieName) // delete cookie from map (if exists)\n w.WriteHeader(200) // set response code to 200, request processed\n }\n}", "func DisconnectHandler(w http.ResponseWriter, r *http.Request) {\n\tdb := Connect()\n\tdefer db.Close()\n\n\tcanAccess, account := ValidateAuth(db, r, w)\n\tif !canAccess {\n\t\treturn\n\t}\n\n\terr := UnlinkAnyConnection(db, account, 0)\n\tif err != nil {\n\t\tlog.Printf(\"UnlinkAnyConnection failed: %s\", err)\n\t\thttp.Error(w, \"could not unlink connection\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tstateResponse := &StateResponse{}\n\tif err := json.NewEncoder(w).Encode(stateResponse); err != nil {\n\t\tpanic(err)\n\t}\n}" ]
[ "0.80746967", "0.625512", "0.6228842", "0.6228461", "0.5922042", "0.59072685", "0.58812433", "0.5815238", "0.5804305", "0.5746236", "0.5677897", "0.5610163", "0.5562772", "0.5533723", "0.5492632", "0.5460316", "0.54499465", "0.54415345", "0.5431017", "0.53495276", "0.5328235", "0.5323974", "0.5312351", "0.5299858", "0.52953243", "0.52503747", "0.52223694", "0.52098805", "0.51713914", "0.51287067", "0.5116791", "0.50927716", "0.5089623", "0.5087798", "0.5069357", "0.50638914", "0.50551534", "0.50485456", "0.50265884", "0.5026457", "0.5016945", "0.5014298", "0.49735782", "0.49670273", "0.49627897", "0.49575773", "0.49567884", "0.49550933", "0.4945631", "0.4921843", "0.49193558", "0.49152985", "0.4913467", "0.49115658", "0.4897556", "0.48870748", "0.48849562", "0.4878197", "0.48768958", "0.48618814", "0.48561898", "0.48499364", "0.4821826", "0.48186967", "0.48165986", "0.48109978", "0.47951874", "0.47837928", "0.47699136", "0.4741507", "0.47386804", "0.47252363", "0.47177374", "0.47168317", "0.471175", "0.4711425", "0.47081858", "0.47004685", "0.46883866", "0.46866095", "0.46837026", "0.46628264", "0.46618122", "0.46599966", "0.4654871", "0.4651928", "0.4641975", "0.46385264", "0.46339124", "0.46293375", "0.46282965", "0.4626057", "0.46244502", "0.4623781", "0.4617624", "0.4613757", "0.46072364", "0.460623", "0.45975986", "0.45883995" ]
0.8837215
0
HandleOIDCLogin performs the login/authentication of the OIDC context: the user of the external authentication provider is checked against the database. If a match is found, a token with the valid claims is created and a redirect is made to the defined URL
func (a *loginAPI) HandleOIDCLogin(w http.ResponseWriter, r *http.Request) error {
	ctx := context.Background()

	// read the stateParam again
	state := a.appCookie.Get(stateParam, r)
	log.WithField("func", "server.HandleOIDCLogin").Debugf("got state param: %s", state)

	if query(r, stateParam) != state {
		return errors.BadRequestError{Err: fmt.Errorf("state did not match"), Request: r}
	}
	a.appCookie.Del(stateParam, w)

	// is this an auth/flow request
	var (
		authFlow       bool
		site, redirect string
	)
	authFlowParams := a.appCookie.Get(authFlowCookie, r)
	if authFlowParams != "" {
		log.WithField("func", "server.HandleOIDCLogin").Debugf("auth/flow login-mode")
		parts := strings.Split(authFlowParams, "|")
		site = parts[0]
		redirect = parts[1]
		authFlow = true
	}
	a.appCookie.Del(authFlowCookie, w)

	oauth2Token, err := a.oauthConfig.Exchange(ctx, query(r, codeParam))
	if err != nil {
		return errors.ServerError{Err: fmt.Errorf("failed to exchange token: %v", err), Request: r}
	}
	rawIDToken, ok := oauth2Token.Extra(idTokenParam).(string)
	if !ok {
		return errors.ServerError{Err: fmt.Errorf("no id_token field in oauth2 token"), Request: r}
	}
	idToken, err := a.oauthVerifier.VerifyToken(ctx, rawIDToken)
	if err != nil {
		return errors.ServerError{Err: fmt.Errorf("failed to verify ID Token: %v", err), Request: r}
	}

	var oidcClaims struct {
		Email         string `json:"email"`
		EmailVerified bool   `json:"email_verified"`
		DisplayName   string `json:"name"`
		PicURL        string `json:"picture"`
		GivenName     string `json:"given_name"`
		FamilyName    string `json:"family_name"`
		Locale        string `json:"locale"`
		UserID        string `json:"sub"`
	}
	if err := idToken.GetClaims(&oidcClaims); err != nil {
		return errors.ServerError{Err: fmt.Errorf("claims error: %v", err), Request: r}
	}

	// the user was authenticated successfully, check if sites are available for the given user!
	success := true
	sites, err := a.repo.GetSitesByUser(oidcClaims.Email)
	if err != nil {
		log.WithField("func", "server.HandleOIDCLogin").Warnf("successful login by '%s' but error fetching sites! %v", oidcClaims.Email, err)
		success = false
	}
	if len(sites) == 0 {
		log.WithField("func", "server.HandleOIDCLogin").Warnf("successful login by '%s' but no sites available!", oidcClaims.Email)
		success = false
	}
	if authFlow {
		log.WithField("func", "server.HandleOIDCLogin").Debugf("auth/flow - check for specific site '%s'", site)
		success = false
		// check specific site
		for _, e := range sites {
			if e.Name == site {
				success = true
				break
			}
		}
	}
	if !success {
		a.appCookie.Set(errors.FlashKeyError, fmt.Sprintf("User '%s' is not allowed to login!", oidcClaims.Email), cookieExpiry, w)
		http.Redirect(w, r, "/error", http.StatusTemporaryRedirect)
		return nil
	}

	// create the token using the claims of the database
	var siteClaims []string
	for _, s := range sites {
		siteClaims = append(siteClaims, fmt.Sprintf("%s|%s|%s", s.Name, s.URL, s.PermList))
	}
	claims := security.Claims{
		Type:        "login.User",
		DisplayName: oidcClaims.DisplayName,
		Email:       oidcClaims.Email,
		UserID:      oidcClaims.UserID,
		UserName:    oidcClaims.Email,
		GivenName:   oidcClaims.GivenName,
		Surname:     oidcClaims.FamilyName,
		Claims:      siteClaims,
	}
	token, err := security.CreateToken(a.jwt.JwtIssuer, []byte(a.jwt.JwtSecret), a.jwt.Expiry, claims)
	if err != nil {
		log.WithField("func", "server.HandleOIDCLogin").Errorf("could not create a JWT token: %v", err)
		return errors.ServerError{Err: fmt.Errorf("error creating JWT: %v", err), Request: r}
	}

	login := persistence.Login{
		User:    oidcClaims.Email,
		Created: time.Now().UTC(),
		Type:    persistence.DIRECT,
	}
	if authFlow {
		login.Type = persistence.FLOW
	}
	err = a.repo.StoreLogin(login, per.Atomic{})
	if err != nil {
		log.WithField("func", "server.HandleOIDCLogin").Errorf("the login could not be saved: %v", err)
		return errors.ServerError{Err: fmt.Errorf("error storing the login: %v", err), Request: r}
	}

	// set the cookie
	exp := a.jwt.Expiry * 24 * 3600
	a.setJWTCookie(a.jwt.CookieName, token, exp, w)

	redirectURL := a.jwt.LoginRedirect
	if authFlow {
		log.WithField("func", "server.HandleOIDCLogin").Debugf("auth/flow - redirect to specific URL: '%s'", redirect)
		redirectURL = redirect
	}
	// redirect to provided URL
	http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)
	return nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func HandleLogin(w http.ResponseWriter, r *http.Request) (err error) {\n\tsession, err := cookieStore.Get(r, oauthSessionName)\n\tif err != nil {\n\t\tlog.Printf(\"corrupted session %s -- generated new\", err)\n\t\terr = nil\n\t}\n\n\tvar tokenBytes [255]byte\n\tif _, err := rand.Read(tokenBytes[:]); err != nil {\n\t\treturn AnnotateError(err, \"Couldn't generate a session!\", http.StatusInternalServerError)\n\t}\n\n\tstate := hex.EncodeToString(tokenBytes[:])\n\n\tsession.AddFlash(state, stateCallbackKey)\n\n\tif err = session.Save(r, w); err != nil {\n\t\treturn\n\t}\n\n\thttp.Redirect(w, r, oauth2Config.AuthCodeURL(state, claims), http.StatusTemporaryRedirect)\n\n\treturn\n}", "func (p *OIDCProvider) LoginHandler() http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\turl := p.oauth2Config.AuthCodeURL(state)\n\t\thttp.Redirect(w, r, url, http.StatusSeeOther)\n\t})\n}", "func loginRedirectHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tprovider, err := strconv.Atoi(vars[\"provider\"])\n\tif err != nil {\n\t\thttp.Error(w, \"Invalid identity provider\", http.StatusInternalServerError)\n\t\treturn\n\t} else {\n\t\tidp.Authenticate(provider, w, r)\n\t}\n}", "func (a *loginAPI) HandleOIDCRedirect(w http.ResponseWriter, r *http.Request) error {\n\tstate := randToken()\n\ta.appCookie.Set(stateParam, state, cookieExpiry, w)\n\tlog.WithField(\"func\", \"server.HandleOIDCRedirect\").Debugf(\"GetRedirect: initiate using state '%s'\", state)\n\thttp.Redirect(w, r, a.GetOIDCRedirectURL(), http.StatusTemporaryRedirect)\n\treturn nil\n}", "func HandleLoginRedirect(r *http.Request, w http.ResponseWriter, cfg *setting.Cfg, identity *Identity, validator RedirectValidator) {\n\tredirectURL := handleLogin(r, w, cfg, identity, validator)\n\thttp.Redirect(w, r, redirectURL, http.StatusFound)\n}", "func LoginHandler(c echo.Context) error {\n\tprovider, err := gomniauth.Provider(c.Param(\"provider\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tauthURL, err := provider.GetBeginAuthURL(nil, nil)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn c.Redirect(http.StatusTemporaryRedirect, authURL)\n}", "func (a *loginAPI) HandleOIDCRedirectFinal(w http.ResponseWriter, r *http.Request) error {\n\tstate := a.appCookie.Get(stateParam, r)\n\tif state == \"\" {\n\t\tlog.WithField(\"func\", \"server.HandleOIDCRedirectFinal\").Debugf(\"emptiy state from cookie, referrer: '%s'\", r.Referer())\n\t\treturn errors.BadRequestError{Err: fmt.Errorf(\"missing state, cannot initiate OIDC\"), Request: r}\n\t}\n\tlog.WithField(\"func\", \"server.HandleOIDCRedirectFinal\").Debugf(\"initiate OIDC redirect using state: '%s'\", state)\n\thttp.Redirect(w, r, a.oauthConfig.AuthCodeURL(state), http.StatusFound)\n\treturn nil\n}", "func (s *Server) handleLogin(w http.ResponseWriter, req *http.Request) error {\n\toauthState := uuid.New().String()\n\tloginSession, err := s.cookieStore.Get(req, LoginSessionName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tloginSession.Options = &sessions.Options{\n\t\tMaxAge: 600,\n\t\tHttpOnly: true,\n\t\tSecure: s.opts.SecureCookie,\n\t}\n\tloginSession.Values[\"oauth_state\"] = oauthState\n\terr = loginSession.Save(req, w)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error saving session: %s\", err)\n\t}\n\turl := s.oauthConfig.AuthCodeURL(oauthState)\n\thttp.Redirect(w, req, url, http.StatusTemporaryRedirect)\n\treturn nil\n}", "func (h *UserAuthHandler) OAuthLogin() echo.HandlerFunc {\n\treturn func(c echo.Context) error 
{\n\t\tredirectURL := c.QueryParam(\"redirect_url\")\n\t\tif redirectURL == \"\" {\n\t\t\th.Logger.Warn().Msg(\"missing redirect url\")\n\t\t\treturn c.String(http.StatusBadRequest, \"error invalid redirect_url\")\n\t\t}\n\n\t\tlog := h.Logger.With().Str(\"redirect_url\", redirectURL).Logger()\n\n\t\tstate, err := h.OAuth.GenerateState(redirectURL)\n\t\tif err != nil {\n\t\t\tlog.Debug().Err(err).Msg(\"failed to generate state\")\n\t\t\treturn c.String(http.StatusUnauthorized, \"error generating state\")\n\t\t}\n\n\t\turl := h.OAuth.GetAuthCodeURL(state)\n\t\treturn c.Redirect(http.StatusTemporaryRedirect, url)\n\t}\n}", "func LoginHandler(db *sql.DB) func(http.ResponseWriter, *http.Request, httprouter.Params) {\n\treturn func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\n\t\tvar creds s.Credentials\n\t\terr := json.NewDecoder(r.Body).Decode(&creds)\n\t\tusername = creds.Username\n\n\t\tif err != nil || creds.Username == \"\" || creds.Password == \"\" {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tpasswordBool := checkPassword(db, w, creds)\n\n\t\tif !passwordBool {\n\t\t\t// Unauthorised access\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\n\t\t// Login Success\n\t\tif !(generateToken(w, r, creds)) {\n\t\t\treturn\n\t\t}\n\n\t\tfmt.Println(\"Login Successful\")\n\t\thttp.Redirect(w, r, \"/login/sample\", 301)\n\t}\n}", "func HandleLogin(w http.ResponseWriter, r *http.Request) {\n\terr := r.ParseForm()\n\tif err != nil {\n\t\tServeHandleIncorrect(w, r)\n\t\treturn\n\t}\n\tvalues := LoginFormValues{}\n\tdecoder := schema.NewDecoder()\n\terr = decoder.Decode(&values, r.PostForm)\n\tif err != nil {\n\t\tServeInternalServerError(w, r)\n\t\treturn\n\t}\n\n\tacc, err := data.GetAccountByHandle(values.Handle)\n\tif err == mgo.ErrNotFound {\n\t\tServeHandleIncorrect(w, r)\n\t\treturn\n\t}\n\tif err != nil {\n\t\tServeInternalServerError(w, r)\n\t\treturn\n\t}\n\tm := acc.Password.Match(values.Password)\n\tif !m {\n\t\thttp.Redirect(w, r, \"/login\", http.StatusSeeOther)\n\t\treturn\n\t}\n\n\tsess, err := store.Get(r, \"s\")\n\tif err != nil {\n\t\tServeInternalServerError(w, r)\n\t\treturn\n\t}\n\tsess.Values[\"accountID\"] = acc.ID.Hex()\n\tsess.Save(r, w)\n\thttp.Redirect(w, r, \"/tasks\", http.StatusSeeOther)\n}", "func handleOidcCallback(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tclientName string,\n\tclientID string,\n\tclientSecret string,\n\tredirectURI string,\n\twellKnownConfig oidc.WellKnownConfiguration,\n\tstate string,\n\tcodeVerifier string,\n\tcancel context.CancelFunc,\n) {\n\tvar authorisationResponse, err = oidc.ValidateAuthorisationResponse(r.URL, state)\n\tif err != nil {\n\t\trenderAndLogError(w, cancel, fmt.Sprintf(\"%v\", err))\n\t\treturn\n\t}\n\n\tviewModel, err := VerifyCode(clientID, clientSecret, redirectURI, wellKnownConfig, codeVerifier, authorisationResponse.Code)\n\tif err != nil {\n\t\trenderAndLogError(w, cancel, fmt.Sprintf(\"%v\", err))\n\t\treturn\n\t}\n\n\t// show webpage\n\tt := template.New(\"credentials\")\n\t_, parseErr := t.Parse(TokenResultView())\n\tif parseErr != nil {\n\t\trenderAndLogError(w, cancel, fmt.Sprintf(\"%v\", parseErr))\n\t\treturn\n\t}\n\ttplErr := t.Execute(w, viewModel)\n\tif tplErr != nil {\n\t\trenderAndLogError(w, cancel, fmt.Sprintf(\"%v\", tplErr))\n\t\treturn\n\t}\n\n\tcancel()\n}", "func (e VerifyHandler) LoginHandler(w http.ResponseWriter, r *http.Request) {\n\n\t// GET /login?site=site&user=name&[email protected]\n\ttkn := 
r.URL.Query().Get(\"token\")\n\tif tkn == \"\" { // no token, ask confirmation via email\n\t\te.sendConfirmation(w, r)\n\t\treturn\n\t}\n\n\t// confirmation token presented\n\t// GET /login?token=confirmation-jwt&sess=1\n\tconfClaims, err := e.TokenService.Parse(tkn)\n\tif err != nil {\n\t\trest.SendErrorJSON(w, r, e.L, http.StatusForbidden, err, \"failed to verify confirmation token\")\n\t\treturn\n\t}\n\n\tif e.TokenService.IsExpired(confClaims) {\n\t\trest.SendErrorJSON(w, r, e.L, http.StatusForbidden, fmt.Errorf(\"expired\"), \"failed to verify confirmation token\")\n\t\treturn\n\t}\n\n\telems := strings.Split(confClaims.Handshake.ID, \"::\")\n\tif len(elems) != 2 {\n\t\trest.SendErrorJSON(w, r, e.L, http.StatusBadRequest, fmt.Errorf(\"%s\", confClaims.Handshake.ID), \"invalid handshake token\")\n\t\treturn\n\t}\n\tuser, address := elems[0], elems[1]\n\tsessOnly := r.URL.Query().Get(\"sess\") == \"1\"\n\n\tu := token.User{\n\t\tName: user,\n\t\tID: e.ProviderName + \"_\" + token.HashID(sha1.New(), address),\n\t}\n\t// try to get gravatar for email\n\tif e.UseGravatar && strings.Contains(address, \"@\") { // TODO: better email check to avoid silly hits to gravatar api\n\t\tif picURL, e := avatar.GetGravatarURL(address); e == nil {\n\t\t\tu.Picture = picURL\n\t\t}\n\t}\n\n\tif u, err = setAvatar(e.AvatarSaver, u, &http.Client{Timeout: 5 * time.Second}); err != nil {\n\t\trest.SendErrorJSON(w, r, e.L, http.StatusInternalServerError, err, \"failed to save avatar to proxy\")\n\t\treturn\n\t}\n\n\tcid, err := randToken()\n\tif err != nil {\n\t\trest.SendErrorJSON(w, r, e.L, http.StatusInternalServerError, err, \"can't make token id\")\n\t\treturn\n\t}\n\n\tclaims := token.Claims{\n\t\tUser: &u,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tId: cid,\n\t\t\tIssuer: e.Issuer,\n\t\t\tAudience: confClaims.Audience,\n\t\t},\n\t\tSessionOnly: sessOnly,\n\t}\n\n\tif _, err = e.TokenService.Set(w, claims); err != nil {\n\t\trest.SendErrorJSON(w, r, e.L, http.StatusInternalServerError, err, \"failed to set token\")\n\t\treturn\n\t}\n\tif confClaims.Handshake != nil && confClaims.Handshake.From != \"\" {\n\t\thttp.Redirect(w, r, confClaims.Handshake.From, http.StatusTemporaryRedirect)\n\t\treturn\n\t}\n\trest.RenderJSON(w, claims.User)\n}", "func (uh *UserHandler) HandleLinkedInLogin(w http.ResponseWriter, r *http.Request) {\n\tOauthStateString = stringTools.RandomStringGN(20)\n\turl := linkedinOauthConfig.AuthCodeURL(OauthStateString)\n\thttp.Redirect(w, r, url, http.StatusSeeOther)\n}", "func (j *AuthMux) Login() http.Handler {\n\tconf := j.Provider.Config()\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// We are creating a token with an encoded random string to prevent CSRF attacks\n\t\t// This token will be validated during the OAuth callback.\n\t\t// We'll give our users 10 minutes from this point to type in their\n\t\t// oauth2 provider's password.\n\t\t// If the callback is not received within 10 minutes, then authorization will fail.\n\t\tcsrf := randomString(32) // 32 is not important... just long\n\t\tnow := j.Now()\n\n\t\t// This token will be valid for 10 minutes. 
Any chronograf server will\n\t\t// be able to validate this token.\n\t\tp := Principal{\n\t\t\tSubject: csrf,\n\t\t\tIssuedAt: now,\n\t\t\tExpiresAt: now.Add(TenMinutes),\n\t\t}\n\t\ttoken, err := j.Tokens.Create(r.Context(), p)\n\n\t\t// This is likely an internal server error\n\t\tif err != nil {\n\t\t\tj.Logger.\n\t\t\t\tWithField(\"component\", \"auth\").\n\t\t\t\tWithField(\"remote_addr\", r.RemoteAddr).\n\t\t\t\tWithField(\"method\", r.Method).\n\t\t\t\tWithField(\"url\", r.URL).\n\t\t\t\tError(\"Internal authentication error: \", err.Error())\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\turl := conf.AuthCodeURL(string(token), oauth2.AccessTypeOnline)\n\t\thttp.Redirect(w, r, url, http.StatusTemporaryRedirect)\n\t})\n}", "func (r *Login) Handle(context provider.APIContext, sess *sessions.Session) {\n\tstate, err := r.RandToken(32)\n\tif err != nil {\n\t\t_ = context.JSON(http.StatusBadRequest, map[string]interface{}{\n\t\t\t\"errors\": []string{\"bad request given by client\", \"Error while generating random data.\"},\n\t\t\t\"message\": \"Bad request\",\n\t\t})\n\t\treturn\n\t}\n\n\tif val, ok := sess.Values[\"google-user\"]; ok {\n\t\tvar users entity.GoogleUser\n\t\tif val != \"\" {\n\t\t\tstrMarshal := fmt.Sprintf(\"%v\", val)\n\t\t\tjson.Unmarshal([]byte(strMarshal), &users)\n\n\t\t\t_ = context.JSON(http.StatusOK, map[string]interface{}{\n\t\t\t\t\"data\": users,\n\t\t\t})\n\t\t\treturn\n\t\t}\n\t}\n\n\tswitch context.Param(\"provider\") {\n\tcase \"google\":\n\t\tsess.Values[\"state\"] = state\n\t\t_ = sess.Save(context.Request(), context.Response())\n\n\t\tresponse, err := r.oauthProvider.Login(context.Request().Context(), state)\n\t\tif err != nil {\n\t\t\t_ = context.JSON(err.HTTPStatus, map[string]interface{}{\n\t\t\t\t\"errors\": err.ErrorString(),\n\t\t\t\t\"message\": err.Error(),\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\t_ = context.JSON(http.StatusOK, map[string]interface{}{\n\t\t\t\"data\": response,\n\t\t})\n\tdefault:\n\t\t_ = context.JSON(http.StatusBadRequest, map[string]interface{}{\n\t\t\t\"errors\": []string{fmt.Sprintf(\"provider not found %s\", context.Param(\"provider\"))},\n\t\t\t\"message\": \"Bad request\",\n\t\t})\n\t\treturn\n\t}\n}", "func OauthLoginHandler(db *sqlx.DB, cfg config.Config) http.HandlerFunc {\n\t// The jwk.AutoRefresh and jwk.Whitelist objects only get created once.\n\t// They are shared between all handlers\n\t// Note: This assumes two things:\n\t// 1) that the cfg.ConfigTrafficOpsGolang.WhitelistedOAuthUrls is not updated once it has been initialized\n\t// 2) OauthLoginHandler is not called concurrently\n\tif jwksFetcher == nil {\n\t\tar := jwk.NewAutoRefresh(context.TODO())\n\t\twl := &whitelist{urls: cfg.ConfigTrafficOpsGolang.WhitelistedOAuthUrls}\n\t\tjwksFetcher = &jwksFetch{\n\t\t\tar: ar,\n\t\t\twl: wl,\n\t\t}\n\t}\n\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tdefer r.Body.Close()\n\t\tresp := struct {\n\t\t\ttc.Alerts\n\t\t}{}\n\n\t\tform := auth.PasswordForm{}\n\t\tparameters := struct {\n\t\t\tAuthCodeTokenUrl string `json:\"authCodeTokenUrl\"`\n\t\t\tCode string `json:\"code\"`\n\t\t\tClientId string `json:\"clientId\"`\n\t\t\tRedirectUri string `json:\"redirectUri\"`\n\t\t}{}\n\n\t\tif err := json.NewDecoder(r.Body).Decode(&parameters); err != nil {\n\t\t\tapi.HandleErr(w, r, nil, http.StatusBadRequest, err, nil)\n\t\t\treturn\n\t\t}\n\n\t\tmatched, err := VerifyUrlOnWhiteList(parameters.AuthCodeTokenUrl, cfg.ConfigTrafficOpsGolang.WhitelistedOAuthUrls)\n\t\tif 
err != nil {\n\t\t\tapi.HandleErr(w, r, nil, http.StatusInternalServerError, nil, err)\n\t\t\treturn\n\t\t}\n\t\tif !matched {\n\t\t\tapi.HandleErr(w, r, nil, http.StatusForbidden, nil, errors.New(\"Key URL from token is not included in the whitelisted urls. Received: \"+parameters.AuthCodeTokenUrl))\n\t\t\treturn\n\t\t}\n\n\t\tdata := url.Values{}\n\t\tdata.Add(\"code\", parameters.Code)\n\t\tdata.Add(\"client_id\", parameters.ClientId)\n\t\tdata.Add(\"grant_type\", \"authorization_code\") // Required by RFC6749 section 4.1.3\n\t\tdata.Add(\"redirect_uri\", parameters.RedirectUri)\n\n\t\treq, err := http.NewRequest(http.MethodPost, parameters.AuthCodeTokenUrl, bytes.NewBufferString(data.Encode()))\n\t\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\t\tif cfg.OAuthClientSecret != \"\" {\n\t\t\treq.Header.Set(\"Authorization\", \"Basic \"+base64.StdEncoding.EncodeToString([]byte(parameters.ClientId+\":\"+cfg.OAuthClientSecret))) // per RFC6749 section 2.3.1\n\t\t}\n\t\tif err != nil {\n\t\t\tapi.HandleErr(w, r, nil, http.StatusInternalServerError, nil, fmt.Errorf(\"obtaining token using code from oauth provider: %w\", err))\n\t\t\treturn\n\t\t}\n\n\t\tclient := http.Client{\n\t\t\tTimeout: 30 * time.Second,\n\t\t}\n\t\tresponse, err := client.Do(req)\n\t\tif err != nil {\n\t\t\tapi.HandleErr(w, r, nil, http.StatusInternalServerError, nil, fmt.Errorf(\"getting an http client: %w\", err))\n\t\t\treturn\n\t\t}\n\t\tdefer response.Body.Close()\n\n\t\tbuf := new(bytes.Buffer)\n\t\tbuf.ReadFrom(response.Body)\n\t\tencodedToken := \"\"\n\n\t\tvar result map[string]interface{}\n\t\tif err := json.Unmarshal(buf.Bytes(), &result); err != nil {\n\t\t\tlog.Warnf(\"Error parsing JSON response from OAuth: %s\", err)\n\t\t\tencodedToken = buf.String()\n\t\t} else if _, ok := result[rfc.IDToken]; !ok {\n\t\t\tsysErr := fmt.Errorf(\"Missing access token in response: %s\\n\", buf.String())\n\t\t\tusrErr := errors.New(\"Bad response from OAuth2.0 provider\")\n\t\t\tapi.HandleErr(w, r, nil, http.StatusBadGateway, usrErr, sysErr)\n\t\t\treturn\n\t\t} else {\n\t\t\tswitch t := result[rfc.IDToken].(type) {\n\t\t\tcase string:\n\t\t\t\tencodedToken = result[rfc.IDToken].(string)\n\t\t\tdefault:\n\t\t\t\tsysErr := fmt.Errorf(\"Incorrect type of access_token! 
Expected 'string', got '%v'\\n\", t)\n\t\t\t\tusrErr := errors.New(\"Bad response from OAuth2.0 provider\")\n\t\t\t\tapi.HandleErr(w, r, nil, http.StatusBadGateway, usrErr, sysErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tif encodedToken == \"\" {\n\t\t\tapi.HandleErr(w, r, nil, http.StatusBadRequest, errors.New(\"Token not found in request but is required\"), nil)\n\t\t\treturn\n\t\t}\n\n\t\tvar decodedToken jwt.Token\n\t\tif decodedToken, err = jwt.Parse(\n\t\t\t[]byte(encodedToken),\n\t\t\tjwt.WithVerifyAuto(true),\n\t\t\tjwt.WithJWKSetFetcher(jwksFetcher),\n\t\t); err != nil {\n\t\t\tif decodedToken, err = jwt.Parse(\n\t\t\t\t[]byte(encodedToken),\n\t\t\t\tjwt.WithVerifyAuto(false),\n\t\t\t\tjwt.WithJWKSetFetcher(jwksFetcher),\n\t\t\t); err != nil {\n\t\t\t\tapi.HandleErr(w, r, nil, http.StatusInternalServerError, nil, fmt.Errorf(\"error decoding token with message: %w\", err))\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tvar userIDInterface interface{}\n\t\tvar userID string\n\t\tvar ok bool\n\t\tif cfg.OAuthUserAttribute != \"\" {\n\t\t\tattributes := decodedToken.PrivateClaims()\n\t\t\tif userIDInterface, ok = attributes[cfg.OAuthUserAttribute]; !ok {\n\t\t\t\tapi.HandleErr(w, r, nil, http.StatusInternalServerError, nil, fmt.Errorf(\"Non-existent OAuth attribute : %s\", cfg.OAuthUserAttribute))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tuserID = userIDInterface.(string)\n\t\t} else {\n\t\t\tuserID = decodedToken.Subject()\n\t\t}\n\t\tform.Username = userID\n\n\t\tdbCtx, cancelTx := context.WithTimeout(r.Context(), time.Duration(cfg.DBQueryTimeoutSeconds)*time.Second)\n\t\tdefer cancelTx()\n\t\tuserAllowed, err, blockingErr := auth.CheckLocalUserIsAllowed(form.Username, db, dbCtx)\n\t\tif blockingErr != nil {\n\t\t\tapi.HandleErr(w, r, nil, http.StatusServiceUnavailable, nil, fmt.Errorf(\"error checking local user password: %s\\n\", blockingErr.Error()))\n\t\t\treturn\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"checking local user: %s\\n\", err)\n\t\t}\n\n\t\tif userAllowed {\n\t\t\t_, dbErr := db.Exec(UpdateLoginTimeQuery, form.Username)\n\t\t\tif dbErr != nil {\n\t\t\t\tdbErr = fmt.Errorf(\"unable to update authentication time for user '%s': %w\", form.Username, dbErr)\n\t\t\t\tapi.HandleErr(w, r, nil, http.StatusInternalServerError, nil, dbErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\thttpCookie := tocookie.GetCookie(userID, defaultCookieDuration, cfg.Secrets[0])\n\t\t\thttp.SetCookie(w, httpCookie)\n\t\t\tresp = struct {\n\t\t\t\ttc.Alerts\n\t\t\t}{tc.CreateAlerts(tc.SuccessLevel, \"Successfully logged in.\")}\n\t\t} else {\n\t\t\tresp = struct {\n\t\t\t\ttc.Alerts\n\t\t\t}{tc.CreateAlerts(tc.ErrorLevel, \"Invalid username or password.\")}\n\t\t}\n\n\t\trespBts, err := json.Marshal(resp)\n\t\tif err != nil {\n\t\t\tapi.HandleErr(w, r, nil, http.StatusInternalServerError, nil, fmt.Errorf(\"encoding response: %w\", err))\n\t\t\treturn\n\t\t}\n\t\tw.Header().Set(rfc.ContentType, rfc.ApplicationJSON)\n\t\tif !userAllowed {\n\t\t\tw.WriteHeader(http.StatusForbidden)\n\t\t}\n\t\tfmt.Fprintf(w, \"%s\", respBts)\n\n\t}\n}", "func HandleUserLogin(context *gin.Context) {\n\tuserAcc := context.PostForm(\"user_acc\")\n\tuserPassword := context.PostForm(\"user_password\")\n\n\t// find the user and check the password\n\t// if right, return a token, otherwise refuse login\n\tuserTry := models.User{}\n\tif db.DB.Where(\"user_acc = ?\", userAcc).First(&userTry).RecordNotFound(){\n\t\tcontext.JSON(200, gin.H{\n\t\t\t\"status\": \"error\",\n\t\t\t\"code\": http.StatusNotFound,\n\t\t\t\"msg\": \"login what? 
you are not even exist!\",\n\t\t\t\"data\": \"\",\n\t\t})\n\t} else {\n\t\t//log.Infof(\"[login] here what found: %s\", userTry)\n\t\tif userTry.UserPassword == userPassword{\n\t\t\t// return a token?\n\t\t\tclaims := make(map[string]interface{})\n\t\t\tclaims[\"id\"] = userTry.ID\n\t\t\tclaims[\"msg\"] = \"hiding egg\"\n\t\t\tclaims[\"user_addr\"] = userTry.UserAddr\n\t\t\ttoken, _ := utils.Encrypt(claims)\n\t\t\tdata := map[string]interface{}{\"token\": token, \"id\": userTry.ID, \"user_addr\": userTry.UserAddr}\n\t\t\tcontext.JSON(200, gin.H{\n\t\t\t\t\"status\": \"success\",\n\t\t\t\t\"code\": http.StatusOK,\n\t\t\t\t\"msg\": \"login success, welcome \" + userTry.UserNickName,\n\t\t\t\t\"data\": data,\n\t\t\t})\n\t\t} else {\n\t\t\t// login failed, refuse it\n\t\t\tcontext.JSON(200, gin.H{\n\t\t\t\t\"status\": \"unauthorized\",\n\t\t\t\t\"code\": http.StatusUnauthorized,\n\t\t\t\t\"msg\": \"you are not allowed to login\",\n\t\t\t\t\"data\": \"\",\n\t\t\t})\n\t\t}\n\t}\n}", "func LoginHandler(er *Errorly) http.HandlerFunc {\n\treturn func(rw http.ResponseWriter, r *http.Request) {\n\t\tsession, _ := er.Store.Get(r, sessionName)\n\t\tdefer er.SaveSession(session, r, rw)\n\n\t\t// Create a simple CSRF string to verify clients and 500 if we\n\t\t// cannot generate one.\n\t\tcsrfString, err := uuid.GenerateUUID()\n\t\tif err != nil {\n\t\t\thttp.Error(rw, \"Internal server error: \"+err.Error(), http.StatusInternalServerError)\n\n\t\t\treturn\n\t\t}\n\n\t\t// Store the CSRF in the session then redirect the user to the\n\t\t// OAuth page.\n\t\tsession.Values[\"oauth_csrf\"] = csrfString\n\n\t\turl := er.Configuration.OAuth.AuthCodeURL(csrfString)\n\t\thttp.Redirect(rw, r, url, http.StatusTemporaryRedirect)\n\t}\n}", "func (p *OIDCProvider) CallbackHandler() http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tctx := oidc.ClientContext(r.Context(), p.client)\n\n\t\tif errMsg := r.URL.Query().Get(\"error\"); errMsg != \"\" {\n\t\t\tdesc := r.URL.Query().Get(\"error_description\")\n\t\t\tmsg := fmt.Sprintf(\"%s: %s\", errMsg, desc)\n\t\t\tlevel.Debug(p.logger).Log(\"msg\", msg)\n\t\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tqueryCode := r.URL.Query().Get(\"code\")\n\t\tif queryCode == \"\" {\n\t\t\tconst msg = \"no code in request\"\n\t\t\tlevel.Debug(p.logger).Log(\"msg\", msg)\n\t\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tqueryState := r.URL.Query().Get(\"state\")\n\t\tif queryState != state {\n\t\t\tconst msg = \"incorrect state in request\"\n\t\t\tlevel.Debug(p.logger).Log(\"msg\", msg)\n\t\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\ttoken, err := p.oauth2Config.Exchange(ctx, queryCode)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\"failed to get token: %v\", err)\n\t\t\tlevel.Warn(p.logger).Log(\"msg\", msg, \"err\", err)\n\t\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\trawIDToken, ok := token.Extra(\"id_token\").(string)\n\t\tif !ok {\n\t\t\tconst msg = \"no id_token in token response\"\n\t\t\tlevel.Warn(p.logger).Log(\"msg\", msg)\n\t\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\t_, err = p.verifier.Verify(ctx, rawIDToken)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\"failed to verify ID token: %v\", err)\n\t\t\tlevel.Warn(p.logger).Log(\"msg\", msg)\n\t\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\thttp.SetCookie(w, &http.Cookie{\n\t\t\tName: 
p.cookieName,\n\t\t\tValue: rawIDToken,\n\t\t\tPath: \"/\",\n\t\t\tExpires: token.Expiry,\n\t\t})\n\n\t\thttp.Redirect(w, r, p.redirectURL, http.StatusFound)\n\t})\n}", "func handleAuthorize(rw http.ResponseWriter, req *http.Request) {\n\n\t// Get the Google URL which shows the Authentication page to the user.\n\turl := oauthCfg.AuthCodeURL(\"\")\n\n\t// Redirect user to that page.\n\thttp.Redirect(rw, req, url, http.StatusFound)\n}", "func (a *Auth) Authenticate(handler http.Handler) http.Handler {\n\tif handler == nil {\n\t\tpanic(\"auth: nil handler\")\n\t}\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif a.cfg.Disable {\n\t\t\thandler.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\ttoken, err := a.getCookie(r)\n\t\tif token == nil && err == nil {\n\t\t\t// Cookie is missing, invalid. Fetch new token from OAuth2 provider.\n\t\t\t// Redirect user to the OAuth2 consent page to ask for permission for the scopes specified\n\t\t\t// above.\n\t\t\t// Set the scope to the current request URL, it will be used by the redirect handler to\n\t\t\t// redirect back to the url that requested the authentication.\n\t\t\turl := a.cfg.AuthCodeURL(r.RequestURI)\n\t\t\thttp.Redirect(w, r, url, http.StatusTemporaryRedirect)\n\t\t\treturn\n\t\t}\n\t\tif err != nil {\n\t\t\ta.clearCookie(w)\n\t\t\thttp.Error(w, \"Unauthorized\", http.StatusUnauthorized)\n\t\t\ta.logf(\"Get cookie error: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\t// Source token, in case the token needs a renewal.\n\t\tnewOauth2Token, err := a.cfg.TokenSource(r.Context(), token.toOauth2()).Token()\n\t\tif err != nil {\n\t\t\ta.clearCookie(w)\n\t\t\thttp.Error(w, \"Internal error\", http.StatusInternalServerError)\n\t\t\ta.logf(\"Failed token source: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tnewToken := fromOauth2(newOauth2Token)\n\n\t\tif newToken.IDToken != token.IDToken {\n\t\t\ta.logf(\"Refreshed token\")\n\t\t\ttoken = newToken\n\t\t\ta.setCookie(w, token)\n\t\t}\n\n\t\t// Validate the id_token.\n\t\tpayload, err := a.validator.Validate(r.Context(), token.IDToken, a.cfg.ClientID)\n\t\tif err != nil {\n\t\t\ta.clearCookie(w)\n\t\t\thttp.Error(w, \"Invalid auth.\", http.StatusUnauthorized)\n\t\t\ta.logf(\"Invalid token, reset cookie: %s\", err)\n\t\t\treturn\n\t\t}\n\t\t// User is authenticated.\n\t\t// Store email and name in context, and call the inner handler.\n\t\tcreds := &Creds{\n\t\t\tEmail: payload.Claims[\"email\"].(string),\n\t\t\tName: payload.Claims[\"name\"].(string),\n\t\t}\n\t\tr = r.WithContext(context.WithValue(r.Context(), credsKey, creds))\n\t\thandler.ServeHTTP(w, r)\n\t})\n}", "func (h *Handler) oidcCallback(w http.ResponseWriter, r *http.Request, p httprouter.Params) (interface{}, error) {\n\tresult, err := h.GetConfig().Auth.ValidateOIDCAuthCallback(r.URL.Query())\n\tif err != nil {\n\t\th.Warnf(\"Error validating callback: %v.\", err)\n\t\thttp.Redirect(w, r, \"/web/msg/error/login_failed\", http.StatusFound)\n\t\treturn nil, nil\n\t}\n\th.Infof(\"Callback: %v %v %v.\", result.Username, result.Identity, result.Req.Type)\n\treturn nil, h.CallbackHandler(w, r, webapi.CallbackParams{\n\t\tUsername: result.Username,\n\t\tIdentity: result.Identity,\n\t\tSession: result.Session,\n\t\tCert: result.Cert,\n\t\tTLSCert: result.TLSCert,\n\t\tHostSigners: result.HostSigners,\n\t\tType: result.Req.Type,\n\t\tCreateWebSession: result.Req.CreateWebSession,\n\t\tCSRFToken: result.Req.CSRFToken,\n\t\tPublicKey: result.Req.PublicKey,\n\t\tClientRedirectURL: result.Req.ClientRedirectURL,\n\t})\n}", "func 
loginHandler(w http.ResponseWriter, r *http.Request) {\n\tsep := strings.Split(r.URL.Path, \"/\")\n\taction := sep[2]\n\tprovider := sep[3]\n\tswitch action {\n\tcase \"login\":\n\t\tprovider, err := gomniauth.Provider(provider)\n\t\tif err != nil {\n\t\t\thttp.Error(\n\t\t\t\tw,\n\t\t\t\tfmt.Sprintf(\"Error when trying to get provider %s: %s\", provider, err),\n\t\t\t\thttp.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tloginUrl, err := provider.GetBeginAuthURL(nil, nil)\n\t\tif err != nil {\n\t\t\thttp.Error(\n\t\t\t\tw,\n\t\t\t\tfmt.Sprintf(\"Error when trying to get auth url %s: %s\", provider, err),\n\t\t\t\thttp.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tw.Header().Set(\"Location\", loginUrl)\n\t\tw.WriteHeader(http.StatusTemporaryRedirect)\n\tcase \"callback\":\n\t\tprovider, err := gomniauth.Provider(provider)\n\t\tif err != nil {\n\t\t\thttp.Error(\n\t\t\t\tw,\n\t\t\t\tfmt.Sprintf(\"Error when trying to get provider %s: %s\", provider, err),\n\t\t\t\thttp.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tcreds, err := provider.CompleteAuth(objx.MustFromURLQuery(r.URL.RawQuery))\n\t\tif err != nil {\n\t\t\thttp.Error(\n\t\t\t\tw,\n\t\t\t\tfmt.Sprintf(\"Error when trying to complete auth for\" +\n\t\t\t\t\t\"%s: %s\", provider, err),\n\t\t\t\thttp.StatusInternalServerError,\n\t\t\t)\n\t\t\treturn\n\t\t}\n\t\tuser, err := provider.GetUser(creds)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\"Error when trying to get user from\", provider, \"-\", err)\n\t\t}\n\t\tchatUser := &chatUser{ User: user }\n\t\t// create a user id using md5 hashing\n\t\tm := md5.New()\n\t\tio.WriteString(m, strings.ToLower(user.Email()))\n\t\tchatUser.uniqueID = fmt.Sprintf(\"%x\", m.Sum(nil))\n\t\tavatarURL, err := avatars.GetAvatarURL(chatUser)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\"Error when trying to GetAvatarURL\", \"-\", err)\n\t\t}\n\t\tauthCookieValue := objx.New(map[string]interface{}{\n\t\t\t\"userid\": chatUser.uniqueID,\n\t\t\t\"name\": user.Name(),\n\t\t\t\"avatar_url\": avatarURL,\n\t\t\t\"email\": user.Email(),\n\t\t}).MustBase64()\n\n\t\thttp.SetCookie(w, &http.Cookie{\n\t\t\tName: \"auth\",\n\t\t\tValue: authCookieValue,\n\t\t\tPath: \"/\",\n\t\t})\n\t\tw.Header().Set(\"Location\", \"/chat\")\n\t\tw.WriteHeader(http.StatusTemporaryRedirect)\n\tdefault:\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tfmt.Fprintf(w, \"Auth action %s not supported\", action)\n\t}\n}", "func (app *App) HandleLoginUser(w http.ResponseWriter, r *http.Request) {\n\n\tuserid := chi.URLParam(r, \"userid\")\n\tpassword := chi.URLParam(r, \"password\")\n\n\tapp.logger.Log().Msg(\"Userid is ::\" + userid + \"::\")\n\tuser, err := repository.GetUserByUserid(app.db, userid)\n\tif err != nil {\n\t\t// if err == gorm.ErrRecordNotFound {\n\t\t// \tw.WriteHeader(http.StatusNotFound)\n\t\t// \treturn\n\t\t// }\n\t\tapp.logger.Warn().Err(err).Msg(\"\")\n\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(w, `{\"error\": \"%v\"}`, appErrDataAccessFailure)\n\t\treturn\n\t}\n\tapp.logger.Log().Msgf(\"User is :: \", user)\n\tsuccess := myauth.ComparePasswords(user.Password, []byte(password))\n\tif !success {\n\t\tapp.logger.Log().Msg(\"password is NOT Correct\")\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\treturn\n\t}\n\tapp.logger.Warn().Msg(\"DEBUG JWT: <<<<<<<<<>>>>>>>User: \" + user.Userid)\n\tjwtoken := myauth.JWTClient{}.New(user)\n\tlog.Println(\"DEBUG JWT:\", jwtoken)\n\tif err := json.NewEncoder(w).Encode(jwtoken); err != nil 
{\n\t\tapp.logger.Warn().Err(err).Msg(\"\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(w, `{\"error\": \"%v\"}`, appErrJsonCreationFailure)\n\t\treturn\n\t}\n\n}", "func authLoginHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {\n\tgithub.RedirectToLogin(w, r)\n}", "func handleNaturalistLogin(w http.ResponseWriter, r *http.Request) {\n\turl := authenticator.AuthUrl()\n\n\tlog.Printf(\"Redirecting: %s\", url)\n\n\thttp.Redirect(w, r, url, http.StatusTemporaryRedirect)\n}", "func HandleRedirect(w http.ResponseWriter, r *http.Request) {\n\tstate := r.URL.Query().Get(\"state\")\n\tcode := r.URL.Query().Get(\"code\")\n\trequest, response, err := ia.HandleCallbackCode(code, state)\n\tif err != nil {\n\t\tlog.Debugln(err)\n\t\tmsg := `Unable to complete authentication. <a href=\"/\">Go back.</a><hr/>`\n\t\t_ = controllers.WriteString(w, msg, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Check if a user with this auth already exists, if so, log them in.\n\tif u := auth.GetUserByAuth(response.Me, auth.IndieAuth); u != nil {\n\t\t// Handle existing auth.\n\t\tlog.Debugln(\"user with provided indieauth already exists, logging them in\")\n\n\t\t// Update the current user's access token to point to the existing user id.\n\t\taccessToken := request.CurrentAccessToken\n\t\tuserID := u.ID\n\t\tif err := user.SetAccessTokenToOwner(accessToken, userID); err != nil {\n\t\t\tcontrollers.WriteSimpleResponse(w, false, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tif request.DisplayName != u.DisplayName {\n\t\t\tloginMessage := fmt.Sprintf(\"**%s** is now authenticated as **%s**\", request.DisplayName, u.DisplayName)\n\t\t\tif err := chat.SendSystemAction(loginMessage, true); err != nil {\n\t\t\t\tlog.Errorln(err)\n\t\t\t}\n\t\t}\n\n\t\thttp.Redirect(w, r, \"/\", http.StatusTemporaryRedirect)\n\n\t\treturn\n\t}\n\n\t// Otherwise, save this as new auth.\n\tlog.Debug(\"indieauth token does not already exist, saving it as a new one for the current user\")\n\tif err := auth.AddAuth(request.UserID, response.Me, auth.IndieAuth); err != nil {\n\t\tcontrollers.WriteSimpleResponse(w, false, err.Error())\n\t\treturn\n\t}\n\n\t// Update the current user's authenticated flag so we can show it in\n\t// the chat UI.\n\tif err := user.SetUserAsAuthenticated(request.UserID); err != nil {\n\t\tlog.Errorln(err)\n\t}\n\n\thttp.Redirect(w, r, \"/\", http.StatusTemporaryRedirect)\n}", "func (fn *authController) Login(api fiber.Router) fiber.Handler {\n\treturn func(ctx *fiber.Ctx) error {\n\t\t// set parameter\n\t\tUserLogin := new(models.UserLogin)\n\t\tUser := new(models.User)\n\n\t\tif err := ctx.BodyParser(UserLogin); err != nil {\n\t\t\treturn helper.ErrorHandler(ctx, fiber.ErrForbidden, 400, \"Cannot unmarshal request body, wrong type data.\")\n\t\t}\n\n\t\t// validate request body\n\t\tif err := validator.New().Struct(UserLogin); err != nil {\n\t\t\treturn helper.ErrorHandler(ctx, fiber.ErrForbidden, 400, err.Error())\n\t\t}\n\n\t\t// get usecase auth\n\t\tif err := fn.authUsecase.Login(ctx, User, UserLogin); err != nil {\n\t\t\treturn helper.ErrorHandler(ctx, fiber.ErrForbidden, 400, err.Error())\n\t\t}\n\n\t\ttransform := fn.authTransform.DetailTransform(ctx, fiber.Map{\"user\": User, \"token\": UserLogin.Token})\n\t\treturn ctx.JSON(transform)\n\t}\n}", "func (s *Provider) HandleSamlLogin(w http.ResponseWriter, r *http.Request) (string, error) {\n\tserviceProvider := s.serviceProvider\n\tif r.URL.Path == serviceProvider.AcsURL.Path {\n\t\treturn \"\", fmt.Errorf(\"don't wrap 
Middleware with RequireAccount\")\n\t}\n\tlog.Debugf(\"SAML [HandleSamlLogin]: Creating authentication request for %v\", s.name)\n\tbinding := saml.HTTPRedirectBinding\n\tbindingLocation := serviceProvider.GetSSOBindingLocation(binding)\n\n\treq, err := serviceProvider.MakeAuthenticationRequest(bindingLocation, binding, saml.HTTPPostBinding)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn \"\", err\n\t}\n\t// relayState is limited to 80 bytes but also must be integrity protected.\n\t// this means that we cannot use a JWT because it is way too long. Instead\n\t// we set a cookie that corresponds to the state\n\trelayState := base64.URLEncoding.EncodeToString(randomBytes(42))\n\n\tsecretBlock := x509.MarshalPKCS1PrivateKey(serviceProvider.Key)\n\tstate := jwt.New(jwt.SigningMethodHS256)\n\tclaims := state.Claims.(jwt.MapClaims)\n\tclaims[\"id\"] = req.ID\n\tclaims[\"uri\"] = r.URL.String()\n\tsignedState, err := state.SignedString(secretBlock)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn \"\", err\n\t}\n\n\ts.clientState.SetState(w, r, relayState, signedState)\n\n\tredirectURL, err := req.Redirect(relayState, serviceProvider)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn redirectURL.String(), nil\n}", "func (h *GitHubOAuth) Login(c *router.Control) {\n\turl := h.oAuthConf.AuthCodeURL(h.state, oauth2.AccessTypeOnline)\n\thttp.Redirect(c.Writer, c.Request, url, http.StatusTemporaryRedirect)\n}", "func (uh *UserHandler) HandleFacebookLogin(w http.ResponseWriter, r *http.Request) {\n\tOauthStateString = stringTools.RandomStringGN(20)\n\turl := facebookOauthConfig.AuthCodeURL(OauthStateString)\n\thttp.Redirect(w, r, url, http.StatusSeeOther)\n}", "func LoginHandler(c *gin.Context) {\n\tloginFrom := LoginRequest{}\n\terr := c.ShouldBindJSON(&loginFrom)\n\tif err != nil {\n\t\tc.AbortWithStatusJSON(400, gin.H{\n\t\t\t\"message\": \"Invalid json request.\",\n\t\t})\n\t\treturn\n\t}\n\tisPasswordCorrect := comparePassword(loginFrom)\n\tif !isPasswordCorrect {\n\t\tc.AbortWithStatusJSON(401, gin.H{\n\t\t\t\"message\": \"Username or/and Password are not correct.\",\n\t\t})\n\t\treturn\n\t}\n\ttoken, err := login(loginFrom)\n\tif err != nil {\n\t\tc.AbortWithStatusJSON(500, gin.H{\n\t\t\t\"message\": \"Something went wrong when generating token.\",\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(200, gin.H{\n\t\t\"token\": token,\n\t})\n}", "func LoginHandler(db *sqlx.DB, cfg config.Config) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tdefer r.Body.Close()\n\t\tauthenticated := false\n\t\tform := auth.PasswordForm{}\n\t\tvar resp tc.Alerts\n\t\tdbCtx, cancelTx := context.WithTimeout(r.Context(), time.Duration(cfg.DBQueryTimeoutSeconds)*time.Second)\n\t\tdefer cancelTx()\n\n\t\t// Attempt to perform client certificate authentication. If fails, goto standard form auth. 
If the\n\t\t// certificate was verified, has a UID, and the UID matches an existing user we consider this to\n\t\t// be a successful login.\n\t\tauthenticated = clientCertAuthentication(w, r, db, cfg, dbCtx, cancelTx, form, authenticated)\n\n\t\t// Failed certificate-based auth, perform standard form auth\n\t\tif !authenticated {\n\t\t\t// Perform form authentication\n\t\t\tif err := json.NewDecoder(r.Body).Decode(&form); err != nil {\n\t\t\t\tapi.HandleErr(w, r, nil, http.StatusBadRequest, err, nil)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif form.Username == \"\" || form.Password == \"\" {\n\t\t\t\tapi.HandleErr(w, r, nil, http.StatusBadRequest, errors.New(\"username and password are required\"), nil)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Check if user exists and has a role\n\t\t\tuserAllowed, err, blockingErr := auth.CheckLocalUserIsAllowed(form.Username, db, dbCtx)\n\t\t\tif blockingErr != nil {\n\t\t\t\tapi.HandleErr(w, r, nil, http.StatusServiceUnavailable, nil, fmt.Errorf(\"error checking local user has role: %s\", blockingErr.Error()))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"checking local user: %s\\n\", err)\n\t\t\t}\n\n\t\t\t// User w/ role does not exist, return unauthorized\n\t\t\tif !userAllowed {\n\t\t\t\tresp = tc.CreateAlerts(tc.ErrorLevel, \"Invalid username or password.\")\n\t\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t\tapi.WriteRespRaw(w, r, resp)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Check local DB or LDAP\n\t\t\tauthenticated, err, blockingErr = auth.CheckLocalUserPassword(form, db, dbCtx)\n\t\t\tif blockingErr != nil {\n\t\t\t\tapi.HandleErr(w, r, nil, http.StatusServiceUnavailable, nil, fmt.Errorf(\"error checking local user password: %s\", blockingErr.Error()))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"checking local user password: %s\\n\", err)\n\t\t\t}\n\t\t\tvar ldapErr error\n\t\t\tif !authenticated && cfg.LDAPEnabled {\n\t\t\t\tauthenticated, ldapErr = auth.CheckLDAPUser(form, cfg.ConfigLDAP)\n\t\t\t\tif ldapErr != nil {\n\t\t\t\t\tlog.Errorf(\"checking ldap user: %s\\n\", ldapErr.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Failed to authenticate in either local DB or LDAP, return unauthorized\n\t\tif !authenticated {\n\t\t\tresp = tc.CreateAlerts(tc.ErrorLevel, \"Invalid username or password.\")\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\tapi.WriteRespRaw(w, r, resp)\n\t\t\treturn\n\t\t}\n\n\t\t// Successful authentication, write cookie and return\n\t\thttpCookie := tocookie.GetCookie(form.Username, defaultCookieDuration, cfg.Secrets[0])\n\t\thttp.SetCookie(w, httpCookie)\n\n\t\tvar jwtToken jwt.Token\n\t\tvar jwtSigned []byte\n\t\tjwtBuilder := jwt.NewBuilder()\n\n\t\temptyConf := config.CdniConf{}\n\t\tif cfg.Cdni != nil && *cfg.Cdni != emptyConf {\n\t\t\tucdn, err := auth.GetUserUcdn(form, db, dbCtx)\n\t\t\tif err != nil {\n\t\t\t\t// log but do not error out since this is optional in the JWT for CDNi integration\n\t\t\t\tlog.Errorf(\"getting ucdn for user %s: %v\", form.Username, err)\n\t\t\t}\n\t\t\tjwtBuilder.Claim(jwt.IssuerKey, ucdn)\n\t\t\tjwtBuilder.Claim(jwt.AudienceKey, cfg.Cdni.DCdnId)\n\t\t}\n\n\t\tjwtBuilder.Claim(jwt.ExpirationKey, httpCookie.Expires.Unix())\n\t\tjwtBuilder.Claim(api.MojoCookie, httpCookie.Value)\n\t\tjwtToken, err := jwtBuilder.Build()\n\t\tif err != nil {\n\t\t\tapi.HandleErr(w, r, nil, http.StatusInternalServerError, nil, fmt.Errorf(\"building token: %s\", err))\n\t\t\treturn\n\t\t}\n\n\t\tjwtSigned, err = jwt.Sign(jwtToken, jwa.HS256, 
[]byte(cfg.Secrets[0]))\n\t\tif err != nil {\n\t\t\tapi.HandleErr(w, r, nil, http.StatusInternalServerError, nil, err)\n\t\t\treturn\n\t\t}\n\n\t\thttp.SetCookie(w, &http.Cookie{\n\t\t\tName: rfc.AccessToken,\n\t\t\tValue: string(jwtSigned),\n\t\t\tPath: \"/\",\n\t\t\tMaxAge: httpCookie.MaxAge,\n\t\t\tExpires: httpCookie.Expires,\n\t\t\tHttpOnly: true, // prevents the cookie being accessed by Javascript. DO NOT remove, security vulnerability\n\t\t})\n\n\t\t// If all's well until here, then update last authenticated time\n\t\ttx, txErr := db.BeginTx(dbCtx, nil)\n\t\tif txErr != nil {\n\t\t\tapi.HandleErr(w, r, tx, http.StatusInternalServerError, nil, fmt.Errorf(\"beginning transaction: %w\", txErr))\n\t\t\treturn\n\t\t}\n\t\tdefer func() {\n\t\t\tif err := tx.Commit(); err != nil && err != sql.ErrTxDone {\n\t\t\t\tlog.Errorf(\"committing transaction: %s\", err)\n\t\t\t}\n\t\t}()\n\t\t_, dbErr := tx.Exec(UpdateLoginTimeQuery, form.Username)\n\t\tif dbErr != nil {\n\t\t\tlog.Errorf(\"unable to update authentication time for a given user: %s\\n\", dbErr.Error())\n\t\t}\n\n\t\tresp = tc.CreateAlerts(tc.SuccessLevel, \"Successfully logged in.\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tapi.WriteRespRaw(w, r, resp)\n\t}\n}", "func LoginHandler(c *gin.Context) {\n\tval, _ := c.Cookie(\"auth\")\n\tif val != \"\" {\n\t\tc.Redirect(http.StatusOK, \"/dashboard\")\n\t\treturn\n\t}\n\n\tinfo := models.LoginInfo{\n\t\tEmail: c.Query(\"email\"),\n\t\tPassword: c.Query(\"password\"),\n\t}\n\n\tif info.Email == \"\" || info.Password == \"\" {\n\t\tc.Redirect(http.StatusTemporaryRedirect, \"/\")\n\t\treturn\n\t}\n\n\tif ok, err := crud.CheckLoginInfo(info); !ok || err != nil {\n\t\tc.HTML(http.StatusInternalServerError, \"login.html\", err.Error())\n\t}\n\n\tc.SetCookie(\"auth\", \"yes\", 86400, \"/\", \"127.0.0.1\", false, false)\n\tc.SetCookie(\"auth\", \"yes\", 86400, \"/dashboard\", \"127.0.0.1\", false, false)\n\tc.SetCookie(\"auth\", \"yes\", 86400, \"/data\", \"127.0.0.1\", false, false)\n\tc.SetCookie(\"auth\", \"yes\", 86400, \"/report\", \"127.0.0.1\", false, false)\n\tc.SetCookie(\"auth\", \"yes\", 86400, \"/config\", \"127.0.0.1\", false, false)\n\tc.SetCookie(\"auth\", \"yes\", 86400, \"/config/submit\", \"127.0.0.1\", false, false)\n\n\tc.Redirect(http.StatusTemporaryRedirect, \"/dashboard\")\n\treturn\n}", "func checkLogin(c appengine.Context, rw http.ResponseWriter, req *http.Request) *user.User {\n\tu := user.Current(c)\n\tif u == nil {\n\t\turl, err := user.LoginURL(c, req.URL.String())\n\t\tif err != nil {\n\t\t\thttp.Error(rw, err.Error(), http.StatusInternalServerError)\n\t\t\treturn nil\n\t\t}\n\t\trw.Header().Set(\"Location\", url)\n\t\trw.WriteHeader(http.StatusFound)\n\t\treturn nil\n\t}\n\treturn u\n}", "func (h *Handler) LoginHandler(w http.ResponseWriter, r *http.Request) {\n\tvar u, storedUser *user.User\n\tvar tokenString string\n\n\terr := json.NewDecoder(r.Body).Decode(&u)\n\tif err != nil {\n\t\th.log.Error(err)\n\t\t_ = response.HTTPError(w, http.StatusBadRequest, response.ErrorBadRequest.Error())\n\t\treturn\n\t}\n\n\tctx, cancel := context.WithCancel(r.Context())\n\tdefer cancel()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\t_ = response.HTTPError(w, http.StatusBadGateway, response.ErrTimeout.Error())\n\t\treturn\n\tdefault:\n\t\tstoredUser, tokenString, err = h.service.LoginUser(ctx, u)\n\t}\n\n\tif err != nil {\n\t\th.log.Error(err)\n\t\tif errors.Is(err, response.ErrorBadEmailOrPassword) {\n\t\t\t_ = response.HTTPError(w, http.StatusBadRequest, 
err.Error())\n\t\t\treturn\n\t\t} else if errors.Is(err, response.ErrorNotFound) {\n\t\t\t_ = response.HTTPError(w, http.StatusNotFound, err.Error())\n\t\t\treturn\n\t\t} else {\n\t\t\t_ = response.HTTPError(w, http.StatusInternalServerError, err.Error())\n\t\t\treturn\n\t\t}\n\t}\n\n\t_ = response.JSON(w, http.StatusOK, response.Map{\n\t\t\"token\": tokenString,\n\t\t\"user\": storedUser,\n\t})\n}", "func callbackHandler(w http.ResponseWriter, r *http.Request) {\n\n\t// Process identity provider callback, checking tokens, etc.\n\tauth, err := idp.Callback(w, r)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t// must not fall through: auth is nil on error\n\t\treturn\n\t}\n\n\t// Store session authentication information in cookie\n\tsetCookie(w, r, auth, auth.ExpiresIn)\n\n\t// Redirect to original page\n\thttp.Redirect(w, r, auth.URL, http.StatusFound)\n}", "func (ah *AuthHandler) Login() http.HandlerFunc {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tvar dataResourse UserResource\n\t\t// Decode the incoming user json\n\t\terr := json.NewDecoder(r.Body).Decode(&dataResourse)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\ttoken, err := ah.authInteractor.Login(dataResourse.Data)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t// stop here so cookies and CSRF header are only set on success\n\t\t\treturn\n\t\t}\n\n\t\tsetAuthCookie(&w, token.AuthToken, token.RefreshToken)\n\t\tsetRefreshCookie(&w, token.AuthToken, token.RefreshToken)\n\n\t\tw.Header().Set(\"X-CSRF-Token\", token.CSRFKey)\n\t\tw.WriteHeader(http.StatusOK)\n\t})\n}", "func (a *loginAPI) HandleAuthFlow(w http.ResponseWriter, r *http.Request) error {\n\tstate := randToken()\n\ta.appCookie.Set(stateParam, state, cookieExpiry, w)\n\tlog.WithField(\"func\", \"server.HandleAuthFlow\").Debugf(\"initiate using state '%s'\", state)\n\n\tsite, redirect := query(r, siteParam), query(r, redirectParam)\n\tif site == \"\" || redirect == \"\" {\n\t\treturn errors.BadRequestError{Err: fmt.Errorf(\"missing or invalid parameters supplied\"), Request: r}\n\t}\n\ta.appCookie.Set(authFlowCookie, fmt.Sprintf(\"%s%s%s\", site, authFlowSep, redirect), cookieExpiry, w)\n\thttp.Redirect(w, r, a.GetOIDCRedirectURL(), http.StatusTemporaryRedirect)\n\treturn nil\n}", "func LoginHandler(w *http.ResponseWriter, r *http.Request, p *persistance.Persistance) {\n\tvar env s.Envelop\n\terr := env.FromEnvelop(r)\n\tcheckErr(err)\n\n}", "func Login() gin.HandlerFunc {\r\n\tif gin.Mode() == \"debug\" {\r\n\t\treturn func(c *gin.Context) { c.Next() }\r\n\t}\r\n\treturn func(c *gin.Context) {\r\n\t\tsession := sessions.Default(c)\r\n\t\tUserID := session.Get(\"UserID\")\r\n\t\tIsLeader := session.Get(\"IsLeader\")\r\n\r\n\t\tfmt.Println(\"UserID, IsLeader\", UserID, IsLeader)\r\n\t\tif UserID == nil {\r\n\t\t\tstate := string([]byte(c.Request.URL.Path)[1:])\r\n\t\t\tc.Redirect(http.StatusFound, \"/login?state=\"+state)\r\n\t\t\tc.Abort()\r\n\t\t} else {\r\n\t\t\tc.Set(\"UserID\", UserID)\r\n\t\t\tc.Set(\"IsLeader\", IsLeader)\r\n\t\t\tc.Next()\r\n\t\t}\r\n\r\n\t}\r\n}", "func (app *Application) LoginHandler(w http.ResponseWriter, r *http.Request) {\n\tvar data map[string]interface{}\n\tdata = make(map[string]interface{})\n\tfmt.Println(\"login.html\")\n\tif r.FormValue(\"submitted\") == \"true\" {\n\t\tuname := r.FormValue(\"username\")\n\t\tpword := r.FormValue(\"password\")\n\t\torg := r.FormValue(\"org\")\n\t\tprintln(uname, pword, org)\n\t\t// according to uname, compare pword with map[uname]\n\t\tfor _, v := range webutil.Orgnization[org] {\n\t\t\tfmt.Println(\"org user\", v.UserName)\n\t\t\tif 
v.UserName == uname {\n\t\t\t\tif v.Secret == pword {\n\t\t\t\t\twebutil.MySession.SetSession(uname, org, w)\n\t\t\t\t\thttp.Redirect(w, r, \"./home.html\", 302)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t\t//login failed redirect to login page and show failed\n\t\tdata[\"LoginFailed\"] = true\n\t\tloginTemplate(w, r, \"login.html\", data)\n\t\treturn\n\t}\n\tloginTemplate(w, r, \"login.html\", data)\n}", "func LoginHandler(w http.ResponseWriter, r *http.Request) {\n\tusername := r.FormValue(\"username\")\n\tpassword := r.FormValue(\"password\")\n\n\tif username != \"\" && password != \"\" {\n\t\tauth := database.QuickGetAuth()\n\t\tnauth := &database.Auth{\n\t\t\tUsername: username,\n\t\t\tPassword: password,\n\t\t}\n\n\t\tif auth.Equal(nauth) {\n\t\t\thttp.SetCookie(w, nauth.MakeCookie())\n\t\t\thttp.Redirect(w, r, \"/admin\", 301)\n\t\t} else {\n\t\t\thttp.Redirect(w, r, \"/admin/nono\", 301)\n\t\t}\n\t} else {\n\t\thttp.Redirect(w, r, \"/admin\", 301)\n\t}\n}", "func (c *controller) Login(ctx context.Context, request *web.Request) web.Result {\n\tredirecturl, ok := request.Params[\"redirecturl\"]\n\tif !ok || redirecturl == \"\" {\n\t\tredirecturl = request.Request().Referer()\n\t}\n\trequest.Params[\"redirecturl\"] = redirecturl\n\n\tif resp := c.service.AuthenticateFor(ctx, request.Params[\"broker\"], request); resp != nil {\n\t\treturn resp\n\t}\n\treturn c.responder.NotFound(errors.New(\"broker for login not found\"))\n}", "func (r *router) loginHandler(c *gin.Context) {\n\n\tvar json login\n\n\tif err := c.ShouldBindJSON(&json); err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error()})\n\t\treturn\n\t}\n\n\ttoken, err := r.security.Auth(json.Email, json.Password)\n\n\tif err != nil {\n\t\tc.JSON(http.StatusUnauthorized, gin.H{\"error\": err.Error()})\n\t\treturn\n\t}\n\n\tc.Header(\"Authorization\", \"Bearer \"+token)\n\tc.JSON(http.StatusOK, gin.H{\"token\": token})\n\n}", "func (a *GoogleAuth) GoogleLoginHandler(w http.ResponseWriter, r *http.Request) {\n\tstate := a.NewAuthState(r)\n\turl := a.config.AuthCodeURL(state)\n\thttp.Redirect(w, r, url, http.StatusTemporaryRedirect)\n}", "func (a Authentic) loginHandler(c buffalo.Context) error {\n\tc.Request().ParseForm()\n\n\t//TODO: schema ?\n\tloginData := struct {\n\t\tUsername string\n\t\tPassword string\n\t}{}\n\n\tc.Bind(&loginData)\n\n\tu, err := a.provider.FindByUsername(loginData.Username)\n\tif err != nil || ValidatePassword(loginData.Password, u) == false {\n\t\tc.Flash().Add(\"danger\", \"Invalid Username or Password\")\n\t\treturn c.Redirect(http.StatusSeeOther, a.Config.LoginPath)\n\t}\n\n\tc.Session().Set(SessionField, u.GetID())\n\tc.Session().Save()\n\n\treturn c.Redirect(http.StatusSeeOther, a.Config.AfterLoginPath)\n}", "func HandleLogin() echo.HandlerFunc {\n\treturn auth.BuildLoginHandler(settings)\n}", "func (uh *UserHandler) HandleGoogleLogin(w http.ResponseWriter, r *http.Request) {\n\tOauthStateString = stringTools.RandomStringGN(20)\n\turl := googleOauthConfig.AuthCodeURL(OauthStateString)\n\thttp.Redirect(w, r, url, http.StatusSeeOther)\n}", "func (h *Handler) LoginHandler(w http.ResponseWriter, r *http.Request) {\n\n\tchallenge, err := readURLChallangeParams(r, \"login\")\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif r.Method == \"POST\" {\n\t\tif r.Form == nil {\n\t\t\tif err := r.ParseForm(); err != nil {\n\t\t\t\thttp.Error(w, err.Error(), 
http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tuserName := r.Form.Get(\"username\")\n\t\tpassword := r.Form.Get(\"password\")\n\t\tloginChallenge := r.Form.Get(\"challenge\")\n\t\tpass, err := h.LoginService.CheckPasswords(userName, password)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\tif pass {\n\t\t\tacceptLoginBody := h.ConfigService.FetchAcceptLoginConfig(userName)\n\t\t\trawJson, err := json.Marshal(acceptLoginBody)\n\n\t\t\tredirectURL, err := h.LoginService.SendAcceptBody(\"login\", loginChallenge, rawJson)\n\t\t\tif err != nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\thttp.Redirect(w, r, redirectURL, http.StatusFound)\n\t\t\t// return after the redirect so the forbidden status below is\n\t\t\t// only written on a failed login\n\t\t\treturn\n\t\t}\n\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\ttemplLogin := template.Must(template.ParseFiles(\"templates/login.html\"))\n\t\tloginData := h.ConfigService.FetchLoginConfig(challenge, true)\n\t\ttemplLogin.Execute(w, loginData)\n\t} else {\n\t\tchallengeBody, err := h.LoginService.ReadChallenge(challenge, \"login\")\n\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tlog.Print(err)\n\t\t}\n\n\t\tif !challengeBody.Skip {\n\t\t\ttemplLogin := template.Must(template.ParseFiles(\"templates/login.html\"))\n\t\t\tloginData := h.ConfigService.FetchLoginConfig(challenge, false)\n\t\t\ttemplLogin.Execute(w, loginData)\n\t\t} else {\n\t\t\tacceptLoginBody := h.ConfigService.FetchAcceptLoginConfig(challengeBody.Subject)\n\t\t\trawJson, err := json.Marshal(acceptLoginBody)\n\n\t\t\tredirectURL, err := h.LoginService.SendAcceptBody(\"login\", challenge, rawJson)\n\t\t\tif err != nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\thttp.Redirect(w, r, redirectURL, http.StatusFound)\n\t\t}\n\t}\n}", "func LoginHandler(c buffalo.Context) error {\n\ttx, ok := c.Value(\"tx\").(*pop.Connection)\n\tif !ok {\n\t\treturn errors.WithStack(errors.New(\"no transaction found\"))\n\t}\n\n\tparams := &LoginParams{}\n\tif err := c.Bind(params); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tuser := &models.User{}\n\terr := tx.Where(\"user_name = ?\", params.UserName).First(user)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tmatch := user.CheckPasswordHash(params.Password)\n\n\tif match {\n\t\tclaims := jwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Add(oneWeek()).Unix(),\n\t\t\tId: user.ID.String(),\n\t\t}\n\t\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\t\tbox := packr.NewBox(\"../config\")\n\t\tsigningKey := box.Bytes(os.Getenv(\"JWT_SIGN_KEY\"))\n\n\t\ttokenString, err := token.SignedString(signingKey)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not sign token, %v\", err)\n\t\t}\n\t\tc.Response().Header().Set(\"Token\", tokenString)\n\t\treturn c.Render(200, r.JSON(map[string]string{\"token\": tokenString}))\n\t}\n\n\treturn c.Render(401, r.JSON(map[string]string{\"message\": \"Username/password mismatch\"}))\n}", "func (a *App) Login(destination string) http.HandlerFunc {\n\tif destination == \"\" {\n\t\tdestination = \"/\"\n\t}\n\n\tmodel := http.Cookie{\n\t\tName: a.Cookie,\n\t\tPath: \"/\",\n\t\tHttpOnly: true,\n\t\tMaxAge: 24 * 60 * 60,\n\t\tSameSite: http.SameSiteStrictMode,\n\t\tSecure: true,\n\t}\n\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tj := r.URL.Query().Get(\"jwt\")\n\t\tif _, err := a.FromJWT(j); err != nil {\n\t\t\ta.Error(w, r, \"JWT error: \"+err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tc := 
model\n\t\tc.Value = j\n\t\tw.Header().Add(\"Set-Cookie\", c.String())\n\n\t\tto := fromb64(r.URL.Query().Get(\"r\"))\n\t\tif to == \"\" {\n\t\t\tto = destination\n\t\t}\n\t\tredirection(w, to)\n\t}\n}", "func (h *ATMHandler) HandleLogin(w http.ResponseWriter, r *http.Request) {\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tvar loginPayload LoginRequest\n\tjson.Unmarshal(body, &loginPayload)\n\ttoken, atmErr := h.ATMClient.Login(r.Context(), loginPayload.Username, loginPayload.PIN)\n\tif atmErr != nil {\n\t\tif atmErr.IsAuthenticated() {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\treturn\n\t}\n\tresponse := LoginResponse{\n\t\tToken: token,\n\t}\n\tb, _ := json.Marshal(response)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tfmt.Fprint(w, string(b))\n}", "func (h AuthByEmailHandler) serveLogin(w http.ResponseWriter, r *http.Request) (int, error) {\n\t// Parse the form data in the request body\n\tr.ParseForm()\n\n\tif len(r.PostForm[\"email\"]) == 0 {\n\t\treturn h.serveBadRequest(w)\n\t}\n\n\temail, err := NewEmailAddrFromString(r.PostForm[\"email\"][0])\n\tif err != nil {\n\t\treturn h.serveBadRequest(w)\n\t}\n\n\tuserID := CRYPTO.UserIDfromEmail(email)\n\n\t// If the user is new but from a whitelisted domain, they should be added before being sent a link\n\tif h.config.IsDomainWhitelisted(email.Domain) && !h.database.IsKnownUser(userID) {\n\t\t// If the user is not known, but should be automatically approved, we add them to the database\n\t\t// and then send the e-mail.\n\t\th.database.AddUser(userID)\n\t}\n\n\t// Send the appropriate email\n\tif h.database.IsKnownUser(userID) {\n\t\t// If the user is known, we support a kiosk login by giving this browser an invalid\n\t\t// cookie that can later be validated.\n\t\tcookie, err := h.database.NewCookieToken(CookieToken{UserID: userID, IsValidated: false, BrowserContext: GetBrowserContext(r)})\n\t\tif err != nil {\n\t\t\th.logger.Printf(\"Database error trying to set cookie for an existing user, %v\\n\", err)\n\t\t\treturn 500, err\n\t\t}\n\n\t\ttoken, err := h.database.NewLinkToken(LinkToken{UserID: userID, CorrespondingCookie: cookie}, time.Hour)\n\t\tif err != nil {\n\t\t\th.logger.Printf(\"Database error trying to log in an existing user, %v\\n\", err)\n\t\t\treturn 500, err\n\t\t}\n\n\t\terr = h.mailer.SendLoginLink(email, token)\n\t\tif err != nil {\n\t\t\th.logger.Printf(\"Error mailing user %v a login link, %v\", email.String(), err)\n\t\t\treturn 500, err\n\t\t}\n\n\t\t// Everything worked, give the cookie\n\t\thttp.SetCookie(w, &http.Cookie{\n\t\t\tName: \"authByEmailToken\",\n\t\t\tPath: \"/\",\n\t\t\tValue: cookie,\n\t\t\tMaxAge: int(h.config.CookieValidity.Seconds()), // seconds\n\t\t\tSecure: r.URL.Scheme == \"https\",\n\t\t\tHttpOnly: true,\n\t\t})\n\t} else {\n\t\t// For unknown users, make an admin request. Given the timescale, setting an unvalidated\n\t\t// cookie is not necessary (kiosk login is not supported).\n\t\terr := h.mailer.SendAdminLoginRequest(email)\n\t\tif err != nil {\n\t\t\th.logger.Printf(\"Error mailing user %v's admin an approval link, %v\", email.String(), err)\n\t\t\treturn 500, err\n\t\t}\n\n\t\t// We still make and give a cookie, though it is not tracked. 
This is necessary to prevent\n\t\t// users from using this interface to test if a certain e-mail address is known to us.\n\t\thttp.SetCookie(w, &http.Cookie{\n\t\t\tName: \"authByEmailToken\",\n\t\t\tPath: \"/\",\n\t\t\tValue: newRandom(),\n\t\t\tMaxAge: int(h.config.CookieValidity.Seconds()), // seconds\n\t\t\tSecure: r.URL.Scheme == \"https\",\n\t\t\tHttpOnly: true,\n\t\t})\n\t}\n\n\treturn h.serveRedirect(w, \"/auth/wait\")\n}", "func LoginHandler(w http.ResponseWriter, r *http.Request) {\n\t// Initialize the fields that we need in the custom struct.\n\ttype Context struct {\n\t\tErr string\n\t\tErrExists bool\n\t\tOTPRequired bool\n\t\tUsername string\n\t\tPassword string\n\t}\n\t// Call the Context struct.\n\tc := Context{}\n\n\t// If the request method is POST\n\tif r.Method == \"POST\" {\n\t\t// This is a login request from the user.\n\t\tusername := r.PostFormValue(\"username\")\n\t\tusername = strings.TrimSpace(strings.ToLower(username))\n\t\tpassword := r.PostFormValue(\"password\")\n\t\totp := r.PostFormValue(\"otp\")\n\n\t\t// Login2FA login using username, password and otp for users with OTPRequired = true.\n\t\tsession := uadmin.Login2FA(r, username, password, otp)\n\n\t\t// Check whether the session returned is nil or the user is not active.\n\t\tif session == nil || !session.User.Active {\n\t\t\t/* Assign the login validation here that will be used for UI displaying. ErrExists and\n\t\t\tErr fields are coming from the Context struct. */\n\t\t\tc.ErrExists = true\n\t\t\tc.Err = \"Invalid username/password or inactive user\"\n\n\t\t} else {\n\t\t\t// If the user has OTPRequired enabled, it will print the username and OTP in the terminal.\n\t\t\tif session.PendingOTP {\n\t\t\t\tuadmin.Trail(uadmin.INFO, \"User: %s OTP: %s\", session.User.Username, session.User.GetOTP())\n\t\t\t}\n\n\t\t\t/* As long as the username and password is valid, it will create a session cookie in the\n\t\t\tbrowser. */\n\t\t\tcookie, _ := r.Cookie(\"session\")\n\t\t\tif cookie == nil {\n\t\t\t\tcookie = &http.Cookie{}\n\t\t\t}\n\t\t\tcookie.Name = \"session\"\n\t\t\tcookie.Value = session.Key\n\t\t\tcookie.Path = \"/\"\n\t\t\tcookie.SameSite = http.SameSiteStrictMode\n\t\t\thttp.SetCookie(w, cookie)\n\n\t\t\t// Check for OTP\n\t\t\tif session.PendingOTP {\n\t\t\t\t/* After the user enters a valid username and password in the first part of the form, these\n\t\t\t\tvalues will be used on the second part in the UI where the OTP input field will be\n\t\t\t\tdisplayed afterwards. 
*/\n\t\t\t\tc.Username = username\n\t\t\t\tc.Password = password\n\t\t\t\tc.OTPRequired = true\n\n\t\t\t} else {\n\t\t\t\t// If the next value is empty, redirect to the current page with the logout keyword removed from the end.\n\t\t\t\tif r.URL.Query().Get(\"next\") == \"\" {\n\t\t\t\t\thttp.Redirect(w, r, strings.TrimSuffix(r.RequestURI, \"logout\"), http.StatusSeeOther)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// Redirect to the page specified by the next value.\n\t\t\t\thttp.Redirect(w, r, r.URL.Query().Get(\"next\"), http.StatusSeeOther)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\t// Render the login filepath and pass the context data object to the HTML file.\n\tuadmin.RenderHTML(w, r, \"templates/login.html\", c)\n}", "func GithubLoginHandler(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"GithubLoginHandler\")\n\tparams := r.URL.Query()[\"jwt\"]\n\tif len(params) == 0 {\n\t\t// http.Redirect must not be used with a 4xx status; reply with a plain error instead\n\t\thttp.Error(w, \"missing jwt parameter\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tcurState := params[0]\n\turl := githubOauthConfig.AuthCodeURL(curState)\n\thttp.Redirect(w, r, url, http.StatusTemporaryRedirect)\n}", "func OIDCAuth(optionSetters ...Option) func(next http.Handler) http.Handler {\n\toptions := newOptions(optionSetters...)\n\ttokenCache := sync.NewCache(options.UserinfoCacheSize)\n\n\th := oidcAuth{\n\t\tlogger: options.Logger,\n\t\tproviderFunc: options.OIDCProviderFunc,\n\t\thttpClient: options.HTTPClient,\n\t\toidcIss: options.OIDCIss,\n\t\tTokenManagerConfig: options.TokenManagerConfig,\n\t\ttokenCache: &tokenCache,\n\t\ttokenCacheTTL: options.UserinfoCacheTTL,\n\t}\n\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\t\t// there is no bearer token on the request,\n\t\t\tif !h.shouldServe(req) {\n\t\t\t\t// oidc supported but token not present, add header and handover to the next middleware.\n\t\t\t\tuserAgentAuthenticateLockIn(w, req, options.CredentialsByUserAgent, \"bearer\")\n\t\t\t\tnext.ServeHTTP(w, req)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif h.getProvider() == nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttoken := strings.TrimPrefix(req.Header.Get(\"Authorization\"), \"Bearer \")\n\n\t\t\tclaims, status := h.getClaims(token, req)\n\t\t\tif status != 0 {\n\t\t\t\tw.WriteHeader(status)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// inject claims to the request context for the account_uuid middleware.\n\t\t\treq = req.WithContext(oidc.NewContext(req.Context(), claims))\n\n\t\t\t// store claims in context\n\t\t\t// uses the original context, not the one with probably reduced security\n\t\t\tnext.ServeHTTP(w, req.WithContext(oidc.NewContext(req.Context(), claims)))\n\t\t})\n\t}\n}", "func HandleLoginResponse(r *http.Request, w http.ResponseWriter, cfg *setting.Cfg, identity *Identity, validator RedirectValidator) *response.NormalResponse {\n\tresult := map[string]interface{}{\"message\": \"Logged in\"}\n\tif redirectURL := handleLogin(r, w, cfg, identity, validator); redirectURL != cfg.AppSubURL+\"/\" {\n\t\tresult[\"redirectUrl\"] = redirectURL\n\t}\n\treturn response.JSON(http.StatusOK, result)\n}", "func Login(w http.ResponseWriter, r *http.Request) {\n\tappengineContext := appengine.NewContext(r)\n\tappengineUser := user.Current(appengineContext)\n\tif appengineUser != nil {\n\t\turl, err := user.LogoutURL(appengineContext, \"/\")\n\t\tif err == nil {\n\t\t\tw.Header().Set(\"Location\", 
url)\n\t\t\tw.WriteHeader(http.StatusFound)\n\t\t\treturn\n\t\t}\n\t}\n\tw.Header().Set(\"Location\", \"/\")\n\tw.WriteHeader(http.StatusFound)\n}", "func (provider WechatWorkProvider) Login(context *auth.Context) {\n\tAuthCodeURL := provider.buildAuthCodeURL(context)\n\t// claims := claims.Claims{}\n\t// claims.Subject = \"state\"\n\t// signedToken := context.Auth.SessionStorer.SignedToken(&claims)\n\n\t// url := provider.OAuthConfig(context).AuthCodeURL(signedToken)\n\t// http.Redirect(context.Writer, context.Request, url, http.StatusFound)\n\thttp.Redirect(context.Writer, context.Request, AuthCodeURL, http.StatusFound)\n}", "func HandlerLogin(responseWriter http.ResponseWriter, request *http.Request) {\n\trequest.ParseForm()\n\n\tif request.Method == STR_GET {\n\t\tServeLogin(responseWriter, STR_EMPTY)\n\t} else {\n\t\tvar userName string = request.FormValue(API_KEY_username)\n\t\tvar password string = request.FormValue(API_KEY_password)\n\t\tif userName == STR_EMPTY || password == STR_EMPTY {\n\t\t\tServeLogin(responseWriter, \"Please enter username and password\")\n\t\t\treturn\n\t\t}\n\n\t\tvar userId = -1\n\t\tvar errorUser error = nil\n\t\tuserId, errorUser = DbGetUser(userName, password, nil)\n\t\tif errorUser != nil {\n\t\t\tlog.Printf(\"HandlerLogin, errorUser=%s\", errorUser.Error())\n\t\t}\n\t\tif userId > -1 {\n\t\t\ttoken := DbAddToken(userId, nil)\n\t\t\tAddCookie(responseWriter, token)\n\t\t\thttp.Redirect(responseWriter, request, GetApiUrlListApiKeys(), http.StatusTemporaryRedirect)\n\t\t} else {\n\t\t\tServeLogin(responseWriter, \"Wrong username or password\")\n\t\t}\n\t}\n}", "func (r *oauthProxy) loginHandler(w http.ResponseWriter, req *http.Request) {\n\tctx, span, _ := r.traceSpan(req.Context(), \"login handler\")\n\tif span != nil {\n\t\tdefer span.End()\n\t}\n\n\terrorMsg, code, err := func() (string, int, error) {\n\t\tif !r.config.EnableLoginHandler {\n\t\t\treturn \"attempt to login when login handler is disabled\", http.StatusNotImplemented, errors.New(\"login handler disabled\")\n\t\t}\n\t\tusername := req.PostFormValue(\"username\")\n\t\tpassword := req.PostFormValue(\"password\")\n\t\tif username == \"\" || password == \"\" {\n\t\t\treturn \"request does not have both username and password\", http.StatusBadRequest, errors.New(\"no credentials\")\n\t\t}\n\n\t\tclient, err := r.client.OAuthClient()\n\t\tif err != nil {\n\t\t\treturn \"unable to create the oauth client for user_credentials request\", http.StatusInternalServerError, err\n\t\t}\n\n\t\tstart := time.Now()\n\t\ttoken, err := client.UserCredsToken(username, password)\n\t\tif err != nil {\n\t\t\tif strings.HasPrefix(err.Error(), oauth2.ErrorInvalidGrant) {\n\t\t\t\treturn \"invalid user credentials provided\", http.StatusUnauthorized, err\n\t\t\t}\n\t\t\treturn \"unable to request the access token via grant_type 'password'\", http.StatusInternalServerError, err\n\t\t}\n\t\t// @metric observe the time taken for a login request\n\t\toauthLatencyMetric.WithLabelValues(\"login\").Observe(time.Since(start).Seconds())\n\n\t\t_, identity, err := parseToken(token.AccessToken)\n\t\tif err != nil {\n\t\t\treturn \"unable to decode the access token\", http.StatusNotImplemented, err\n\t\t}\n\n\t\tr.dropAccessTokenCookie(req.WithContext(ctx), w, token.AccessToken, time.Until(identity.ExpiresAt))\n\n\t\t// @metric a token has been issued\n\t\toauthTokensMetric.WithLabelValues(\"login\").Inc()\n\n\t\tw.Header().Set(\"Content-Type\", jsonMime)\n\t\terr = json.NewEncoder(w).Encode(tokenResponse{\n\t\t\tIDToken: 
token.IDToken,\n\t\t\tAccessToken: token.AccessToken,\n\t\t\tRefreshToken: token.RefreshToken,\n\t\t\tExpiresIn: token.Expires,\n\t\t\tScope: token.Scope,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn \"\", http.StatusInternalServerError, err\n\t\t}\n\n\t\treturn \"\", http.StatusOK, nil\n\t}()\n\tif err != nil {\n\t\tr.errorResponse(w, req.WithContext(ctx), strings.Join([]string{errorMsg, \"client_ip\", req.RemoteAddr}, \",\"), code, err)\n\t}\n}", "func redirectHandler(c *gin.Context) {\n\t// Retrieve provider from route\n\tprovider := c.Param(\"provider\")\n\n\t//datos que provienen de github\n\tproviderSecrets := map[string]map[string]string{\n\t\t\"github\": {\n\t\t\t\"clientID\": \"b9563aec19bb264601a1\",\n\t\t\t\"clientSecret\": \"6c5cd9388386a6461a007576f4bfba1a7d144408\",\n\t\t\t\"redirectURL\": \"http://localhost:8090/api/socialLogin/auth/github/callback\",\n\t\t},\n\t}\n\n\tproviderScopes := map[string][]string{\n\t\t\"github\": []string{\"public_repo\"},\n\t}\n\n\tproviderData := providerSecrets[provider]\n\tactualScopes := providerScopes[provider]\n\tauthURL, err := gocial.New().\n\t\tDriver(provider).\n\t\tScopes(actualScopes).\n\t\tRedirect(\n\t\t\tproviderData[\"clientID\"],\n\t\t\tproviderData[\"clientSecret\"],\n\t\t\tproviderData[\"redirectURL\"],\n\t\t)\n\n\t// Check for errors (usually driver not valid)\n\tif err != nil {\n\t\tc.Writer.Write([]byte(\"Error: \" + err.Error()))\n\t\treturn\n\t}\n\t// Redirect with authURL\n\tc.Redirect(http.StatusFound, authURL)\n}", "func (l *RemoteProvider) InitiateLogin(w http.ResponseWriter, r *http.Request, _ bool) {\n\ttu := viper.GetString(\"MESHERY_SERVER_CALLBACK_URL\")\n\tif tu == \"\" {\n\t\ttu = \"http://\" + r.Host + \"/api/user/token\" // Hard coding the path because this is what meshery expects\n\t}\n\n\t_, err := r.Cookie(tokenName)\n\t// logrus.Debugf(\"url token: %v %v\", token, err)\n\tif err != nil {\n\t\thttp.SetCookie(w, &http.Cookie{\n\t\t\tName: l.RefCookieName,\n\t\t\tValue: \"/\",\n\t\t\tExpires: time.Now().Add(l.LoginCookieDuration),\n\t\t\tPath: \"/\",\n\t\t\tHttpOnly: true,\n\t\t})\n\t\thttp.Redirect(w, r, l.RemoteProviderURL+\"?source=\"+base64.RawURLEncoding.EncodeToString([]byte(tu))+\"&provider_version=\"+l.ProviderVersion, http.StatusFound)\n\t\treturn\n\t}\n\n\t// TODO: go to ref cookie\n\thttp.Redirect(w, r, \"/\", http.StatusFound)\n}", "func Login(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\ttokenstring := r.Header.Get(\"Authorization\")\n\tusername, id := utils.GetUsernameByToken(tokenstring, w)\n\tif username != \"error\" {\n\t\tctx := context.WithValue(r.Context(), \"user\", structs.UserToken{\n\t\t\tUsername: username,\n\t\t\tID: id,\n\t\t})\n\t\t//ctx2 := context.WithValue(r.Context(), \"id\", id)\n\t\tnext(w, r.WithContext(ctx))\n\t} else {\n\t\treturn\n\t}\n}", "func OAUTHRedirect(ctx *fiber.Ctx) error {\n\n\tmodels.SYSLOG.Tracef(\"entering OAUTHRedirect; original URL: %v\", ctx.OriginalURL())\n\tdefer models.SYSLOG.Trace(\"exiting OAUTHRedirect\")\n\n\t// First, we need to get the value of the `code` query param\n\tcode := ctx.Query(\"code\", \"\")\n\tif len(code) < 1 {\n\t\treturn ctx.SendStatus(fiber.StatusBadRequest)\n\t}\n\n\t// Next, lets for the HTTP request to call the github oauth enpoint\tto get our access token\n\n\ta := fiber.AcquireAgent()\n\treq := a.Request()\n\treq.Header.SetMethod(fiber.MethodPost)\n\treq.Header.Set(\"accept\", 
\"application/json\")\n\treq.SetRequestURI(fmt.Sprintf(\"https://github.com/login/oauth/access_token?client_id=%s&client_secret=%s&code=%s\", models.ClientID, models.ClientSecret, code))\n\tif err := a.Parse(); err != nil {\n\t\tmodels.SYSLOG.Errorf(\"could not create HTTP request: %v\", err)\n\t}\n\n\tvar retCode int\n\tvar retBody []byte\n\tvar errs []error\n\t// Send out the HTTP request\n\tvar t *models.OAuthAccessResponse\n\n\tif retCode, retBody, errs = a.Struct(&t); len(errs) > 0 {\n\t\tmodels.SYSLOG.Tracef(\"received: %v\", string(retBody))\n\t\tmodels.SYSLOG.Errorf(\"could not send HTTP request: %v\", errs)\n\t\treturn ctx.SendStatus(fiber.StatusInternalServerError)\n\t}\n\tmodels.SYSLOG.Tracef(\"received : %v %v %v\", retCode, string(retBody), errs)\n\n\tvar sess *session.Session\n\tvar err error\n\t// Finally, send a response to redirect the user to the \"welcome\" page with the access token\n\tif sess, err = models.MySessionStore.Get(ctx); err == nil {\n\t\tsess.Set(\"token\", t.AccessToken)\n\t\tmodels.SYSLOG.Tracef(\"setting session token %v\", t.AccessToken)\n\t\tsessData, _ := models.MySessionStore.Get(ctx)\n\t\tdefer sessData.Save()\n\t\t//models.MySessionStore.RegisterType(models.OAuthAccessResponse)\n\t\tsessData.Set(\"oauth-scope\", t.Scope)\n\t\tsessData.Set(\"oauth-token-type\", t.TokenType)\n\t\tsessData.Set(\"oauth-token\", t.AccessToken)\n\n\t\tif err != nil {\n\t\t\tmodels.SYSLOG.Errorf(\"session saving exception %v\", err)\n\t\t}\n\t\tmodels.SYSLOG.Tracef(\"redirecting to /welcome.html?access_token=%v\", t.AccessToken)\n\t\t//\t\treturn ctx.Redirect(\"/welcome.html?access_token=\"+t.AccessToken, fiber.StatusFound)\n\t\treturn ctx.Redirect(\"/welcome.html\", fiber.StatusFound)\n\t}\n\n\tmodels.SYSLOG.Tracef(\"redirecting to /\")\n\treturn ctx.Redirect(\"/\", fiber.StatusTemporaryRedirect)\n}", "func (s *Server) handleAuthLogin() http.HandlerFunc {\n\ttype req struct {\n\t\tUsername string `json:\"username\"`\n\t\tPassword string `json:\"password\"`\n\t}\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tcred := &req{}\n\t\tvar err error\n\n\t\tif err = json.NewDecoder(r.Body).Decode(cred); err != nil {\n\t\t\ts.logger.Logf(\"[ERROR] During decode body: %v\\n\", err)\n\t\t\ts.error(w, r, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\tif cred.Username == \"\" || cred.Password == \"\" {\n\t\t\ts.logger.Logf(\"[ERROR] Empty credentials in body: %v\\n\", helpers.ErrNoBodyParams)\n\t\t\ts.error(w, r, http.StatusBadRequest, helpers.ErrNoBodyParams)\n\t\t\treturn\n\t\t}\n\n\t\ttoken, expTime, err := s.store.Users().Login(cred.Username, cred.Password, s.config.SecretKey)\n\t\tif err != nil {\n\t\t\ts.logger.Logf(\"[ERROR] %v\\n\", err)\n\t\t\ts.error(w, r, http.StatusBadRequest, err)\n\t\t\treturn\n\t\t}\n\n\t\thttp.SetCookie(w, &http.Cookie{\n\t\t\tName: \"TKN\",\n\t\t\tValue: token,\n\t\t\tExpires: expTime,\n\t\t\tHttpOnly: true,\n\t\t\tPath: \"/\",\n\t\t\tDomain: s.config.AppDomain,\n\t\t})\n\n\t\ts.respond(w, r, http.StatusOK, map[string]string{\n\t\t\t\"login\": \"successful\",\n\t\t\t// \"user\": cred.Username,\n\t\t\t\"token\": token,\n\t\t})\n\t}\n}", "func (a *Authenticator) LoginHandler(lm ...LoginModifier) khttp.FuncHandler {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\terr := a.PerformLogin(w, r, lm...)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"internal error\", http.StatusInternalServerError)\n\t\t\tlog.Printf(\"ERROR - could not complete login - %s\", err)\n\t\t}\n\t}\n}", "func loginHandler(w 
http.ResponseWriter, r *http.Request) {\n\tctx := context.Background()\n\tif b.authenticator == nil {\n\t\tvar err error\n\t\tb.authenticator, err = initAuth(ctx)\n\t\tif err != nil {\n\t\t\tlog.Print(\"loginHandler authenticator could not be initialized\")\n\t\t\thttp.Error(w, \"Server error\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\tsessionInfo := identity.InvalidSession()\n\terr := r.ParseForm()\n\tif err != nil {\n\t\tlog.Printf(\"loginHandler: error parsing form: %v\", err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tusername := r.PostFormValue(\"UserName\")\n\tlog.Printf(\"loginHandler: username = %s\", username)\n\tpassword := r.PostFormValue(\"Password\")\n\tusers, err := b.authenticator.CheckLogin(ctx, username, password)\n\tif err != nil {\n\t\tlog.Printf(\"main.loginHandler checking login, %v\", err)\n\t\thttp.Error(w, \"Error checking login\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif len(users) != 1 {\n\t\tlog.Printf(\"loginHandler: user %s not found or password does not match\", username)\n\t} else {\n\t\tcookie, err := r.Cookie(\"session\")\n\t\tif err == nil {\n\t\t\tlog.Printf(\"loginHandler: updating session: %s\", cookie.Value)\n\t\t\tsessionInfo = b.authenticator.UpdateSession(ctx, cookie.Value, users[0], 1)\n\t\t}\n\t\tif (err != nil) || !sessionInfo.Valid {\n\t\t\tsessionid := identity.NewSessionId()\n\t\t\tdomain := config.GetSiteDomain()\n\t\t\tlog.Printf(\"loginHandler: setting new session %s for domain %s\",\n\t\t\t\tsessionid, domain)\n\t\t\tcookie := &http.Cookie{\n\t\t\t\tName: \"session\",\n\t\t\t\tValue: sessionid,\n\t\t\t\tDomain: domain,\n\t\t\t\tPath: \"/\",\n\t\t\t\tMaxAge: 86400 * 30, // One month\n\t\t\t}\n\t\t\thttp.SetCookie(w, cookie)\n\t\t\tsessionInfo = b.authenticator.SaveSession(ctx, sessionid, users[0], 1)\n\t\t}\n\t}\n\tif strings.Contains(r.Header.Get(\"Accept\"), \"application/json\") {\n\t\tsendJSON(w, sessionInfo)\n\t} else {\n\t\tif sessionInfo.Authenticated == 1 {\n\t\t\ttitle := b.webConfig.GetVarWithDefault(\"Title\", defTitle)\n\t\t\tcontent := htmlContent{\n\t\t\t\tTitle: title,\n\t\t\t}\n\t\t\tb.pageDisplayer.DisplayPage(w, \"index.html\", content)\n\t\t} else {\n\t\t\tloginFormHandler(w, r)\n\t\t}\n\t}\n}", "func oauthCallbackHandler(w http.ResponseWriter, r *http.Request) {\n\ttransport := &oauth.Transport{Config: &oauthProviderConfig.oauthConfig}\n\ttransport.Exchange(r.FormValue(\"code\"))\n\tclient := transport.Client()\n\tresponse, err := client.Get(oauthProviderConfig.UserInfoAPI)\n\tif err != nil {\n\t\tlog.Printf(\"Error while contacting '%s': %s\\n\", oauthProviderConfig.UserInfoAPI, err)\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\tbody, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Error while parsing response from '%s': %s\\n\", oauthProviderConfig.UserInfoAPI, err)\n\t\thttp.Error(w, http.StatusText(http.StatusBadGateway), http.StatusBadGateway)\n\t\treturn\n\t}\n\tresponse.Body.Close()\n\tauthorized, email := isAuthorized(body)\n\tif authorized {\n\t\tauthorizeEmail(email, w, r)\n\t\tlog.Println(\"User\", email, \"logged in\")\n\t\tsession, _ := store.Get(r, serverConfig.CookieName)\n\t\tif next, ok := session.Values[\"next\"]; ok {\n\t\t\thttp.Redirect(w, r, next.(string), http.StatusFound)\n\t\t}\n\t} else {\n\t\tlog.Println(\"Access Denied: Couldn't match an email address in the server response.\")\n\t\thttp.Error(w, http.StatusText(http.StatusForbidden), 
http.StatusForbidden)\n\t}\n}", "func (ctrl LoginController) ProcessLogin(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tsession, _ := store.Get(r, \"session-id\")\n\tusername := r.PostFormValue(\"username\")\n\tpassword := r.PostFormValue(\"password\")\n\n\tuser, _ := model.GetUserByUserName(username)\n\tv := new(Validator)\n\n\tif !v.ValidateUsername(username) {\n\t\tSessionFlash(v.err, w, r)\n\t\thttp.Redirect(w, r, URL_LOGIN, http.StatusMovedPermanently)\n\t\treturn\n\t}\n\n\tif user.Username == \"\" || !CheckPasswordHash(password, user.Password) {\n\t\tSessionFlash(messages.Error_username_or_password, w, r)\n\t\thttp.Redirect(w, r, URL_LOGIN, http.StatusMovedPermanently)\n\t\treturn\n\t}\n\n\tsession.Values[\"username\"] = user.Username\n\tsession.Values[\"id\"] = user.ID\n\tsession.Save(r, w)\n\thttp.Redirect(w, r, URL_HOME, http.StatusMovedPermanently)\n}", "func (c *UserController) HandleLogin() {\n\tuserName := c.GetString(\"username\")\n\tif userName == \"\" {\n\t\tbeego.Error(\"HandleLogin:: empty username\")\n\t\tc.TplName = \"login.html\"\n\t\treturn\n\t}\n\tpwd := c.GetString(\"pwd\")\n\tif pwd == \"\" {\n\t\tbeego.Error(\"HandleLogin:: empty pwd\")\n\t\tc.TplName = \"login.html\"\n\t\treturn\n\t}\n\thash := sha256.New()\n\thash.Write([]byte(pwd))\n\tmd := hash.Sum(nil)\n\tpwdStr := hex.EncodeToString(md)\n\n\tvar user models.User\n\tuser.Name = userName\n\to := orm.NewOrm()\n\terr := o.Read(&user, \"Name\")\n\tif err != nil {\n\t\tbeego.Error(\"HandleLogin:: read user fail\", err)\n\t\tc.TplName = \"login.html\"\n\t\treturn\n\t}\n\tif user.Password != pwdStr {\n\t\tbeego.Error(\"HandleLogin:: pwd not equal\")\n\t\tc.TplName = \"login.html\"\n\t\treturn\n\t}\n\n\tc.SetSession(\"username\", userName)\n\t//登录成功设置记录用户名\n\tremember := c.GetString(\"remember\")\n\tbeego.Info(\"remember=\", remember)\n\tif remember == \"on\" {\n\t\tc.Ctx.SetCookie(\"username\", userName, 3600)\n\t} else {\n\t\tc.Ctx.SetCookie(\"username\", userName, -1)\n\t}\n\n\tc.Redirect(\"/index\", 302)\n}", "func authCallbackHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {\n\tghu, err := authWithGithubCode(ctx, r.FormValue(\"code\"))\n\tif err != nil {\n\t\trenderError(w, err, http.StatusInternalServerError, \"GitHub login failed\")\n\t\treturn\n\t}\n\n\tu, err := findOrCreateUser(ghu)\n\tif err != nil {\n\t\trenderError(w, err, http.StatusInternalServerError, \"Failed to find a user using GitHub profile\")\n\t\treturn\n\t}\n\n\tsess := db.NewSession(u.ID)\n\tif err := sess.Create(); err != nil {\n\t\trenderError(w, err, http.StatusInternalServerError, \"Failed to create a session\")\n\t\treturn\n\t}\n\n\tctx = auth.ContextWithSession(ctx, sess)\n\tauth.AuthorizeResponse(ctx, w)\n\tauth.CacheSession(sess)\n\n\thttp.Redirect(w, r, rootPath, http.StatusTemporaryRedirect)\n}", "func AuthorizeHandler(context *auth.Context) (*claims.Claims, error) {\n\tvar (\n\t\tauthInfo auth_identity.Basic\n\t\treq = context.Request\n\t\ttx = context.Auth.GetDB(req)\n\t\tprovider, _ = context.Provider.(*password.Provider)\n\t)\n\n\treq.ParseForm()\n\tauthInfo.Provider = provider.GetName()\n\tauthInfo.UID = strings.TrimSpace(req.Form.Get(\"login\"))\n\n\tif tx.Model(context.Auth.Config.AuthIdentityModel).Where(\n\t\tmap[string]interface{}{\n\t\t\t\"provider\": authInfo.Provider,\n\t\t\t\"uid\": authInfo.UID,\n\t\t}).Scan(&authInfo).RecordNotFound() {\n\t\treturn nil, auth.ErrInvalidAccount\n\t}\n\n\tif provider.Config.Confirmable && authInfo.ConfirmedAt == nil {\n\t\tcurrentUser, _ := 
context.Auth.UserStorer.Get(authInfo.ToClaims(), context)\n\t\tprovider.Config.ConfirmMailer(authInfo.UID, context, authInfo.ToClaims(), currentUser)\n\n\t\treturn nil, password.ErrUnconfirmed\n\t}\n\n\tif err := provider.Encryptor.Compare(authInfo.EncryptedPassword, strings.TrimSpace(req.Form.Get(\"password\"))); err == nil {\n\t\treturn authInfo.ToClaims(), err\n\t}\n\n\treturn nil, auth.ErrInvalidPassword\n}", "func LoginHandle(msg []byte, c echo.Context) (recv []byte, err error) {\n\tdefer util.Stack()\n\n\tabsMessage := &pf.AbsMessage{}\n\terr = absMessage.Unmarshal(msg)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tmsgID := absMessage.GetMsgID()\n\tmsgBody := absMessage.GetMsgBody()\n\n\tswitch msgID {\n\tcase int32(pf.Login):\n\t\tloginSend := &pf.LoginSend{}\n\t\terr = loginSend.Unmarshal(msgBody)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\ttoken, id, loginRecv := handleLogin(loginSend, c)\n\t\trecv, err = loginRecv.Marshal()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tabsMessage.Token = token\n\t\tutil.LogSend(msgID, id, 0, loginSend, \"Login\")\n\t\tutil.LogRecv(msgID, id, 0, loginRecv, \"Login\")\n\tdefault:\n\t\terr = def.ErrHandleLogin\n\t\treturn\n\t}\n\n\tabsMessage.MsgBody = recv\n\trecv, err = absMessage.Marshal()\n\treturn\n}", "func checkLogin(next echo.HandlerFunc) echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\t\tcookie, err := c.Cookie(\"SessionID\")\n\t\tif err == nil && cookie != nil && cookie.Value == \"some hash\" {\n\t\t\treturn next(c)\n\t\t}\n\t\treturn c.Redirect(http.StatusMovedPermanently, fmt.Sprintf(\"/login?redirect=%s\", c.Path()))\n\t}\n}", "func LoginHandler(dbase *gorm.DB, w http.ResponseWriter, r *http.Request) {\n\tvar user *db.Psychologist\n\tvar resp map[string]interface{}\n\tvar err error\n\n\terr = json.NewDecoder(r.Body).Decode(&user)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\terrorResponse := utils.ErrorResponse{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tMessage: fmt.Sprintln(\"An error occurred while processing your request\"),\n\t\t}\n\t\tlog.Println(json.NewEncoder(w).Encode(errorResponse))\n\t\treturn\n\t}\n\n\tresp, err = FindOne(dbase, user.Email, user.Password)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tlog.Println(json.NewEncoder(w).Encode(utils.ErrorResponse{\n\t\t\tCode: http.StatusNotFound,\n\t\t\tMessage: err.Error(),\n\t\t}))\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tlog.Println(json.NewEncoder(w).Encode(resp))\n}", "func (c *client) Login(w http.ResponseWriter, r *http.Request) (string, error) {\n\tlogrus.Trace(\"Processing login request\")\n\n\t// generate a random string for creating the OAuth state\n\toAuthState, err := random.GenerateRandomString(32)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// temporarily redirect request to Github to begin workflow\n\thttp.Redirect(w, r, c.OConfig.AuthCodeURL(oAuthState), http.StatusTemporaryRedirect)\n\n\treturn oAuthState, nil\n}", "func (l *RemoteProvider) TokenHandler(w http.ResponseWriter, r *http.Request, fromMiddleWare bool) {\n\ttokenString := r.URL.Query().Get(tokenName)\n\tlogrus.Debugf(\"token : %v\", tokenString)\n\tck := &http.Cookie{\n\t\tName: tokenName,\n\t\tValue: string(tokenString),\n\t\tPath: \"/\",\n\t\tHttpOnly: true,\n\t}\n\thttp.SetCookie(w, ck)\n\n\t// Get new capabilities\n\t// Doing this here is important so that\n\tl.loadCapabilities(tokenString)\n\n\t// Download the package for the user\n\tl.downloadProviderExtensionPackage()\n\n\t// 
Proceed to redirect once the capabilities has loaded\n\t// and the package has been downloaded\n\thttp.Redirect(w, r, \"/\", http.StatusFound)\n}", "func LoginHandler(ctx *gin.Context) {\n\tstate = randToken()\n\tsession := sessions.Default(ctx)\n\tsession.Set(\"state\", state)\n\tsession.Save()\n\tfmt.Println(\"LOGIN SESSION:\", session.Get(\"userid\"))\n\t// TODO create this page from a template\n\tloginPage := fmt.Sprintf(`\n\t<html>\n\t<title>Google Login</title>\n\t<body>\n\t<h3>Google Login</h3>\n\t<a href=\"%s\"><button>Login with Google</button></a>\n\t</body>\n\t</html>\n\t`, GetLoginURL(state))\n\tctx.Writer.Write([]byte(loginPage))\n}", "func LoginController(res http.ResponseWriter, req *http.Request) {\n\tsession, _ := utils.GetValidSession(req)\n\tif session.Values[\"userid\"] != nil {\n\t\tcontext.Set(req, \"userid\", session.Values[\"userid\"])\n\t\thttp.Redirect(res, req, urls.HomePath, http.StatusSeeOther)\n\t} else {\n\t\tt, _ := template.ParseFiles(templates.LoginTemplate)\n\t\tt.Execute(res, nil)\n\t}\n}", "func (a *authHandler) Login(c *gin.Context) {\n\tvar userLogin models.UserLogin\n\tif err := c.Bind(&userLogin); err != nil {\n\t\tpanic(err)\n\t}\n\tuser, err := authResource.Login(userLogin.Email, userLogin.Password)\n\tif err != nil {\n\t\tc.Error(err)\n\t\treturn\n\t}\n\n\ttoken := jwt_lib.New(jwt_lib.GetSigningMethod(\"HS256\"))\n\t// Set some claims\n\ttoken.Claims[\"exp\"] = time.Now().Add(time.Hour * 1).Unix()\n\ttoken.Claims[\"userId\"] = user.Id.Hex()\n\t// Sign and get the complete encoded token as a string\n\tapiKey, err := token.SignedString([]byte(config.GetSecret()))\n\tif err != nil {\n\t\tc.Error(apiErrors.ThrowError(apiErrors.ServerError))\n\t\treturn\n\t}\n\t// Remove password\n\tuser.Password = \"\"\n\n\tc.JSON(200, gin.H{\n\t\t\"user\": user,\n\t\t\"api-key\": apiKey,\n\t})\n}", "func (s *Service) Login(c context.Context, req *authpb.LoginRequest) (*authpb.LoginResponse, error) {\n\ts.Logger.Info(\"received code\", zap.String(\"code\", req.Code))\n\n\topenID, err := s.OpenIDResolver.Resolve((req.Code))\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Unavailable, \"cannot resolve openid: %v\", err)\n\t}\n\n\taccountID, err := s.Mongo.ResolveAccountID(c, openID)\n\tif err != nil {\n\t\ts.Logger.Error(\"cannot resolve account id \", zap.Error(err))\n\t\treturn nil, status.Errorf(codes.Internal, \"\")\n\t}\n\n\ttkn, err := s.TokenGenerator.GenerateToken(accountID.String(), s.TokenExpire)\n\tif err != nil {\n\t\ts.Logger.Error(\"cannot generate token\", zap.Error(err))\n\t\treturn nil, status.Error(codes.Internal, \"\")\n\t}\n\treturn &authpb.LoginResponse{\n\t\tAccessToken: tkn,\n\t\tExpiresIn: int32(s.TokenExpire.Seconds()),\n\t}, nil\n\n}", "func LoginV0Handler(config *types.ConfigMap) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tstart := time.Now()\n\t\tdefer log.Debugf(\"LoginV0Handler Elapsed - %s\", time.Since(start))\n\n\t\t//Check for valid username and password\n\t\tusername, password, ok := r.BasicAuth()\n\t\tif !ok {\n\t\t\tsendResponse(http.StatusUnauthorized, \"\", types.RawAuthResponse{}, fmt.Errorf(\"Need valid username and password as basic auth\"), w)\n\t\t\treturn\n\t\t}\n\t\tuserDetailFromConfig, err := validateAndGetUser(config, username, password)\n\t\tif err != nil {\n\t\t\terrHandle(w, fmt.Sprintf(\"Unable to validate : %s\", err), \"Authentication failed\", 401)\n\t\t\treturn\n\t\t}\n\t\tuser := types.User{\n\t\t\tUsername: userDetailFromConfig.UserName,\n\t\t\tEMail: 
userDetailFromConfig.Email,\n\t\t\tUID: userDetailFromConfig.UID,\n\t\t\tGroups: userDetailFromConfig.Groups}\n\t\tif user.UID == \"\" {\n\t\t\tuser.UID = user.Username\n\t\t}\n\t\ttoken, err := auth.GenerateToken(user, \"\", auth.V0)\n\t\tif err != nil {\n\t\t\terrHandle(w, fmt.Sprintf(\"Something is wrong with auth token. : %s\", err), \"Authentication failed\", 401)\n\t\t\treturn\n\t\t}\n\n\t\tv1Token := types.V1Token{\n\t\t\tToken: token.JWT,\n\t\t\tExpiry: token.Expiry,\n\t\t}\n\n\t\tdata, _ := json.Marshal(v1Token)\n\t\tresponse := JSONResponse{}\n\t\tresponse.status = http.StatusCreated\n\t\tresponse.data = data\n\n\t\tresponse.Write(w)\n\t}\n}", "func AuthenticateHandler(w http.ResponseWriter, r *http.Request) {\n\n\tuser := &models.User{}\n\terr := json.NewDecoder(r.Body).Decode(user) //decode the request body into struct and fail if any error occur\n\tif err != nil {\n\t\tfmt.Println(\"Debug user AuthenticateHandler:\", err)\n\t\tutils.Respond(w, utils.Message(false, \"Invalid request\"))\n\t\treturn\n\t}\n\n\tresp := models.Login(user.Email, user.Password)\n\tutils.Respond(w, resp)\n}", "func (c *Controller) Login(ctx context.Context) (err error) {\n\t// Build request\n\treq, err := c.requestBuild(ctx, \"POST\", authenticationAPIName, \"login\", map[string]string{\n\t\t\"username\": c.user,\n\t\t\"password\": c.password,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"building request failed: %w\", err)\n\t}\n\t// Add custom header for login\n\torigin := fmt.Sprintf(\"%s://%s\", c.url.Scheme, c.url.Hostname())\n\tif c.url.Port() != \"\" {\n\t\torigin += \":\" + c.url.Port()\n\t}\n\treq.Header.Set(\"Origin\", origin)\n\t// execute auth request\n\tif err = c.requestExecute(ctx, req, nil, false); err != nil {\n\t\terr = fmt.Errorf(\"executing request failed: %w\", err)\n\t}\n\treturn\n}", "func (j *AuthMux) Callback() http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlog := j.Logger.\n\t\t\tWithField(\"component\", \"auth\").\n\t\t\tWithField(\"remote_addr\", r.RemoteAddr).\n\t\t\tWithField(\"method\", r.Method).\n\t\t\tWithField(\"url\", r.URL)\n\n\t\tstate := r.FormValue(\"state\")\n\t\t// Check if the OAuth state token is valid to prevent CSRF\n\t\t// The state variable we set is actually a token. We'll check\n\t\t// if the token is valid. 
We don't need to know anything\n\t\t// about the contents of the principal only that it hasn't expired.\n\t\tif _, err := j.Tokens.ValidPrincipal(r.Context(), Token(state), TenMinutes); err != nil {\n\t\t\tlog.Error(\"Invalid OAuth state received: \", err.Error())\n\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\treturn\n\t\t}\n\n\t\t// Exchange the code back with the provider to the the token\n\t\tconf := j.Provider.Config()\n\t\tcode := r.FormValue(\"code\")\n\t\ttoken, err := conf.Exchange(r.Context(), code)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Unable to exchange code for token \", err.Error())\n\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\treturn\n\t\t}\n\n\t\tif token.Extra(\"id_token\") != nil && !j.UseIDToken {\n\t\t\tlog.Info(\"found an extra id_token, but option --useidtoken is not set\")\n\t\t}\n\n\t\t// if we received an extra id_token, inspect it\n\t\tvar id string\n\t\tvar group string\n\t\tif j.UseIDToken && token.Extra(\"id_token\") != nil && token.Extra(\"id_token\") != \"\" {\n\t\t\tlog.Debug(\"found an extra id_token\")\n\t\t\tif provider, ok := j.Provider.(ExtendedProvider); ok {\n\t\t\t\tlog.Debug(\"provider implements PrincipalIDFromClaims()\")\n\t\t\t\ttokenString, ok := token.Extra(\"id_token\").(string)\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.Error(\"cannot cast id_token as string\")\n\t\t\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tclaims, err := j.Tokens.GetClaims(tokenString)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(\"parsing extra id_token failed:\", err)\n\t\t\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tlog.Debug(\"found claims: \", claims)\n\t\t\t\tid, err = provider.PrincipalIDFromClaims(claims)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(\"requested claim not found in id_token:\", err)\n\t\t\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tgroup, err = provider.GroupFromClaims(claims)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(\"requested claim not found in id_token:\", err)\n\t\t\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Debug(\"provider does not implement PrincipalIDFromClaims()\")\n\t\t\t}\n\t\t} else {\n\t\t\t// otherwise perform an additional lookup\n\t\t\toauthClient := conf.Client(r.Context(), token)\n\t\t\t// Using the token get the principal identifier from the provider\n\t\t\tid, err = j.Provider.PrincipalID(oauthClient)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"Unable to get principal identifier \", err.Error())\n\t\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tgroup, err = j.Provider.Group(oauthClient)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"Unable to get OAuth Group\", err.Error())\n\t\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tp := Principal{\n\t\t\tSubject: id,\n\t\t\tIssuer: j.Provider.Name(),\n\t\t\tGroup: group,\n\t\t}\n\t\tctx := r.Context()\n\t\terr = j.Auth.Authorize(ctx, w, p)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Unable to get add session to response \", err.Error())\n\t\t\thttp.Redirect(w, r, j.FailureURL, http.StatusTemporaryRedirect)\n\t\t\treturn\n\t\t}\n\t\tlog.Info(\"User \", id, \" is authenticated\")\n\t\thttp.Redirect(w, r, j.SuccessURL, 
http.StatusTemporaryRedirect)\n\t})\n}", "func (s *Services) authorize(handler func(wr http.ResponseWriter, req *http.Request, uid uint64)) http.HandlerFunc {\n\treturn func(wr http.ResponseWriter, req *http.Request) {\n\t\t// TODO: Save the requested url in a cookie that can be redirected to after logging in successfully\n\t\tuid, err := s.auth.Authorize(wr, req)\n\t\tif err != nil {\n\t\t\tlog.Print(err)\n\t\t\thttp.Redirect(wr, req, \"/login\", 302)\n\t\t\treturn\n\t\t}\n\n\t\thandler(wr, req, uid)\n\t}\n}", "func (rh *RealmRedirect) Handle(w http.ResponseWriter, req *http.Request) {\n\tsegments := strings.Split(req.URL.Path, \"/\")\n\t// last path segment is the base64d realm ID which we will pass the incoming request to\n\tbase64realmID := segments[len(segments)-1]\n\tbytesRealmID, err := base64.RawURLEncoding.DecodeString(base64realmID)\n\trealmID := string(bytesRealmID)\n\tif err != nil {\n\t\tlog.WithError(err).WithField(\"base64_realm_id\", base64realmID).Print(\n\t\t\t\"Not a b64 encoded string\",\n\t\t)\n\t\tw.WriteHeader(400)\n\t\treturn\n\t}\n\n\trealm, err := rh.DB.LoadAuthRealm(realmID)\n\tif err != nil {\n\t\tlog.WithError(err).WithField(\"realm_id\", realmID).Print(\"Failed to load realm\")\n\t\tw.WriteHeader(404)\n\t\treturn\n\t}\n\tlog.WithFields(log.Fields{\n\t\t\"realm_id\": realmID,\n\t}).Print(\"Incoming realm redirect request\")\n\trealm.OnReceiveRedirect(w, req)\n}", "func LoginHandler(w http.ResponseWriter, r *http.Request) {\n\t// should do this for every request and not repeat for every handler\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tw.Header().Set(\"Access-Control-Allow-Headers\", \"*\")\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\") // * should be a secret and set to the request origin for the actual servers outside of develop\n\tif r.Method == http.MethodOptions {\n\t\treturn\n\t}\n\n\tvar loginRequest LoginRequest\n\n\terr := json.NewDecoder(r.Body).Decode(&loginRequest)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\t// validate request, would probably put this in middleware\n\tvalidator := validator.New()\n\terr = validator.Struct(loginRequest)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\terr = Authorize(loginRequest)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusUnauthorized)\n\t}\n}", "func (am AuthManager) Login(userID string, ctx *Ctx) (session *Session, err error) {\n\t// create a new session value\n\tsessionValue := NewSessionID()\n\t// userID and sessionID are required\n\tsession = NewSession(userID, sessionValue)\n\tif am.SessionTimeoutProvider != nil {\n\t\tsession.ExpiresUTC = am.SessionTimeoutProvider(session)\n\t}\n\tsession.UserAgent = webutil.GetUserAgent(ctx.Request)\n\tsession.RemoteAddr = webutil.GetRemoteAddr(ctx.Request)\n\n\t// call the perist handler if one's been provided\n\tif am.PersistHandler != nil {\n\t\terr = am.PersistHandler(ctx.Context(), session)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// if we're in jwt mode, serialize the jwt.\n\tif am.SerializeSessionValueHandler != nil {\n\t\tsessionValue, err = am.SerializeSessionValueHandler(ctx.Context(), session)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// inject cookies into the response\n\tam.injectCookie(ctx, am.CookieNameOrDefault(), sessionValue, session.ExpiresUTC)\n\treturn session, nil\n}", "func (o *oauth) authorizeHandler(w http.ResponseWriter, r *http.Request) {\n\t// We aren't using 
HandleAuthorizeRequest here because that assumes redirect_uri\n\t// exists on the request. We're just checking for a valid token.\n\tti, err := o.server.ValidationBearerToken(r)\n\tif err != nil {\n\t\tauthFailures.With(\"method\", \"oauth2\").Add(1)\n\t\tencodeError(w, err)\n\t\treturn\n\t}\n\tif ti.GetClientID() == \"\" {\n\t\tauthFailures.With(\"method\", \"oauth2\").Add(1)\n\t\tencodeError(w, fmt.Errorf(\"missing client_id\"))\n\t\treturn\n\t}\n\n\t// Passed token check, return \"200 OK\"\n\tauthSuccesses.With(\"method\", \"oauth2\").Add(1)\n\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\tw.WriteHeader(http.StatusOK)\n}", "func LoginHandler(c echo.Context) error {\n\treturn c.JSON(http.StatusOK, c.QueryParams())\n}", "func (h *AuthHandlers) Login(w http.ResponseWriter, req *http.Request) {\n\tvar err error\n\tvar data []byte\n\n\tsystemContext, err := h.getSystemContext(req)\n\tif err != nil {\n\t\tlog.Error().Err(err).Msg(\"request context retrevial failure\")\n\t\tmiddleware.ReturnError(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tif data, err = ioutil.ReadAll(req.Body); err != nil {\n\t\tlog.Error().Err(err).Msg(\"read body error\")\n\t\tmiddleware.ReturnError(w, \"error reading login data\", 500)\n\t\treturn\n\t}\n\tdefer req.Body.Close()\n\n\tloginDetails := &authz.LoginDetails{}\n\tif err := json.Unmarshal(data, loginDetails); err != nil {\n\t\tlog.Error().Err(err).Msg(\"marshal body error\")\n\t\tmiddleware.ReturnError(w, \"error reading login data\", 500)\n\t\treturn\n\t}\n\n\tif err := h.validate.Struct(loginDetails); err != nil {\n\t\tmiddleware.ReturnError(w, \"validation failure \"+err.Error(), 500)\n\t\treturn\n\t}\n\tloginDetails.OrgName = strings.ToLower(loginDetails.OrgName)\n\tloginDetails.Username = strings.ToLower(loginDetails.Username)\n\n\tlog.Info().Str(\"org\", loginDetails.OrgName).Str(\"user\", loginDetails.Username).Msg(\"login attempt\")\n\n\torgData, err := h.getOrgByName(req.Context(), systemContext, loginDetails.OrgName)\n\tif err != nil {\n\t\tlog.Error().Err(err).Str(\"org\", loginDetails.OrgName).Str(\"user\", loginDetails.Username).Msg(\"failed to get organization from name\")\n\t\tmiddleware.ReturnError(w, \"login failed\", 403)\n\t\treturn\n\t}\n\n\tresults, err := h.authenticator.Login(req.Context(), orgData, loginDetails)\n\tif err != nil {\n\t\tlog.Error().Err(err).Str(\"org\", loginDetails.OrgName).Str(\"user\", loginDetails.Username).Msg(\"login failed\")\n\t\tif req.Context().Err() != nil {\n\t\t\tmiddleware.ReturnError(w, \"internal server error\", 500)\n\t\t\treturn\n\t\t}\n\t\tmiddleware.ReturnError(w, \"login failed\", 403)\n\t\treturn\n\t}\n\t// add subscription id to response\n\tresults[\"subscription_id\"] = fmt.Sprintf(\"%d\", orgData.SubscriptionID)\n\n\trespData, err := json.Marshal(results)\n\tif err != nil {\n\t\tmiddleware.ReturnError(w, \"marshal auth response failed\", 500)\n\t\treturn\n\t}\n\n\tlog.Info().Str(\"org\", loginDetails.OrgName).Str(\"user\", loginDetails.Username).Str(\"OrgCID\", orgData.OrgCID).Msg(\"setting orgCID in cookie\")\n\tif err := h.secureCookie.SetAuthCookie(w, results[\"access_token\"], orgData.OrgCID, orgData.SubscriptionID); err != nil {\n\t\tmiddleware.ReturnError(w, \"internal cookie failure\", 500)\n\t\treturn\n\t}\n\tw.WriteHeader(200)\n\tfmt.Fprint(w, string(respData))\n}", "func handlerAuthCheck(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tswitch adminConfig.Auth {\n\t\tcase settings.AuthDB:\n\t\t\t// Check if user is already 
authenticated\n\t\t\tauthenticated, session := sessionsmgr.CheckAuth(r)\n\t\t\tif !authenticated {\n\t\t\t\thttp.Redirect(w, r, \"/login\", http.StatusFound)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// Set middleware values\n\t\t\ts := make(sessions.ContextValue)\n\t\t\ts[ctxUser] = session.Username\n\t\t\ts[ctxCSRF] = session.Values[ctxCSRF].(string)\n\t\t\tctx := context.WithValue(r.Context(), sessions.ContextKey(\"session\"), s)\n\t\t\t// Update metadata for the user\n\t\t\tif err := adminUsers.UpdateMetadata(session.IPAddress, session.UserAgent, session.Username, s[\"csrftoken\"]); err != nil {\n\t\t\t\tlog.Printf(\"error updating metadata for user %s: %v\", session.Username, err)\n\t\t\t}\n\t\t\t// Access granted\n\t\t\th.ServeHTTP(w, r.WithContext(ctx))\n\t\tcase settings.AuthSAML:\n\t\t\t_, err := samlMiddleware.Session.GetSession(r)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"GetSession %v\", err)\n\t\t\t}\n\t\t\tcookiev, err := r.Cookie(samlConfig.TokenName)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"error extracting JWT data: %v\", err)\n\t\t\t\thttp.Redirect(w, r, samlConfig.LoginURL, http.StatusFound)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tjwtdata, err := parseJWTFromCookie(samlData.KeyPair, cookiev.Value)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"error parsing JWT: %v\", err)\n\t\t\t\thttp.Redirect(w, r, samlConfig.LoginURL, http.StatusFound)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// Check if user is already authenticated\n\t\t\tauthenticated, session := sessionsmgr.CheckAuth(r)\n\t\t\tif !authenticated {\n\t\t\t\t// Create user if it does not exist\n\t\t\t\tif !adminUsers.Exists(jwtdata.Username) {\n\t\t\t\t\tlog.Printf(\"user not found: %s\", jwtdata.Username)\n\t\t\t\t\thttp.Redirect(w, r, forbiddenPath, http.StatusFound)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tu, err := adminUsers.Get(jwtdata.Username)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"error getting user %s: %v\", jwtdata.Username, err)\n\t\t\t\t\thttp.Redirect(w, r, forbiddenPath, http.StatusFound)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\taccess, err := adminUsers.GetEnvAccess(u.Username, u.DefaultEnv)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"error getting access for %s: %v\", jwtdata.Username, err)\n\t\t\t\t\thttp.Redirect(w, r, forbiddenPath, http.StatusFound)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\t// Create new session\n\t\t\t\tsession, err = sessionsmgr.Save(r, w, u, access)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"session error: %v\", err)\n\t\t\t\t\thttp.Redirect(w, r, samlConfig.LoginURL, http.StatusFound)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Set middleware values\n\t\t\ts := make(sessions.ContextValue)\n\t\t\ts[ctxUser] = session.Username\n\t\t\ts[ctxCSRF] = session.Values[ctxCSRF].(string)\n\t\t\tctx := context.WithValue(r.Context(), sessions.ContextKey(\"session\"), s)\n\t\t\t// Update metadata for the user\n\t\t\terr = adminUsers.UpdateMetadata(session.IPAddress, session.UserAgent, session.Username, s[\"csrftoken\"])\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"error updating metadata for user %s: %v\", session.Username, err)\n\t\t\t}\n\t\t\t// Access granted\n\t\t\tsamlMiddleware.RequireAccount(h).ServeHTTP(w, r.WithContext(ctx))\n\t\t}\n\t})\n}", "func loginHandler(w http.ResponseWriter, r *http.Request) {\n\tusername := r.FormValue(\"user\")\n\tpass := r.FormValue(\"pass\")\n\n\tlog.Printf(\"Authenticate: user[%s] pass[%s]\\n\", username, pass)\n\n\tuser := User{}\n\terr := userDB.Find(bson.M{\"username\": username}).One(&user)\n\n\terr = bcrypt.CompareHashAndPassword(user.Hash, 
[]byte(pass))\n\n\t// check values\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\tfmt.Fprintln(w, \"Wrong info\")\n\t\treturn\n\t}\n\n\t// create a signer for rsa 256\n\tt := jwt.New(jwt.GetSigningMethod(\"RS256\"))\n\n\t// include username in cookie\n\tt.Claims[\"username\"] = username\n\n\t// set the expire time\n\tt.Claims[\"exp\"] = time.Now().Add(time.Hour * 24 * 7).Unix()\n\ttokenString, err := t.SignedString(signKey)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintln(w, \"Sorry, error while Signing Token!\")\n\t\tlog.Printf(\"Token Signing error: %v\\n\", err)\n\t\treturn\n\t}\n\n\thttp.SetCookie(w, &http.Cookie{\n\t\tName: tokenName,\n\t\tValue: tokenString,\n\t\tPath: \"/\",\n\t\tRawExpires: \"0\",\n\t})\n\n\tw.Header().Set(\"Content-Type\", \"text/html\")\n\thttp.Redirect(w, r, \"client.html\", http.StatusTemporaryRedirect)\n}", "func AuthHandler(c *gin.Context) {\r\n\tvar state utils.State\r\n\tdecoded, err := utils.B64Decode(c.Query(\"state\"))\r\n\terr = json.Unmarshal([]byte(decoded), &state)\r\n\tif err != nil {\r\n\t\tc.JSON(http.StatusConflict, gin.H{\"code\": http.StatusConflict, \"message\": err})\r\n\t\treturn\r\n\t}\r\n\r\n\tAccessKey := state.AccessKey\r\n\tif AccessKey == \"\" {\r\n\t\tAccessKey = state.Token\r\n\t}\r\n\r\n\tAPPUserID, _, err := utils.LoadAccessKey(AccessKey)\r\n\r\n\tif err != nil || APPUserID == \"\" {\r\n\t\tc.JSON(http.StatusNonAuthoritativeInfo, gin.H{\"code\": http.StatusNonAuthoritativeInfo, \"message\": err})\r\n\t\treturn\r\n\t}\r\n\r\n\tfmt.Println(\"redirURL\", state.URL)\r\n\r\n\tcode := c.Query(\"code\")\r\n\tuserID, _ := utils.VerifyCode(code)\r\n\tuserInfo, _ := utils.GetUserInfo(userID)\r\n\r\n\tu := url.Values{}\r\n\tdata, _ := json.Marshal(userInfo)\r\n\tu.Set(\"state\", utils.B64Encode(string(data)))\r\n\tu.Set(\"timestamp\", fmt.Sprintln(time.Now().Unix()))\r\n\tc.Redirect(http.StatusFound, state.URL+\"?\"+u.Encode())\r\n}", "func (am AuthManager) LoginRedirect(ctx *Ctx) Result {\n\tif am.LoginRedirectHandler != nil {\n\t\tredirectTo := am.LoginRedirectHandler(ctx)\n\t\tif redirectTo != nil {\n\t\t\treturn Redirect(redirectTo.String())\n\t\t}\n\t}\n\treturn ctx.DefaultProvider.NotAuthorized()\n}", "func LoginSuccess(r *http.Request, client *http.Client, hydraAdminURL, challenge, subject, stateID string, extra map[string]interface{}) (string, error) {\n\treq := &hydraapi.HandledLoginRequest{\n\t\tSubject: &subject,\n\t\tContext: map[string]interface{}{},\n\t}\n\n\tif len(stateID) > 0 {\n\t\treq.Context[StateIDKey] = stateID\n\t}\n\n\tfor k, v := range extra {\n\t\treq.Context[k] = v\n\t}\n\n\tresp, err := AcceptLogin(client, hydraAdminURL, challenge, req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn resp.RedirectTo, nil\n}" ]
[ "0.6761304", "0.67374885", "0.661879", "0.64597034", "0.64308536", "0.635027", "0.62577486", "0.6231868", "0.61831856", "0.6161686", "0.6151811", "0.60392255", "0.603013", "0.5979919", "0.59725386", "0.59401226", "0.58740646", "0.58685976", "0.58455664", "0.58161426", "0.5793418", "0.57589483", "0.57581776", "0.5747329", "0.57380867", "0.572394", "0.56983453", "0.5666738", "0.5658463", "0.56554914", "0.56475294", "0.56197166", "0.5614138", "0.55980706", "0.55956167", "0.55855757", "0.55846393", "0.55771327", "0.55655414", "0.55596316", "0.55582803", "0.5554111", "0.5551692", "0.55398124", "0.5533831", "0.55332845", "0.5530271", "0.5519735", "0.55196726", "0.549632", "0.5489107", "0.54839444", "0.5471202", "0.5464589", "0.5462009", "0.5441684", "0.54368967", "0.54297286", "0.542826", "0.5420747", "0.5417392", "0.5389786", "0.53895193", "0.5378302", "0.53627497", "0.53595155", "0.5334685", "0.5333255", "0.53275996", "0.5311926", "0.53099906", "0.5306769", "0.5286853", "0.5285013", "0.5266116", "0.52586323", "0.52492046", "0.5247585", "0.52407414", "0.5229109", "0.5227819", "0.5222904", "0.52197057", "0.5217182", "0.52094865", "0.5207009", "0.5202762", "0.5199584", "0.51941526", "0.51929784", "0.5188632", "0.5188586", "0.5186786", "0.5182947", "0.51742685", "0.51735395", "0.5169924", "0.5163362", "0.51530564", "0.51511455" ]
0.75708395
0
HandleLogout invalidates the authenticated user's session
func (a *loginAPI) HandleLogout(user security.User, w http.ResponseWriter, r *http.Request) error {
	log.WithField("func", "server.HandleLogout").Debugf("for user '%s'", user.Username)
	// remove the cookie by expiring it
	a.setJWTCookie(a.jwt.CookieName, "", -1, w)
	a.appCookie.Set(errors.FlashKeyInfo, fmt.Sprintf("User '%s' was logged-off!", user.Email), cookieExpiry, w)
	http.Redirect(w, r, a.jwt.LoginRedirect, http.StatusTemporaryRedirect)
	return nil
}
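The handler above delegates cookie removal to a setJWTCookie helper whose body is not part of this record. As a minimal sketch of what such a helper usually does (the free-function form, the Path/HttpOnly choices, and the package name are assumptions, not the source's code), expiring a cookie in Go means re-setting it with a negative MaxAge:

package auth

import "net/http"

// setJWTCookie overwrites the named cookie; a negative maxAge tells the
// browser to delete it immediately, which is how a logout handler
// "removes" an auth cookie (cf. the -1 passed by HandleLogout above).
func setJWTCookie(name, value string, maxAge int, w http.ResponseWriter) {
	http.SetCookie(w, &http.Cookie{
		Name:     name,
		Value:    value,
		MaxAge:   maxAge,
		Path:     "/",
		HttpOnly: true,
	})
}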
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func HandleLogout(w http.ResponseWriter, r *http.Request) {\n\tsess, err := store.Get(r, \"s\")\n\tif err != nil {\n\t\tServeInternalServerError(w, r)\n\t\treturn\n\t}\n\tdelete(sess.Values, \"accountID\")\n\tsess.Save(r, w)\n\thttp.Redirect(w, r, \"/\", http.StatusSeeOther)\n}", "func (s *Server) handleLogout(w http.ResponseWriter, req *http.Request) error {\n\t// Intentionally ignore errors that may be caused by the stale session.\n\tsession, _ := s.cookieStore.Get(req, UserSessionName)\n\tsession.Options.MaxAge = -1\n\tdelete(session.Values, \"hash\")\n\tdelete(session.Values, \"email\")\n\t_ = session.Save(req, w)\n\tfmt.Fprintf(w, `<!DOCTYPE html><a href='/login'>Log in</a>`)\n\treturn nil\n}", "func LogoutHandler(ctx *enliven.Context) {\n\tctx.Session.Destroy()\n\tctx.Redirect(config.GetConfig()[\"user_logout_redirect\"])\n}", "func HandlerLogout(responseWriter http.ResponseWriter, request *http.Request) {\n\trequest.ParseForm()\n\n\tif !(IsTokenValid(responseWriter, request)) {\n\t\treturn\n\t}\n\n\tAddCookie(responseWriter, \"no token\")\n\tServeLogin(responseWriter, STR_EMPTY)\n}", "func (h *Handler) Logout(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase \"GET\":\n\t\terr := h.Services.User.Logout(Authorized.UUID)\n\t\tif err != nil {\n\t\t\tJsonResponse(w, r, http.StatusBadRequest, \"\")\n\t\t\treturn\n\t\t}\n\t\tJsonResponse(w, r, http.StatusOK, \"success\")\n\tcase \"POST\":\n\tdefault:\n\t\tJsonResponse(w, r, http.StatusBadRequest, \"Bad Request\")\n\t}\n}", "func LogoutHandler(w http.ResponseWriter, r *http.Request) {\n\tif !SessionManager.Exists(r.Context(), \"userid\") {\n\t\thttp.Redirect(w, r, \"/login\", http.StatusSeeOther)\n\t\treturn\n\t}\n\tif r.Method != \"GET\" {\n\t\tshared.HTTPerr(w, fmt.Errorf(\"bad HTTP method\"), http.StatusBadRequest)\n\t\treturn\n\t}\n\tSessionManager.Remove(r.Context(), \"userid\")\n\thttp.Redirect(w, r, \"/login\", http.StatusSeeOther)\n}", "func (userHandlersImpl UserHandlersImpl) Logout(w http.ResponseWriter, req *http.Request) {\n\n\tresp := \"\"\n\tWriteOKResponse(w, resp)\n\n}", "func logoutHandler(res http.ResponseWriter, req *http.Request) {\n\tdefer server.LogRequest(req, http.StatusFound)\n\n\tsession.Destroy(req, res)\n\trenderBaseTemplate(res, \"logout.html\", nil)\n}", "func Logout(w http.ResponseWriter, r *http.Request) {\n\tuser := r.Context().Value(utils.TokenContextKey).(string)\n\tmessage := models.Logout(user)\n\tutils.JSONResonseWithMessage(w, message)\n}", "func (u *Users) LogOut() {\n\tu.deauthorizeUser()\n\tu.serveAJAXSuccess(nil)\n}", "func (u *UserController) Logout(c *gin.Context) {\n\trequestID := requestid.Get(c)\n\tau, err := helpers.ExtractTokenMetadata(c.Request, requestID)\n\tif err != nil {\n\t\tlogger.Error(logoutLogTag, requestID, \"Unable to extract token metadata on logout system, error: %+v\", err)\n\t\tc.JSON(http.StatusUnauthorized, \"unauthorized\")\n\t\treturn\n\t}\n\tdeleted, delErr := helpers.DeleteAuth(au.AccessUUID)\n\tif delErr != nil || deleted == 0 {\n\t\tlogger.Error(logoutLogTag, requestID, \"Unable to delete auth on logout system, error: %+v, deleted: %d\", delErr, deleted)\n\t\tc.JSON(http.StatusUnauthorized, \"user already logout\")\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, \"Successfully logged out\")\n}", "func (r *oauthProxy) logoutHandler(w http.ResponseWriter, req *http.Request) {\n\tctx, span, logger := r.traceSpan(req.Context(), \"logout handler\")\n\tif span != nil {\n\t\tdefer span.End()\n\t}\n\n\t// @step: drop the access token\n\tuser, err := 
r.getIdentity(req)\n\tif err != nil {\n\t\tr.errorResponse(w, req.WithContext(ctx), \"\", http.StatusBadRequest, nil)\n\t\treturn\n\t}\n\n\t// step: check if the user has a state session and if so revoke it\n\tif r.useStore() {\n\t\tgo func() {\n\t\t\tif err := r.DeleteRefreshToken(user.token); err != nil {\n\t\t\t\tlogger.Error(\"unable to remove the refresh token from store\", zap.Error(err))\n\t\t\t}\n\t\t}()\n\t}\n\n\t// step: can either use the id token or the refresh token\n\tidentityToken := user.token.Encode()\n\tif refresh, _, err := r.retrieveRefreshToken(req, user); err == nil {\n\t\tidentityToken = refresh\n\t}\n\n\tr.commonLogout(ctx, w, req, identityToken, func(w http.ResponseWriter) {\n\t\tw.Header().Set(\"Content-Type\", jsonMime)\n\t\tw.WriteHeader(http.StatusOK)\n\t}, logger.With(zap.String(\"email\", user.email)))\n}", "func logoutHandler(w http.ResponseWriter, r *http.Request) {\n\tlog.Print(\"logoutHandler: process form\")\n\tctx := context.Background()\n\tif b.authenticator == nil {\n\t\tvar err error\n\t\tb.authenticator, err = initAuth(ctx)\n\t\tif err != nil {\n\t\t\tlog.Print(\"logoutHandler authenticator could not be initialized\")\n\t\t\thttp.Error(w, \"Server error\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\tcookie, err := r.Cookie(\"session\")\n\tif err != nil {\n\t\t// OK, just don't show the contents that require a login\n\t\tlog.Println(\"logoutHandler: no cookie\")\n\t} else {\n\t\tb.authenticator.Logout(ctx, cookie.Value)\n\t\tcookie.MaxAge = -1\n\t\thttp.SetCookie(w, cookie)\n\t}\n\n\t// Return HTML if method is post\n\tif httphandling.AcceptHTML(r) {\n\t\ttitle := b.webConfig.GetVarWithDefault(\"Title\", defTitle)\n\t\tcontent := htmlContent{\n\t\t\tTitle: title,\n\t\t}\n\t\tb.pageDisplayer.DisplayPage(w, \"logged_out.html\", content)\n\t\treturn\n\t}\n\n\tmessage := \"Please come back again\"\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tfmt.Fprintf(w, \"{\\\"message\\\" :\\\"%s\\\"}\", message)\n}", "func (a Authentic) logoutHandler(c buffalo.Context) error {\n\tc.Flash().Add(\"success\", \"Logged out from your account.\")\n\tc.Session().Delete(SessionField)\n\tc.Session().Save()\n\n\treturn c.Redirect(302, a.Config.AfterLogoutPath)\n}", "func UserLogoutHandler(c *gin.Context) {\n\tlogout := c.PostForm(\"logout\")\n\tif logout != \"\" {\n\t\tcookies.Clear(c)\n\t\turl := c.DefaultPostForm(\"redirectTo\", \"/\")\n\t\tif strings.Contains(url, \"/mod/\") {\n\t\t\turl = \"/\"\n\t\t}\n\t\tc.Redirect(http.StatusSeeOther, url)\n\t} else {\n\t\tc.Status(http.StatusNotFound)\n\t}\n}", "func (h *Handler) LogoutHandler(w http.ResponseWriter, r *http.Request) {\n\n\tchallenge, err := readURLChallangeParams(r, \"logout\")\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif r.Method == \"POST\" {\n\t\tvar err error\n\t\tif r.Form == nil {\n\t\t\tif err := r.ParseForm(); err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\taccept := r.Form.Get(\"accept\")\n\t\tlogoutChallenge := r.Form.Get(\"challenge\")\n\t\tvar redirectURL string\n\n\t\tif accept == \"true\" {\n\t\t\tredirectURL, err = h.LoginService.SendAcceptBody(\"logout\", logoutChallenge, nil)\n\n\t\t} else {\n\t\t\tredirectURL, err = h.LoginService.SendRejectBody(\"logout\", logoutChallenge, nil)\n\t\t}\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\thttp.Redirect(w, r, 
redirectURL, http.StatusFound)\n\t} else {\n\n\t\tchallengeBody, err := h.LoginService.ReadChallenge(challenge, \"logout\")\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\tif challengeBody.RpInitiated {\n\t\t\ttemplLogout := template.Must(template.ParseFiles(\"templates/logout.html\"))\n\t\t\tlogoutData := h.ConfigService.FetchLogoutConfig(challenge, challengeBody.Subject)\n\t\t\ttemplLogout.Execute(w, logoutData)\n\t\t} else {\n\t\t\tredirectURL, err := h.LoginService.SendAcceptBody(\"logout\", challenge, nil)\n\t\t\tif err != nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\thttp.Redirect(w, r, redirectURL, http.StatusFound)\n\t\t}\n\t}\n\n}", "func authLogoutHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {\n\tauth.ClearCookie(ctx, w)\n\thttp.Redirect(w, r, rootPath, http.StatusTemporaryRedirect)\n}", "func Logout(c *gin.Context) {\n\ttokenString := util.ExtractToken(c.Request)\n\n\tau, err := util.ExtractTokenMetadata(tokenString)\n\tif err != nil {\n\t\tc.JSON(http.StatusUnauthorized, \"unauthorized\")\n\t\treturn\n\t}\n\n\tdeleted, delErr := util.DeleteAuth(au.AccessUuid)\n\tif delErr != nil || deleted == 0 { //if any goes wrong\n\t\tc.JSON(http.StatusUnauthorized, \"unauthorized\")\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, \"Successfully logged out\")\n}", "func (am AuthManager) Logout(ctx *Ctx) error {\n\tsessionValue := am.readSessionValue(ctx)\n\t// validate the sessionValue isn't unset\n\tif len(sessionValue) == 0 {\n\t\treturn nil\n\t}\n\n\t// issue the expiration cookies to the response\n\tctx.ExpireCookie(am.CookieNameOrDefault(), am.CookiePathOrDefault())\n\tctx.Session = nil\n\n\t// call the remove handler if one has been provided\n\tif am.RemoveHandler != nil {\n\t\treturn am.RemoveHandler(ctx.Context(), sessionValue)\n\t}\n\treturn nil\n}", "func signoutHandler(w http.ResponseWriter, r *http.Request) {\n\tuserId, err := userID(r)\n\tif err != nil || userId == \"\" {\n\t\tw.WriteHeader(400)\n\t\tLogPrintf(\"signout: userid\")\n\t\treturn\n\t}\n\tt := authTransport(userId)\n\tif t == nil {\n\t\tw.WriteHeader(500)\n\t\tLogPrintf(\"signout: auth\")\n\t\treturn\n\t}\n\treq, err := http.NewRequest(\"GET\", fmt.Sprintf(revokeEndpointFmt, t.Token.RefreshToken), nil)\n\tresponse, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tw.WriteHeader(500)\n\t\tLogPrintf(\"signout: revoke\")\n\t\treturn\n\t}\n\tdefer response.Body.Close()\n\tstoreUserID(w, r, \"\")\n\tdeleteCredential(userId)\n\thttp.Redirect(w, r, fullUrl, http.StatusFound)\n}", "func Logout(w http.ResponseWriter, req *http.Request) {\n\tif !requirePost(w, req) {\n\t\tlog.Warn(\"Logout request should use POST method\")\n\t\treturn\n\t}\n\tif !requireAuth(w, req) {\n\t\tlog.Warn(\"Logout request should be authenticated\")\n\t\treturn\n\t}\n\tsid := req.Context().Value(auth.SESSION_ID).(string)\n\terr := storage.DeleteSession(sid)\n\tif err != nil {\n\t\tlog.Error(err)\n\t}\n\tconf.RedirectTo(\"/\", \"\", w, req)\n}", "func Logout(app *aero.Application, authLog *log.Log) {\n\tapp.Get(\"/logout\", func(ctx aero.Context) error {\n\t\tif ctx.HasSession() {\n\t\t\tuser := arn.GetUserFromContext(ctx)\n\n\t\t\tif user != nil {\n\t\t\t\tauthLog.Info(\"%s logged out | %s | %s | %s | %s\", user.Nick, user.ID, ctx.IP(), user.Email, user.RealName())\n\t\t\t}\n\n\t\t\tctx.Session().Delete(\"userId\")\n\t\t}\n\n\t\treturn ctx.Redirect(http.StatusTemporaryRedirect, \"/\")\n\t})\n}", "func LogoutHandler(w http.ResponseWriter, r *http.Request) 
{\n\tfmt.Fprintf(w, \"logout\")\n}", "func (a *Auth) Logout(w http.ResponseWriter, r *http.Request) {\n\tu := a.userstate.Username(r)\n\ta.userstate.Logout(u)\n\ta.userstate.ClearCookie(w)\n\ta.userstate.RemoveUser(u)\n\tutil.OK(w, r)\n}", "func logoutHandler(w http.ResponseWriter, r *http.Request) {\n\t_, err := r.Cookie(\"username\")\n\tif err != nil {\n\t\tlog.Println(\"User was not logged in and cannot be logged out.\")\n\t\thttp.Redirect(w, r, \"/\", http.StatusFound)\n\t\treturn\n\t}\n\tcookie := http.Cookie{\n\t\tName: \"username\",\n\t\tValue: \"\",\n\t\tExpires: time.Unix(0, 0),\n\t}\n\thttp.SetCookie(w, &cookie)\n\thttp.Redirect(w, r, \"/\", http.StatusFound)\n\treturn\n}", "func (s *Server) handleAuthLogout() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.SetCookie(w, &http.Cookie{\n\t\t\tName: \"TKN\",\n\t\t\tExpires: time.Unix(0, 0),\n\t\t\tHttpOnly: true,\n\t\t\tPath: \"/\",\n\t\t\tDomain: s.config.AppDomain,\n\t\t})\n\n\t\ts.respond(w, r, http.StatusOK, map[string]string{\n\t\t\t\"logout\": \"successful\",\n\t\t})\n\t}\n}", "func (h *UserRepos) Logout(ctx context.Context, w http.ResponseWriter, r *http.Request, params map[string]string) error {\n\n\tsess := webcontext.ContextSession(ctx)\n\n\t// Set the access token to empty to logout the user.\n\tsess = webcontext.SessionDestroy(sess)\n\n\tif err := sess.Save(r, w); err != nil {\n\t\treturn err\n\t}\n\n\t// Redirect the user to the root page.\n\treturn web.Redirect(ctx, w, r, \"/\", http.StatusFound)\n}", "func LogoutHandler(w http.ResponseWriter, r *http.Request, serv *AppServer) {\n\ttok := http.Cookie{\n\t\tName: \"UserID\",\n\t\tValue: \"\",\n\t}\n\thttp.SetCookie(w, &tok)\n\thttp.Redirect(w, r, \"/\", http.StatusTemporaryRedirect)\n}", "func (u *MyUserModel) Logout() {\n\t// Remove from logged-in user's list\n\t// etc ...\n\tu.authenticated = false\n}", "func Logout(c buffalo.Context) error {\n\tsessionID := c.Value(\"SessionID\").(int)\n\tadmin := c.Value(\"Admin\")\n\tif admin != nil {\n\t\t// \"log out\" by unscoping out auth token\n\t\ttoken, err := utils.GenerateScopedToken(admin.(string), 0, sessionID)\n\t\tif err != nil {\n\t\t\treturn c.Error(http.StatusBadRequest, err)\n\t\t}\n\t\treturn c.Render(http.StatusOK, render.JSON(&common.TokenPayload{\n\t\t\tToken: token,\n\t\t}))\n\t}\n\tif err := modelext.DeleteUserSession(c, sessionID); err != nil {\n\t\treturn c.Error(http.StatusInternalServerError, err)\n\t}\n\treturn c.Render(http.StatusOK, render.JSON(&common.TokenPayload{\n\t\tToken: \"\",\n\t}))\n}", "func (router *router) SignOut(w http.ResponseWriter, r *http.Request) {\n\trouter.log.Println(\"request received at endpoint: SignOut\")\n\tif session.IsAuthenticated(w, r) {\n\t\trouter.database.Delete(&schema.Favourite{User: session.GetUser(w, r)})\n\t\tsession.SignOut(w, r)\n\t\trouter.log.Println(\"sign out completed redirecting to home page\")\n\t} else {\n\t\trouter.log.Println(\"Not signed in to sign out, redirecting to home page\")\n\t}\n\n\tHomePage(w, r)\n\treturn\n}", "func (a *authSvc) Logout(ctx context.Context) error {\n\taccessUuid, ok := ctx.Value(AccessUuidKey).(string)\n\tif !ok {\n\t\treturn errors.New(\"access uuid not present in context\")\n\t}\n\tdeleted, err := deleteAuth(\"access_token\", accessUuid)\n\tif err != nil || deleted == 0 {\n\t\treturn errors.New(\"not authenticated\")\n\t}\n\trefreshUuid, ok := ctx.Value(RefreshUuidKey).(string)\n\tif !ok {\n\t\treturn errors.New(\"refresh uuid not present in context\")\n\t}\n\tdeleted, err = 
deleteAuth(\"refresh_token\", refreshUuid)\n\tif err != nil || deleted == 0 {\n\t\treturn errors.New(\"not authenticated\")\n\t}\n\tcookieAccess := getCookieAccess(ctx)\n\tcookieAccess.RemoveToken(\"jwtAccess\")\n\tcookieAccess.RemoveToken(\"jwtRefresh\")\n\treturn nil\n}", "func (bap *BaseAuthProvider) Logout(ctx *RequestCtx) {\n\t// When this is internally called (such as user reset password, or been disabled)\n\t// ctx might be nil\n\tif ctx.Ctx != nil {\n\t\t//delete token cookie, keep uuid cookie\n\t\tWriteToCookie(ctx.Ctx, AuthTokenName, \"\")\n\t}\n\tif ctx.User != nil {\n\t\t(*bap.Mutex).Lock()\n\t\tdelete(bap.TokenCache, ctx.User.Token())\n\t\tbap.TokenToUsername.DelString(ctx.User.Token())\n\t\t(*bap.Mutex).Unlock()\n\t\tlog.Println(\"Logout:\", ctx.User.Username())\n\t}\n}", "func logoutHandler(w http.ResponseWriter, req *http.Request) {\n\tif req.Method == post {\n\t\tsessionStore.Destroy(w, sessionName)\n\t}\n\thttp.Redirect(w, req, \"/\", http.StatusFound)\n}", "func HandleGitHubLogout(w http.ResponseWriter, r *http.Request) {\n\n\tsession := SessionManager.Load(r)\n\tsession.PutString(w, \"username\", \"\")\n\thttp.Redirect(w, r, \"/\", http.StatusTemporaryRedirect)\n}", "func LogoutHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tvar token Token\n\tvar res ResponseResult\n\tkeys, ok := r.URL.Query()[\"username\"]\n\ttokencollection := sqldb.globalDB.Model(&token)\n\n\tif !ok || len(keys[0]) < 1 {\n\t\tres.Error = \"No user found in request!\"\n\t\tjson.NewEncoder(w).Encode(res)\n\t\treturn\n\t}\n\n\tvar err = tokencollection.FindOne(context.TODO(), bson.D{{\"username\", token.Username}}).Decode(&token)\n\n\tif err != nil {\n\t\tres.Error = \"Invalid username\"\n\t\tjson.NewEncoder(w).Encode(res)\n\t\treturn\n\t}\n\n\tvar resp = map[string]interface{}{\"status\": false, \"message\": \"logged out\"}\n\n\tjson.NewEncoder(w).Encode(resp)\n\treturn\n}", "func logoutHandler(w http.ResponseWriter, r *http.Request, _ map[string]string) {\n\tsessionHandler.ClearSession(w, r)\n\treturn\n}", "func (ctrl *UserController) Logout(c *gin.Context) {\n\thttp.SetCookie(c.Writer, &http.Cookie{Path: \"/\", Name: auth.RefreshTokenKey, MaxAge: -1, Secure: true, HttpOnly: true, SameSite: http.SameSiteNoneMode})\n\thttp.SetCookie(c.Writer, &http.Cookie{Path: \"/\", Name: auth.AccessTokenKey, MaxAge: -1, Secure: true, HttpOnly: true, SameSite: http.SameSiteNoneMode})\n\n\tc.JSON(http.StatusOK, utils.Msg(\"Logged out\"))\n}", "func (app *application) postLogout(w http.ResponseWriter, r *http.Request) {\n\t// \"log out\" the user by removing their ID from the session\n\trowid := app.session.PopInt(r, \"authenticatedPlayerID\")\n\tapp.players.UpdateLogin(rowid, false)\n\tapp.session.Put(r, \"flash\", \"You've been logged out successfully\")\n\thttp.Redirect(w, r, \"/login\", 303)\n}", "func Logout(w http.ResponseWriter, r *http.Request) {\n\tPrintln(\"Endpoint Hit: Logout\")\n\n\tsession := sessions.Start(w, r)\n\tsession.Clear()\n\tsessions.Destroy(w, r)\n\thttp.Redirect(w, r, \"/\", 302)\n}", "func Logout(ctx echo.Context) error {\n\tif _, ok := ctx.Get(\"User\").(*models.Person); ok {\n\t\tutils.DeleteSession(ctx, settings.App.Session.Lang)\n\t\tutils.DeleteSession(ctx, settings.App.Session.Flash)\n\t\tutils.DeleteSession(ctx, settings.App.Session.Name)\n\t}\n\tctx.Redirect(http.StatusFound, \"/\")\n\treturn nil\n}", "func Logout(w http.ResponseWriter, r *http.Request) {\n\t// TODO JvD: revoke the token?\n\thttp.Redirect(w, r, 
\"/login\", http.StatusFound)\n}", "func LogoutHandler(w http.ResponseWriter, r *http.Request) {\n\tsession, err := store.Get(r, \"piplayer-session\")\n\tif err != nil {\n\t\tlog.Println(\"error trying to get session in logout page\")\n\t}\n\n\tsession.Options.MaxAge = -1\n\tif err := session.Save(r, w); err != nil {\n\t\tlog.Println(\"error trying to set MaxAge on session to logout\")\n\t}\n\n\thttp.Redirect(w, r, \"/login\", http.StatusFound)\n}", "func (m *Repository) Logout(w http.ResponseWriter, r *http.Request) {\n\tif !m.App.Session.Exists(r.Context(), \"user_id\") {\n\t\thttp.Redirect(w, r, \"/\", http.StatusSeeOther)\n\t\treturn\n\t}\n\n\t_ = m.App.Session.Destroy(r.Context())\n\t_ = m.App.Session.RenewToken(r.Context())\n\tm.App.Session.Put(r.Context(), \"flash\", \"Successfully logged out!\")\n\n\thttp.Redirect(w, r, \"/\", http.StatusSeeOther)\n}", "func (c *Controller) Logout(ctx context.Context) (err error) {\n\t// Build request\n\treq, err := c.requestBuild(ctx, \"GET\", authenticationAPIName, \"logout\", nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"request building failure: %w\", err)\n\t}\n\t// execute auth request\n\tif err = c.requestExecute(ctx, req, nil, false); err != nil {\n\t\terr = fmt.Errorf(\"executing request failed: %w\", err)\n\t}\n\treturn\n}", "func Logout(res http.ResponseWriter, req *http.Request) {\n\ttokenID := req.Context().Value(\"tokenID\").(string)\n\tresponse := make(map[string]interface{})\n\tmsg := constants.Logout\n\t_, err := connectors.RemoveDocument(\"tokens\", tokenID)\n\tif err != nil {\n\t\trender.Render(res, req, responses.NewHTTPError(http.StatusServiceUnavailable, constants.Unavailable))\n\t\treturn\n\t}\n\tresponse[\"message\"] = msg\n\trender.Render(res, req, responses.NewHTTPSucess(http.StatusOK, response))\n}", "func (a *Server) LogoutUser(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"logout a user\")\n}", "func (u *User) Logout() {\n\t// Remove from logged-in user's list\n\t// etc ...\n\tu.authenticated = false\n}", "func (con *Controller) LogoutHandler(tokenCollection *mongo.Collection) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\ttokenString := c.Query(\"access_token\")\n\t\temail := c.Query(\"email\")\n\t\tif tokenString == \"\" || email == \"\" {\n\t\t\tsendFailedResponse(c, http.StatusBadRequest, \"token and email cannot be empty\")\n\t\t\treturn\n\t\t}\n\n\t\tfilter := bson.M{\"access_token\": tokenString}\n\t\tupdate := bson.D{\n\t\t\tprimitive.E{\n\t\t\t\tKey: \"set\",\n\t\t\t\tValue: bson.D{\n\t\t\t\t\tprimitive.E{Key: \"logged_out\", Value: true},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\texistingToken := tokenCollection.FindOneAndUpdate(c, filter, update)\n\t\tif existingToken.Err() != mongo.ErrNoDocuments {\n\t\t\tsendFailedResponse(c, http.StatusUnprocessableEntity, \"access token rejected\")\n\t\t\treturn\n\t\t}\n\n\t\terr := con.Redis.Del(c, email).Err()\n\t\tif err != nil {\n\t\t\tlog.Println(\"redis error: \", err.Error())\n\t\t}\n\n\t\tsendSuccessResponse(c, http.StatusOK, gin.H{\n\t\t\t\"message\": \"User has logged out\",\n\t\t})\n\t}\n}", "func (uh *UserHandler) Logout(w http.ResponseWriter, r *http.Request) {\n\tnewSession := uh.configSess()\n\tcookie, _ := r.Cookie(newSession.SID)\n\n\tsession.Remove(newSession.SID, w)\n\tuh.SService.DeleteSession(cookie.Value)\n\thttp.Redirect(w, r, \"/Login\", http.StatusSeeOther)\n}", "func (handler AuthHandler) SignOutHandler(w http.ResponseWriter, r *http.Request) {\n\tsession, err := handler.SessionStore.Get(r, \"goth-session\")\n\tif err != nil 
{\n\t\tpanic(err)\n\t}\n\tsession.Options.MaxAge = -1\n\tsession.Save(r, w)\n\thttp.Redirect(w, r, handler.AfterSignoutPath, http.StatusFound)\n}", "func Logout(c *gin.Context) {\n\tsession := sessions.Default(c)\n\tsession.Delete(\"user\");\n\tsession.Save();\n\tc.JSON(200, gin.H{\n\t\t\"success\": true,\n\t})\n}", "func (AuthenticationController) Logout(c *gin.Context) {\n\tsession := sessions.Default(c)\n\tsession.Clear()\n\tif sessionErr := session.Save(); sessionErr != nil {\n\t\tlog.Print(sessionErr)\n\t\tutils.CreateError(c, http.StatusInternalServerError, \"Failed to logout.\")\n\t\tc.Abort()\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\"message\": \"Logged out...\"})\n}", "func logout(ctx context.Context) error {\n\tr := ctx.HttpRequest()\n\tsession, _ := core.GetSession(r)\n\t_, ok := session.Values[\"user\"]\n\tif ok {\n\t\tdelete(session.Values, \"user\")\n\t\tif err := session.Save(r, ctx.HttpResponseWriter()); err != nil {\n\t\t\tlog.Error(\"Unable to save session: \", err)\n\t\t}\n\t}\n\treturn goweb.Respond.WithPermanentRedirect(ctx, \"/\")\n}", "func Logout(r *http.Request) {\n\ts := getSessionFromRequest(r)\n\tif s.ID == 0 {\n\t\treturn\n\t}\n\n\t// Store Logout to the user log\n\tfunc() {\n\t\tlog := &Log{}\n\t\tlog.SignIn(s.User.Username, log.Action.Logout(), r)\n\t\tlog.Save()\n\t}()\n\n\ts.Logout()\n\n\t// Delete the cookie from memory if we sessions are cached\n\tif CacheSessions {\n\t\tdelete(cachedSessions, s.Key)\n\t}\n\n\tIncrementMetric(\"uadmin/security/logout\")\n}", "func logoutHandler(w http.ResponseWriter, r *http.Request) {\n\tsession, _ := store.Get(r, sessionName)\n\tsession.Values[\"LoggedIn\"] = \"no\"\n\tsession.Save(r, w)\n\thttp.Redirect(w, r, \"/\", http.StatusFound)\n}", "func (as *AdminServer) Logout(w http.ResponseWriter, r *http.Request) {\n\tsession := ctx.Get(r, \"session\").(*sessions.Session)\n\tdelete(session.Values, \"id\")\n\tFlash(w, r, \"success\", \"You have successfully logged out\")\n\tsession.Save(r, w)\n\thttp.Redirect(w, r, \"/login\", http.StatusFound)\n}", "func Logout(w http.ResponseWriter, r *http.Request) error {\n\tsession, _ := loggedUserSession.Get(r, \"authenticated-user-session\")\n\tsession.Values[\"username\"] = \"\"\n\treturn session.Save(r, w)\n}", "func (j *AuthMux) Logout() http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tj.Auth.Expire(w)\n\t\thttp.Redirect(w, r, j.SuccessURL, http.StatusTemporaryRedirect)\n\t})\n}", "func LogoutPostHandler(writer http.ResponseWriter, request *http.Request) {\n\tclearSession(writer)\n\thttp.Redirect(writer, request, \"/\", 302)\n}", "func Logout(c echo.Context) error {\n\tuser := c.Get(\"user\").(*jwt.Token)\n\tclaims := user.Claims.(jwt.MapClaims)\n\tusername := claims[\"name\"].(string)\n\n\terr := db.UpdateUserLoggedIn(username, false)\n\tif err != nil {\n\t\treturn c.JSON(http.StatusInternalServerError, \"Error logging out user\")\n\t}\n\n\treturn c.JSON(http.StatusOK, \"User logged out successfully\")\n}", "func (c UserInfo) Logout() revel.Result {\n\tc.Session.Del(\"DiscordUserID\")\n\tc.Response.Status = 200\n\treturn c.Render()\n}", "func (h *auth) Logout(c echo.Context) error {\n\tsession := currentSession(c)\n\tif session != nil {\n\t\terr := h.db.Delete(session)\n\t\tif err != nil && h.db.IsNotFound(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn c.NoContent(http.StatusNoContent)\n}", "func logout(c *gin.Context) {\n\t//Give the user a session\n\tsession := 
sessions.Default(c)\n\tclearSession(&session)\n\n\tc.Redirect(http.StatusFound, \"/\")\n}", "func (a *noAuth) Logout(c echo.Context) error {\n\treturn a.logout(c)\n}", "func (a *Auth) LogoutHandler(redirectPath string) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ta.clearCookie(w)\n\t\thttp.Redirect(w, r, redirectPath, http.StatusTemporaryRedirect)\n\t})\n}", "func logout(w http.ResponseWriter, r *http.Request) {\n\n\tif isAuthorized(w, r) {\n\t\tusername, _ := r.Cookie(\"username\")\n\t\tdelete(gostuff.SessionManager, username.Value)\n\t\tcookie := http.Cookie{Name: \"username\", Value: \"0\", MaxAge: -1}\n\t\thttp.SetCookie(w, &cookie)\n\t\tcookie = http.Cookie{Name: \"sessionID\", Value: \"0\", MaxAge: -1}\n\t\thttp.SetCookie(w, &cookie)\n\t\tw.Header().Set(\"Cache-Control\", \"no-cache, no-store, must-revalidate\")\n\t\thttp.ServeFile(w, r, \"index.html\")\n\t}\n}", "func logout(w http.ResponseWriter, r *http.Request) {\n\tsession, err := store.Get(r, \"auth\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tsession.Values[\"user\"] = User{}\n\tsession.Options.MaxAge = -1\n\n\terr = session.Save(r, w)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tlogRequest(r)\n}", "func (gs *GateService) Logout(ctx context.Context, opaque string) error {\n\treturn gs.repo.RemoveToken(ctx, opaque)\n}", "func (s *server) Logout(ctx context.Context, in *pb.LogRequest) (*pb.LogResponse, error) {\n\tlog.Printf(\"Received: %v\", \"Logout\")\n\tsuc, err := DeleteToken(in.Email, in.Token)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &pb.LogResponse{Sucess: suc}, nil\n}", "func (u *USER_DB) Logout() {\n\t// Remove from logged-in user's list\n\t// etc ...\n\tu.authenticated = false\n}", "func logoutHandler(w http.ResponseWriter, r *http.Request) {\n\tlogOutCookie := http.Cookie{Name: \"V\",\n\t\tPath: \"/\",\n\t\tMaxAge: -1}\n\thttp.SetCookie(w, &logOutCookie)\n\thttp.Redirect(w, r, \"/home/\", http.StatusFound)\n}", "func logoutHandler(w http.ResponseWriter, r *http.Request) {\n\tsession, _ := store.Get(r, \"goServerView\")\n\tsession.Values[\"isConnected\"] = false\n\tsession.Save(r, w)\n\thomeHandler(w, r)\n}", "func UserLogout(w http.ResponseWriter, r *http.Request) {\n\t_ = SessionDel(w, r, \"user\")\n\tutils.SuccessResponse(&w, \"登出成功\", \"\")\n}", "func (controller *Auth) Logout() {\n\tcontroller.distroySession()\n\tcontroller.DeleteConnectionCookie()\n\tcontroller.Redirect(\"/\", 200)\n}", "func (ths *ReceiveBackEnd) handleLogout(w http.ResponseWriter, r *http.Request) {\n\tvar sessionId = r.URL.Query().Get(\"session\");\n\tths.log.Println(\"Handle logout... 
\");\n\tths.log.Println(\"Session: \" + sessionId + \" is terminating.\");\n\tths.store.GetJSonBlobs(map[string]string{\"SessionId\": sessionId});\n\tr.Body.Close();\n\treturn;\n}", "func Logout(res http.ResponseWriter, req *http.Request) error {\n\tsession, err := Store.Get(req, SessionName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsession.Options.MaxAge = -1\n\tsession.Values = make(map[interface{}]interface{})\n\terr = session.Save(req, res)\n\tif err != nil {\n\t\treturn errors.New(\"Could not delete user session \")\n\t}\n\treturn nil\n}", "func Logout(w http.ResponseWriter, r *http.Request) {\r\n\t//Get user id of the session\r\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\r\n\tdefer cancel()\r\n\tcookie, err := r.Cookie(\"sessionId\")\r\n\tif err != nil || cookie.Value != \"\" {\r\n\t\ttoken, _ := url.QueryUnescape(cookie.Value)\r\n\t\t_, err = AuthClient.RemoveAuthToken(ctx, &authpb.AuthToken{Token: token})\r\n\t\texpiration := time.Now()\r\n\t\tcookie := http.Cookie{Name: \"sessionId\", Path: \"/\", HttpOnly: true, Expires: expiration, MaxAge: -1}\r\n\t\thttp.SetCookie(w, &cookie)\r\n\t}\r\n\tAPIResponse(w, r, 200, \"Logout successful\", make(map[string]string))\r\n}", "func Logout(deleteAuthToken dependencyDeleteAuthToken) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tauthToken := c.MustGet(\"auth_token\").(models.AuthToken)\n\t\tif err := deleteAuthToken(authToken.AuthToken); err != nil {\n\t\t\tc.AbortWithError(http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\tc.Status(http.StatusOK)\n\t}\n}", "func PostLogout(w http.ResponseWriter, req *http.Request, app *App) {\n\tsession, _ := app.SessionStore.Get(req, SessionName)\n\tsession.Values[\"userId\"] = nil\n\tsession.Save(req, w)\n\thttp.Redirect(w, req, app.Config.General.Prefix+\"/login\", http.StatusFound)\n}", "func (a *AuthController) Logout(w http.ResponseWriter, r *http.Request) {\n\tsession, err := a.store.Get(r, cookieSession)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n\n\tsession.Values[\"authenticated\"] = false\n\tif err := session.Save(r, w); err != nil {\n\t\tlog.Println(\"[ERROR] error saving authenticated session\")\n\t}\n\n\thttp.Redirect(w, r, \"/\", http.StatusNoContent)\n}", "func (c *controller) Logout(ctx context.Context, request *web.Request) web.Result {\n\treturn c.service.LogoutFor(ctx, request.Params[\"broker\"], request, nil)\n}", "func Logout(res http.ResponseWriter, req *http.Request) {\n\t_, ok := cookiesManager.GetCookieValue(req, CookieName)\n\tif ok {\n\t\t// cbs.SessionManager.RemoveSession(uuid)\n\t\tcookiesManager.RemoveCookie(res, CookieName)\n\t} else {\n\t\tlog.Trace(\"Logging out without the cookie\")\n\t}\n}", "func Logout(w http.ResponseWriter, r *http.Request) {\n\tif sessions.GoodSession(r) != true {\n\t\tjson.NewEncoder(w).Encode(\"Session Expired. 
Log out and log back in.\")\n\t}\n\tstore, err := pgstore.NewPGStore(os.Getenv(\"PGURL\"), key)\n\tcheck(err)\n\tdefer store.Close()\n\n\tsession, err := store.Get(r, \"scheduler-session\")\n\tcheck(err)\n\t// Revoke users authentication\n\tsession.Values[\"authenticated\"] = false\n\tw.WriteHeader(http.StatusOK)\n\tsession.Options.MaxAge = -1\n\tsession.Save(r, w)\n}", "func (l *RemoteProvider) Logout(w http.ResponseWriter, req *http.Request) {\n\tck, err := req.Cookie(tokenName)\n\tif err == nil {\n\t\tck.MaxAge = -1\n\t\tck.Path = \"/\"\n\t\thttp.SetCookie(w, ck)\n\t}\n\thttp.Redirect(w, req, \"/user/login\", http.StatusFound)\n}", "func Logout(_ *gorm.DB, rc *redis.Client, _ http.ResponseWriter, r *http.Request, s *status.Status) (int, error) {\n\tctx := context.Background()\n\trequestUsername := getVar(r, model.UsernameVar)\n\tclaims := GetTokenClaims(ExtractToken(r))\n\ttokenUsername := fmt.Sprintf(\"%v\", claims[\"sub\"])\n\tif tokenUsername != requestUsername {\n\t\ts.Message = status.LogoutFailure\n\t\treturn http.StatusForbidden, nil\n\t}\n\ts.Code = status.SuccessCode\n\ts.Message = status.LogoutSuccess\n\trc.Del(ctx, \"access_\"+requestUsername)\n\treturn http.StatusOK, nil\n}", "func logout(res http.ResponseWriter, req *http.Request) {\n sess := session.Get(req)\n\n session.Remove(sess, res)\n sess = nil\n\n return\n http.Redirect(res, req, \"/login\", 301)\n}", "func (c *LoggedInContext) DoSignOutRequestHandler(rw web.ResponseWriter, req *web.Request) {\n\tsession, _ := c.Store.Get(req.Request, \"session-security\")\n\tsession.Values[\"sessionId\"] = nil\n\tc.SetNotificationMessage(rw, req, \"Goodbye!\")\n\n\tc.Account.ExpireSession(c.Storage)\n\n\tsession.Save(req.Request, rw)\n\thttp.Redirect(rw, req.Request, HomeUrl.Make(), http.StatusFound)\n}", "func gwLogout(c *gin.Context) {\n\ts := getHostServer(c)\n\treqId := getRequestId(s, c)\n\tuser := getUser(c)\n\tcks := s.conf.Security.Auth.Cookie\n\tok := s.AuthManager.Logout(user)\n\tif !ok {\n\t\ts.RespBodyBuildFunc(http.StatusInternalServerError, reqId, \"auth logout fail\", nil)\n\t\treturn\n\t}\n\tsid, ok := getSid(s, c)\n\tif !ok {\n\t\ts.RespBodyBuildFunc(http.StatusInternalServerError, reqId, \"session store logout fail\", nil)\n\t\treturn\n\t}\n\t_ = s.SessionStateManager.Remove(sid)\n\tc.SetCookie(cks.Key, \"\", -1, cks.Path, cks.Domain, cks.Secure, cks.HttpOnly)\n}", "func (a *Auth) Logout(ctx *gin.Context) error {\n\tuuid, err := ctx.Cookie(a.cookie)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\t// delete redis record\n\tif err := a.redis.Del(uuid).Err(); err != nil {\n\t\treturn err\n\t}\n\n\t// delete cookie\n\thttp.SetCookie(ctx.Writer, &http.Cookie{\n\t\tName: a.cookie,\n\t\tValue: \"\",\n\t\tExpires: time.Unix(0, 0),\n\t})\n\treturn nil\n}", "func (a *localAuth) Logout(c echo.Context) error {\n\treturn a.logout(c)\n}", "func signOut(w http.ResponseWriter, r *http.Request) {\n\tlogoutUser(w, r)\n\tresp := map[string]interface{}{\n\t\t\"success\": true,\n\t}\n\tapiResponse(resp, w)\n}", "func Logout(origin string, o *model.OneTimePassword) error {\n\tu, err := url.Parse(origin)\n\tif err != nil {\n\t\treturn err\n\t}\n\tu.Path = path.Join(u.Path, \"housecontrol/v1/indoorauth/logout\")\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Add(\"Authorization\", o.GetAuthHeader())\n\tresp, err := client.Do(req)\n\tif err != nil && err != io.EOF {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tio.Copy(ioutil.Discard, resp.Body)\n\t// 
fmt.Println(\"logouted, status:\", resp.StatusCode)\n\treturn nil\n}", "func (m *Model) Logout(ctx context.Context, header string) error {\n\tau, err := m.extractTokenMetadata(header)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = m.authDAO.DeleteByID(ctx, au.ID)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func LogoutHandler(er *Errorly) http.HandlerFunc {\n\treturn func(rw http.ResponseWriter, r *http.Request) {\n\t\tsession, _ := er.Store.Get(r, sessionName)\n\t\tdefer er.SaveSession(session, r, rw)\n\n\t\tsession.Values = make(map[interface{}]interface{})\n\n\t\thttp.Redirect(rw, r, \"/\", http.StatusTemporaryRedirect)\n\t}\n}", "func (a Authorizer) Logout(rw http.ResponseWriter, req *http.Request) error {\n\tsession, err := skyring.Store.Get(req, \"session-key\")\n\tif err != nil {\n\t\tlogger.Get().Error(\"Error getting the session. error: %v\", err)\n\t\treturn err\n\t}\n\tsession.Options.MaxAge = -1\n\tif err = session.Save(req, rw); err != nil {\n\t\tlogger.Get().Error(\"Error saving the session. error: %v\", err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func (a Authorizer) Logout(rw http.ResponseWriter, req *http.Request) error {\n\tsession, err := skyring.Store.Get(req, \"session-key\")\n\tif err != nil {\n\t\tlogger.Get().Error(\"Error getting the session. error: %v\", err)\n\t\treturn err\n\t}\n\tsession.Options.MaxAge = -1\n\tif err = session.Save(req, rw); err != nil {\n\t\tlogger.Get().Error(\"Error saving the session. error: %v\", err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func (u *User) PostLogout(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tcookie, err := r.Cookie(session.Key)\n\tif err != nil {\n\t\t// ErrNoCookie should be handled as success.\n\t\twriteError(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\tif err := u.session.Delete(cookie.Value); err != nil {\n\t\twriteError(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\t// TODO: Look into the PRG pattern.\n\thttp.Redirect(w, r, \"/\", http.StatusFound)\n}", "func (context Context) Logout() error {\n\n\treqURL := url.URL{\n\t\tScheme: \"http\",\n\t\tHost: context.ServerAddress,\n\t\tPath: path.Join(apiPrefix, \"users/logout\"),\n\t}\n\n\treq, err := http.NewRequest(\"GET\", reqURL.String(), nil)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"HTTP new request error\")\n\t}\n\tcontext.UserCredentials.Apply(req.Header)\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"HTTP client error\")\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\terrorResponse, err := getAPIErrorResponse(resp)\n\t\terrorString := errorResponse.String()\n\t\tif err == nil || errorString == \"\" {\n\t\t\terrorString = resp.Status\n\t\t}\n\t\treturn errors.New(\"API error: \" + resp.Status)\n\t}\n\n\treturn nil\n}", "func logout(w http.ResponseWriter, r *http.Request) {\n LOG[INFO].Println(\"Executing Logout\")\n clearCache(w)\n cookie, _ := r.Cookie(LOGIN_COOKIE)\n cookie.MaxAge = -1\n cookie.Expires = time.Now().Add(-1 * time.Hour)\n http.SetCookie(w, cookie)\n LOG[INFO].Println(\"Successfully Logged Out\")\n http.Redirect(w, r, \"/welcome\", http.StatusSeeOther)\n}" ]
[ "0.7537373", "0.73492235", "0.72490174", "0.7220104", "0.7210306", "0.7185228", "0.7093687", "0.7087164", "0.705549", "0.7040366", "0.70124507", "0.700801", "0.6981215", "0.6977953", "0.6971679", "0.6926867", "0.6907817", "0.6862481", "0.684782", "0.68251675", "0.68112636", "0.6763008", "0.6749481", "0.6706726", "0.6706166", "0.66914827", "0.6683935", "0.6676935", "0.662624", "0.6625615", "0.6624541", "0.6622106", "0.6617115", "0.6600055", "0.65995765", "0.6589466", "0.65871304", "0.6544922", "0.65419453", "0.6533619", "0.65319526", "0.65289116", "0.65282935", "0.65094143", "0.6503908", "0.64996916", "0.64878213", "0.64858705", "0.64765966", "0.6474078", "0.64728814", "0.64627105", "0.64601123", "0.6458894", "0.6457551", "0.6429409", "0.64193076", "0.6417111", "0.64167255", "0.64050394", "0.6397269", "0.6356977", "0.6345556", "0.63377714", "0.63240165", "0.6321405", "0.63065785", "0.63036966", "0.62974966", "0.62917095", "0.6283037", "0.6275747", "0.6267895", "0.62627614", "0.6262145", "0.6259323", "0.6257421", "0.6254743", "0.62342644", "0.6229949", "0.61994666", "0.6188782", "0.6179966", "0.6175897", "0.61706567", "0.6160839", "0.6156217", "0.6126919", "0.6126196", "0.61090595", "0.61022586", "0.6095991", "0.6093348", "0.6087429", "0.60798556", "0.6059843", "0.6059843", "0.6057434", "0.604093", "0.6032016" ]
0.76730204
0
IsValid checks whether group's signature corresponds to the given hash.
func (g *Group) IsValid(h util.Uint160) error {
	if !g.PublicKey.Verify(g.Signature, hash.Sha256(h.BytesBE()).BytesBE()) {
		return errors.New("incorrect group signature")
	}
	return nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *Manifest) IsValid(hash util.Uint160) bool {\n\tif m.ABI.Hash != hash {\n\t\treturn false\n\t}\n\tfor _, g := range m.Groups {\n\t\tif !g.IsValid(hash) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (g Groups) AreValid(h util.Uint160) error {\n\tfor i := range g {\n\t\terr := g[i].IsValid(h)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif len(g) < 2 {\n\t\treturn nil\n\t}\n\tpkeys := make(keys.PublicKeys, len(g))\n\tfor i := range g {\n\t\tpkeys[i] = g[i].PublicKey\n\t}\n\tsort.Sort(pkeys)\n\tfor i := range pkeys {\n\t\tif i == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif pkeys[i].Cmp(pkeys[i-1]) == 0 {\n\t\t\treturn errors.New(\"duplicate group keys\")\n\t\t}\n\t}\n\treturn nil\n}", "func ValidateSignature(hash, cipher string, key []byte) error {\n\tif util.IsSignValid(hash, cipher, key) {\n\t\treturn nil\n\t}\n\treturn ErrInvalidSignature\n}", "func IsSignerHashValid(hash []byte) (bool, *Signer) {\n\tif sha512.Size != len(hash) {\n\t\treturn false, nil\n\t}\n\n\treturn true, NewSigner(hash)\n}", "func IsValidHash(obj string) bool {\n\treturn hashPtn.MatchString(obj)\n}", "func ValidHash(s string) bool {\n\treturn len(s) == sha1.Size*2\n}", "func (g *Group) Verify(digest []byte, hashFunc hash.Hash, sig []byte) bool {\n\tif len(sig) != SignatureSize {\n\t\treturn false\n\t}\n\n\tt1, ok := new(bn256.G1).Unmarshal(sig[:2*32])\n\tif !ok {\n\t\treturn false\n\t}\n\tt2, ok := new(bn256.G1).Unmarshal(sig[2*32:4*32])\n\tif !ok {\n\t\treturn false\n\t}\n\tt3, ok := new(bn256.G1).Unmarshal(sig[4*32:6*32])\n\tif !ok {\n\t\treturn false\n\t}\n\tc := new(big.Int).SetBytes(sig[6*32:7*32])\n\tsalpha := new(big.Int).SetBytes(sig[7*32:8*32])\n\tsbeta := new(big.Int).SetBytes(sig[8*32:9*32])\n\tsx := new(big.Int).SetBytes(sig[9*32:10*32])\n\tsdelta1 := new(big.Int).SetBytes(sig[10*32:11*32])\n\tsdelta2 := new(big.Int).SetBytes(sig[11*32:12*32])\n\n\tr1 := new(bn256.G1).ScalarMult(g.u, salpha)\n\ttmp := new(big.Int).Neg(c)\n\ttmp.Add(tmp, bn256.Order)\n\ttmpg := new(bn256.G1).ScalarMult(t1, tmp)\n\tr1.Add(r1, tmpg)\n\n\tr2 := new(bn256.G1).ScalarMult(g.v, sbeta)\n\ttmpg.ScalarMult(t2, tmp)\n\tr2.Add(r2, tmpg)\n\n\tr4 := new(bn256.G1).ScalarMult(t1, sx)\n\ttmp.Neg(sdelta1)\n\ttmp.Add(tmp, bn256.Order)\n\ttmpg.ScalarMult(g.u, tmp)\n\tr4.Add(r4, tmpg)\n\n\tr5 := new(bn256.G1).ScalarMult(t2, sx)\n\ttmp.Neg(sdelta2)\n\ttmp.Add(tmp, bn256.Order)\n\ttmpg.ScalarMult(g.v, tmp)\n\tr5.Add(r5, tmpg)\n\n\tr3 := bn256.Pair(t3, g.g2)\n\tr3.ScalarMult(r3, sx)\n\n\ttmp.Neg(salpha)\n\ttmp.Sub(tmp, sbeta)\n\ttmp.Mod(tmp, bn256.Order)\n\ttmpgt := new(bn256.GT).ScalarMult(g.ehw, tmp)\n\tr3.Add(r3, tmpgt)\n\n\ttmp.Neg(sdelta1)\n\ttmp.Sub(tmp, sdelta2)\n\ttmp.Mod(tmp, bn256.Order)\n\ttmpgt.ScalarMult(g.ehg2, tmp)\n\tr3.Add(r3, tmpgt)\n\n\tet3w := bn256.Pair(t3, g.w)\n\tet3w.Add(et3w, g.minusEg1g2)\n\tet3w.ScalarMult(et3w, c)\n\tr3.Add(r3, et3w)\n\n\thashFunc.Reset()\n\thashFunc.Write(digest)\n\thashFunc.Write(t1.Marshal())\n\thashFunc.Write(t2.Marshal())\n\thashFunc.Write(t3.Marshal())\n\thashFunc.Write(r1.Marshal())\n\thashFunc.Write(r2.Marshal())\n\thashFunc.Write(r3.Marshal())\n\thashFunc.Write(r4.Marshal())\n\thashFunc.Write(r5.Marshal())\n\tcprime := new(big.Int).SetBytes(hashFunc.Sum(nil))\n\tcprime.Mod(cprime, bn256.Order)\n\n\treturn cprime.Cmp(c) == 0\n}", "func (gh *GitHubChecker) validSignature(payload []byte, signature string) error {\n\texpected := gh.hashPayload(payload)\n\n\tsignatureParts := strings.SplitN(signature, \"=\", 2)\n\tif len(signatureParts) != 2 {\n\t\treturn fmt.Errorf(\"%s 
header should be of the form \\\"<type>=<hash>\\\", not %q\", xGitHubSignature, signature)\n\t}\n\n\ttp := signatureParts[0]\n\thash := signatureParts[1]\n\n\tif tp != \"sha1\" {\n\t\treturn fmt.Errorf(\"%s header signature type should be \\\"sha1\\\", not %q\", xGitHubSignature, signature)\n\t}\n\n\tif !hmac.Equal([]byte(hash), []byte(expected)) {\n\t\treturn fmt.Errorf(\"%s header signature hash should be %q, not %q\", xGitHubSignature, expected, hash)\n\t}\n\n\treturn nil\n}", "func (blk Block) ValidHash() bool {\n\t// TODO\n\n\thash := blk.Hash\n\n\tif hash == nil {\n\t\treturn false\n\t}\n\n\tl := len(hash)\n\n\tnBytes := int(blk.Difficulty / 8)\n\tnBits := blk.Difficulty % 8\n\n\tfor i := l - 1; i >= l-nBytes; i-- {\n\t\tif hash[i] != '\\x00' {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif hash[l-nBytes-1]%(1<<nBits) != 0 {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func IsValidHash(s string) bool {\n\tout, err := b58.Decode(s)\n\tif err != nil {\n\t\treturn false\n\t}\n\t_, err = mh.Cast(out)\n\treturn err == nil\n}", "func validateSignature(pubKey string, signature string, elements ...string) error {\n\tsig, err := util.ConvertSignature(signature)\n\tif err != nil {\n\t\treturn www.UserError{\n\t\t\tErrorCode: www.ErrorStatusInvalidSignature,\n\t\t}\n\t}\n\tb, err := hex.DecodeString(pubKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpk, err := identity.PublicIdentityFromBytes(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar msg string\n\tfor _, v := range elements {\n\t\tmsg += v\n\t}\n\tif !pk.VerifyMessage([]byte(msg), sig) {\n\t\treturn www.UserError{\n\t\t\tErrorCode: www.ErrorStatusInvalidSignature,\n\t\t}\n\t}\n\treturn nil\n}", "func (sig *Signature) Verify(hash []byte, pubKey *PublicKey) bool {\n\treturn ecdsa.Verify(pubKey.ToECDSA(), hash, sig.R, sig.S)\n}", "func (dtk *DcmTagKey) HasValidGroup() bool {\n\tif ((dtk.group & 1) != 0) && ((dtk.group <= 7) || (dtk.group == 0xFFFF)) {\n\t\treturn false\n\t}\n\treturn true\n}", "func (bh *BlockHeader) Valid() bool {\r\n\ttarget, err := ExpandTargetFromAsInt(hex.EncodeToString(bh.Bits))\r\n\tif err != nil {\r\n\t\treturn false\r\n\t}\r\n\r\n\tdigest := bt.ReverseBytes(crypto.Sha256d(bh.Bytes()))\r\n\tvar bn *big.Int = big.NewInt(0)\r\n\tbn.SetBytes(digest)\r\n\r\n\treturn bn.Cmp(target) < 0\r\n}", "func (pstFile *File) IsValidSignature() (bool, error) {\n\tsignature, err := pstFile.Read(4, 0)\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn bytes.Equal(signature, []byte(\"!BDN\")), nil\n}", "func Validate(digest *repb.Digest) error {\n\tif digest == nil {\n\t\treturn errors.New(\"nil digest\")\n\t}\n\tif ok, err := validateHashLength(digest.Hash); !ok {\n\t\treturn err\n\t}\n\tif !hexStringRegex.MatchString(digest.Hash) {\n\t\treturn fmt.Errorf(\"hash is not a lowercase hex string (%s)\", digest.Hash)\n\t}\n\tif digest.SizeBytes < 0 {\n\t\treturn fmt.Errorf(\"expected non-negative size, got %d\", digest.SizeBytes)\n\t}\n\treturn nil\n}", "func Validate(h, s, p string) bool {\n\treturn h == ComputeHash(p, s)\n}", "func validateSignature(transactionID string, transactionInputSignature string, unspentOutputAddress string) (bool, error) {\n\n\t// unspentOutputAddress is actually public key\n\t// first try to decode it to PEM block\n\tpemBlock, _ := pem.Decode([]byte(unspentOutputAddress))\n\tif pemBlock == nil {\n\t\treturn false, nil\n\t}\n\t// try to get the public key out of the PEM block\n\tpub, err := x509.ParsePKIXPublicKey(pemBlock.Bytes)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// get the string value 
out of signature which is hex encoded\n\tdecodedTransactionInputSignature, err := hex.DecodeString(transactionInputSignature)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// hash the unsigned transactionID so we can use the value in signature verification\n\thashedID := sha256.Sum256([]byte(transactionID))\n\n\t// verify signed decoded transactionID to the hashed unsigned transactionID\n\tvar verificationError = rsa.VerifyPKCS1v15(pub.(*rsa.PublicKey), crypto.SHA256, hashedID[:], []byte(decodedTransactionInputSignature))\n\n\t// verification failed\n\tif verificationError != nil {\n\t\treturn false, verificationError\n\t}\n\n\t// verification was success if there is no error\n\treturn true, nil\n}", "func VerifyHash(data Hash, pk PublicKey, sig Signature) (err error) {\n\tpkNorm := [PublicKeySize]byte(pk)\n\tsigNorm := [SignatureSize]byte(sig)\n\tverifies := ed25519.Verify(&pkNorm, data[:], &sigNorm)\n\tif !verifies {\n\t\terr = ErrInvalidSignature\n\t\treturn\n\t}\n\n\treturn\n}", "func IsHashValid(hash string, difficulty int) bool {\n\tprefix := strings.Repeat(\"0\", difficulty)\n\treturn strings.HasPrefix(hash, prefix)\n}", "func IsHashValid(hash string, difficulty int) bool {\n\tprefix := strings.Repeat(\"0\", difficulty)\n\treturn strings.HasPrefix(hash, prefix)\n}", "func (s *grpcServer) validateHash(hash string, logPrefix string) error {\n\tif len(hash) != hashKeyLength {\n\t\tmsg := fmt.Sprintf(\"Hash length must be length %d\", hashKeyLength)\n\t\ts.accessLogger.Printf(\"%s %s: %s\", logPrefix, hash, msg)\n\t\treturn status.Error(codes.InvalidArgument, msg)\n\t}\n\n\tif !hashKeyRegex.MatchString(hash) {\n\t\tmsg := \"Malformed hash\"\n\t\ts.accessLogger.Printf(\"%s %s: %s\", logPrefix, hash, msg)\n\t\treturn status.Error(codes.InvalidArgument, msg)\n\t}\n\n\treturn nil\n}", "func validSignature(body, key []byte, sig string) bool {\n\tconst prefix = \"sha1=\"\n\tif len(sig) < len(prefix) {\n\t\treturn false\n\t}\n\tsig = sig[len(prefix):]\n\tmac := hmac.New(sha1.New, key)\n\tmac.Write(body)\n\tb, err := hex.DecodeString(sig)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\t// Use hmac.Equal to avoid timing attacks.\n\treturn hmac.Equal(mac.Sum(nil), b)\n}", "func ValidateSignature(r *http.Request, secret []byte) (bool, error) {\n\tbody, err := ReadBodyRequest(r)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"read body: %w\", err)\n\t}\n\n\tr.Body = io.NopCloser(bytes.NewBuffer(body))\n\n\tif fmt.Sprintf(\"SHA-512=%x\", sha512.Sum512(body)) != r.Header.Get(\"Digest\") {\n\t\treturn false, model.WrapInvalid(errors.New(\"SHA-512 signature of body doesn't match\"))\n\t}\n\n\tsignatureString, signature, err := parseAuthorizationHeader(r)\n\tif err != nil {\n\t\treturn false, model.WrapInvalid(fmt.Errorf(\"parse authorization header: %w\", err))\n\t}\n\n\treturn hmac.Equal(signContent(secret, signatureString), signature), nil\n}", "func validateSignature(signature, secretKey string, payload []byte) error {\n\tsum := SHA1HMAC([]byte(secretKey), payload)\n\tif subtle.ConstantTimeCompare([]byte(sum), []byte(signature)) != 1 {\n\t\tlog.Printf(\"Expected signature %q (sum), got %q (hub-signature)\", sum, signature)\n\t\treturn errors.New(\"payload signature check failed\")\n\t}\n\treturn nil\n}", "func (b Block) Validate(prevHash []byte) bool {\n\tif bytes.Compare(b.prevHash, prevHash) != 0 {\n\t\treturn false\n\t}\n\n\tif bytes.Compare(b.hash, b.createHash()) != 0 {\n\t\treturn false\n\t}\n\treturn true\n}", "func (wh *Webhook) ValidateSignature(body []byte, signature string) error 
{\n\tmac := hmac.New(sha1.New, []byte(wh.Token))\n\tif _, err := mac.Write(body); err != nil {\n\t\treturn err\n\t}\n\n\tsig, err := hex.DecodeString(signature)\n\tif err != nil || !hmac.Equal(sig, mac.Sum(nil)) {\n\t\treturn ErrInvalidWebhookSignature\n\t}\n\n\treturn nil\n}", "func IsValidDigest(hash string) bool {\n\t// Currently we expect all digests to be ASCII encoded MD5 hashes.\n\tif len(hash) != 32 {\n\t\treturn false\n\t}\n\n\tfor _, c := range []byte(hash) {\n\t\tif ((c >= '0') && (c <= '9')) ||\n\t\t\t((c >= 'a') && (c <= 'f')) ||\n\t\t\t((c >= 'A') && (c <= 'F')) {\n\t\t\tcontinue\n\t\t}\n\t\treturn false\n\t}\n\treturn true\n}", "func (v *SignatureValidator) ValidateSignature(ctx context.Context, data []byte, signer address.Address, sig crypto.Signature) error {\n\tsignerAddress, err := v.signerView.ResolveToDeterministicAddress(ctx, signer)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to load signer address for %v\", signer)\n\t}\n\treturn crypto.Verify(&sig, signerAddress, data)\n}", "func (s SlackHandler) validateSignature(r *http.Request, body string) bool {\n\ttimestamp := r.Header.Get(\"X-Slack-Request-Timestamp\")\n\trequestSignature := r.Header.Get(\"X-Slack-Signature\")\n\tcompiled := fmt.Sprintf(\"%v:%v:%v\", requestVersion, timestamp, body)\n\tmac := hmac.New(sha256.New, []byte(s.SigningKey))\n\tmac.Write([]byte(compiled))\n\texpectedSignature := mac.Sum(nil)\n\treturn hmac.Equal(expectedSignature, []byte(requestSignature))\n}", "func Verify(sig *Signature, hash []byte) (bool, error) {\n\tif sig.Mode == ModeEdDSA {\n\t\tif len(hash) != crypto.SHA512.Size() {\n\t\t\tmsg := fmt.Sprintf(\"invalid hash length. wanted: %d, got: %d\", crypto.SHA512.Size(), len(hash))\n\t\t\treturn false, errors.New(msg)\n\t\t}\n\n\t\teddsaSig := sig.Signature\n\t\tif len(eddsaSig) != SignatureLength {\n\t\t\tmsg := fmt.Sprintf(\"invalid signature length. wanted: %d, got: %d\", SignatureLength, len(eddsaSig))\n\t\t\treturn false, errors.New(msg)\n\t\t}\n\t\topts := ed25519.Options{\n\t\t\tHash: crypto.SHA512,\n\t\t}\n\t\treturn ed25519.VerifyWithOptions(sig.Address, hash, eddsaSig, &opts), nil\n\t} else if sig.Mode == ModeBLS {\n\t\tif len(hash) != crypto.SHA3_256.Size() {\n\t\t\tmsg := fmt.Sprintf(\"invalid hash length. wanted: %d, got: %d\", crypto.SHA3_256.Size(), len(hash))\n\t\t\treturn false, errors.New(msg)\n\t\t}\n\n\t\tvar blsSig bls.Sign\n\t\tblsSig.Deserialize(sig.Signature)\n\t\tvar blsPub bls.PublicKey\n\t\tblsPub.Deserialize(sig.Address)\n\n\t\treturn blsSig.VerifyHash(&blsPub, hash), nil\n\t} else if sig.Mode == ModeMerkle {\n\t\t// calculate master\n\t\tcurrent := hash\n\t\tfor i := range sig.MerklePath {\n\t\t\th := sha512.New()\n\t\t\thash := sig.MerklePath[i]\n\t\t\tindex := sig.MerkleIndexes[i]\n\t\t\tvar msg []byte\n\t\t\tif index == false {\n\t\t\t\t// hash is left\n\t\t\t\tmsg = append(hash, current...)\n\t\t\t} else {\n\t\t\t\t// hash is right\n\t\t\t\tmsg = append(current, hash...)\n\t\t\t}\n\t\t\tif _, err := h.Write(msg); err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\n\t\t\tcurrent = h.Sum(nil)\n\t\t}\n\n\t\t// `current` should now be the merkle root.\n\n\t\t// use caching: find out whether we previously already checked that\n\t\t// signature is ok. 
for this, use hash(addr || merkle root || sig)\n\t\th := crypto.SHA256.New()\n\t\th.Write(sig.Address)\n\t\th.Write(current)\n\t\th.Write(sig.Signature)\n\t\tsigHash := h.Sum(nil)\n\t\tsigHashIndex := [32]byte{}\n\t\tcopy(sigHashIndex[:], sigHash[:])\n\n\t\t// lookup cache and return if cached\n\t\tif UseMerkleSignatureCaching {\n\t\t\tcachedValid, ok := merkleSigCache.Load(sigHashIndex)\n\t\t\tif ok && cachedValid == true {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t}\n\n\t\t// there is no cache entry, or entry was false.\n\t\topts := ed25519.Options{\n\t\t\tHash: crypto.SHA512,\n\t\t}\n\t\tvalid := ed25519.VerifyWithOptions(sig.Address, current, sig.Signature, &opts)\n\t\tif valid {\n\t\t\tmerkleSigCache.Store(sigHashIndex, true)\n\t\t}\n\t\treturn valid, nil\n\t} else {\n\t\treturn false, errors.New(\"mode not supported\")\n\t}\n}", "func (s *Sign) DoesSignatureMatch(hashedPayload string) (bool, *probe.Error) {\n\t// Save authorization header.\n\tv4Auth := s.httpRequest.Header.Get(\"Authorization\")\n\n\t// Parse signature version '4' header.\n\tsignV4Values, err := parseSignV4(v4Auth)\n\tif err != nil {\n\t\treturn false, err.Trace(v4Auth)\n\t}\n\n\t// Extract all the signed headers along with its values.\n\ts.extractedSignedHeaders = extractSignedHeaders(signV4Values.SignedHeaders, s.httpRequest.Header)\n\n\t// Verify if the access key id matches.\n\tif signV4Values.Credential.accessKeyID != s.accessKeyID {\n\t\treturn false, ErrInvalidAccessKeyID(\"Access key id does not match with our records.\", signV4Values.Credential.accessKeyID).Trace(signV4Values.Credential.accessKeyID)\n\t}\n\n\t// Verify if region is valid.\n\treqRegion := signV4Values.Credential.scope.region\n\tif !isValidRegion(reqRegion, s.region) {\n\t\treturn false, ErrInvalidRegion(\"Requested region is not recognized.\", reqRegion).Trace(reqRegion)\n\t}\n\n\t// Save region.\n\ts.region = reqRegion\n\n\t// Set input payload.\n\ts.httpRequest.Header.Set(\"X-Amz-Content-Sha256\", hashedPayload)\n\n\t// Extract date, if not present throw error.\n\tvar date string\n\tif date = s.httpRequest.Header.Get(http.CanonicalHeaderKey(\"x-amz-date\")); date == \"\" {\n\t\tif date = s.httpRequest.Header.Get(\"Date\"); date == \"\" {\n\t\t\treturn false, ErrMissingDateHeader(\"Date header is missing from the request.\").Trace()\n\t\t}\n\t}\n\t// Parse date header.\n\tt, e := time.Parse(iso8601Format, date)\n\tif e != nil {\n\t\treturn false, probe.NewError(e)\n\t}\n\n\t// Signature version '4'.\n\tcanonicalRequest := s.getCanonicalRequest()\n\tstringToSign := s.getStringToSign(canonicalRequest, t)\n\tsigningKey := s.getSigningKey(t)\n\tnewSignature := s.getSignature(signingKey, stringToSign)\n\n\t// Verify if signature match.\n\tif newSignature != signV4Values.Signature {\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}", "func VerifySignatureValidity(sig []byte) int {\n\t//64+1\n\tif len(sig) != 65 {\n\t\tlog.Panic(\"VerifySignatureValidity: sig len is not 65 bytes\")\n\t\treturn 0\n\t}\n\t//malleability check:\n\t//highest bit of 32nd byte must be 1\n\t//0x7f is 126 or 0b01111111\n\tif (sig[32] >> 7) == 1 {\n\t\treturn 0 // signature is malleable\n\t}\n\t//recovery id check\n\tif sig[64] >= 4 {\n\t\treturn 0 // recovery id invalid\n\t}\n\treturn 1\n}", "func (s *Signature) Validate(masterPubKey ed25519.PublicKey, b []byte) error {\n\tif !ed25519.Verify(masterPubKey, []byte(*s.PublicKey), []byte(*s.Endorsement)) {\n\t\treturn &Error{Code: 401, Message: \"Request Public Key was not endorsed by Manifold\"}\n\t}\n\n\tlivePubKey := 
ed25519.PublicKey([]byte(*s.PublicKey))\n\tif !ed25519.Verify(livePubKey, b, []byte(*s.Value)) {\n\t\treturn &Error{Code: 401, Message: \"Request was not signed by included Public Key\"}\n\t}\n\n\treturn nil\n}", "func ValidateFileHash(filepath string, hashed string) error {\n\ths := strings.Split(hashed, \":\")\n\tif len(hs) != 2 {\n\t\treturn errors.Errorf(\"unknown hashed format, expect is `sha256:xxxx`, but got `%s`\", hashed)\n\t}\n\n\tvar hasher hash.Hash\n\tswitch hs[0] {\n\tcase \"sha256\":\n\t\thasher = sha256.New()\n\tcase \"md5\":\n\t\thasher = md5.New()\n\tdefault:\n\t\treturn errors.Errorf(\"unknown hasher `%s`\", hs[0])\n\t}\n\n\tfp, err := os.Open(filepath)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"open file `%s`\", filepath)\n\t}\n\tdefer CloseQuietly(fp)\n\n\tif _, err = io.Copy(hasher, fp); err != nil {\n\t\treturn errors.Wrap(err, \"read file content\")\n\t}\n\n\tactualHash := hex.EncodeToString(hasher.Sum(nil))\n\tif hs[1] != actualHash {\n\t\treturn errors.Errorf(\"hash `%s` not match expect `%s`\", actualHash, hs[1])\n\t}\n\n\treturn nil\n}", "func (t *HashType) IsValid() bool {\n\t_, valid := validHashes[*t]\n\treturn valid\n}", "func AreSignersHashesValid(promise *cAPI.Promise) (bool, []Signer) {\n\tvar signers []Signer\n\tif len(promise.Context.Signers) == 0 {\n\t\treturn false, nil\n\t}\n\n\tfor _, v := range promise.Context.Signers {\n\t\tok, signer := IsSignerHashValid(v)\n\t\tif !ok {\n\t\t\treturn false, nil\n\t\t}\n\n\t\tsigners = append(signers, *signer)\n\t}\n\treturn true, signers\n}", "func isValidHash(hashType, hash string) bool {\n\tswitch hashType {\n\tcase string(HashMD5):\n\t\treturn len(hash) == 32\n\tcase string(HashSHA1):\n\t\treturn len(hash) == 40\n\tcase string(HashSHA256):\n\t\treturn len(hash) == 64\n\tcase string(HashSHA512):\n\t\treturn len(hash) == 128\n\tdefault:\n\t\treturn false\n\t}\n}", "func SignatureVerify(publicKey, sig, hash []byte) bool {\n\n\tbytesDecded, _ := base58.DecodeToBig(publicKey)\n\tpubl := splitBig(bytesDecded, 2)\n\tx, y := publ[0], publ[1]\n\n\tbytesDecded, _ = base58.DecodeToBig(sig)\n\tsigg := splitBig(bytesDecded, 2)\n\tr, s := sigg[0], sigg[1]\n\n\tpub := ecdsa.PublicKey{elliptic.P224(), x, y}\n\n\treturn ecdsa.Verify(&pub, hash, r, s)\n}", "func validateAuthGroup(name string, groups map[string]*protocol.AuthGroup) error {\n\tg := groups[name]\n\n\tfor _, ident := range g.Members {\n\t\tif _, err := identity.MakeIdentity(ident); err != nil {\n\t\t\treturn fmt.Errorf(\"auth: invalid identity %q in group %q - %s\", ident, name, err)\n\t\t}\n\t}\n\n\tfor _, glob := range g.Globs {\n\t\tif _, err := identity.MakeGlob(glob); err != nil {\n\t\t\treturn fmt.Errorf(\"auth: invalid glob %q in group %q - %s\", glob, name, err)\n\t\t}\n\t}\n\n\tfor _, nested := range g.Nested {\n\t\tif groups[nested] == nil {\n\t\t\treturn fmt.Errorf(\"auth: unknown nested group %q in group %q\", nested, name)\n\t\t}\n\t}\n\n\tif cycle := findGroupCycle(name, groups); len(cycle) != 0 {\n\t\treturn fmt.Errorf(\"auth: dependency cycle found - %v\", cycle)\n\t}\n\n\treturn nil\n}", "func (i service) ValidateSignature(signature *coredocumentpb.Signature, message []byte) error {\n\tcentID := id.NewDIDFromBytes(signature.EntityId)\n\n\terr := i.ValidateKey(context.Background(), centID, signature.PublicKey, id.KeyPurposeSigning)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !crypto.VerifyMessage(signature.PublicKey, message, signature.Signature, crypto.CurveSecp256K1) {\n\t\treturn errors.New(\"error when validating 
signature\")\n\t}\n\n\treturn nil\n}", "func PrivateKeyValidate(priv *rsa.PrivateKey,) error", "func (m Group) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (r *RegisterSecondSignatureAsset) IsValid() (bool, error) {\n\tif len(r.PublicKey) != ed25519.PublicKeySize {\n\t\treturn false,\n\t\t\tfmt.Errorf(\n\t\t\t\t\"public key %s has invalid length %d\",\n\t\t\t\thex.EncodeToString(r.PublicKey),\n\t\t\t\tlen(r.PublicKey))\n\t}\n\treturn true, nil\n}", "func (h FileHeader) Valid() bool {\n\treturn h.ID == 0x46464952 && h.Format == 0x45564157\n}", "func validSignature(section rainslib.MessageSectionWithSig, keys map[rainslib.PublicKeyID][]rainslib.PublicKey) bool {\n\tswitch section := section.(type) {\n\tcase *rainslib.AssertionSection, *rainslib.AddressAssertionSection:\n\t\treturn validateSignatures(section, keys)\n\tcase *rainslib.ShardSection:\n\t\treturn validShardSignatures(section, keys)\n\tcase *rainslib.ZoneSection:\n\t\treturn validZoneSignatures(section, keys)\n\tcase *rainslib.AddressZoneSection:\n\t\treturn validAddressZoneSignatures(section, keys)\n\tdefault:\n\t\tlog.Warn(\"Not supported Msg Section\")\n\t\treturn false\n\t}\n}", "func (h *Header) Valid() bool {\n\treturn h.File.Valid() && h.Format.Valid() && h.Data.ID == 0x61746164\n}", "func (f FormatHeader) Valid() bool {\n\treturn f.ID == 0x20746d66 && f.Size == 0x10 && f.AudioFormat == 1\n}", "func (pk PublicKey) Verify(hash []byte, s *Sign) bool {\n\treturn secp256k1.VerifySignature(pk.Bytes(), hash, s.Bytes()[:64])\n}", "func (h Bcrypt) IsValid(hash string, raw string) bool {\n\terr := bcrypt.CompareHashAndPassword([]byte(hash), []byte(raw))\n\treturn err == nil\n}", "func (sg *StorageGroup) SetValidationDataHash(hash *pkg.Checksum) {\n\t(*storagegroup.StorageGroup)(sg).\n\t\tSetValidationHash(hash.ToV2())\n}", "func (k Keeper) ValidateRawSignature(signature signing.SignatureDescriptor, message []byte) (addr sdk.AccAddress, err error) {\n\tvar pubKey cryptotypes.PubKey\n\tif err = k.cdc.UnpackAny(signature.PublicKey, &pubKey); err != nil {\n\t\treturn\n\t}\n\taddr = sdk.AccAddress(pubKey.Address().Bytes())\n\n\tsigData := signing.SignatureDataFromProto(signature.Data)\n\tswitch data := sigData.(type) {\n\tcase *signing.SingleSignatureData:\n\t\tif !pubKey.VerifySignature(message, data.Signature) {\n\t\t\terr = fmt.Errorf(\"unable to verify single signer signature\")\n\t\t}\n\tcase *signing.MultiSignatureData:\n\t\tmultiPK, ok := pubKey.(multisig.PubKey)\n\t\tif !ok {\n\t\t\terr = fmt.Errorf(\"expected %T, got %T\", (multisig.PubKey)(nil), pubKey)\n\t\t\treturn\n\t\t}\n\t\terr = multiPK.VerifyMultisignature(func(mode signing.SignMode) ([]byte, error) {\n\t\t\t// no special adjustments need to be made to the signing bytes based on signing mode\n\t\t\treturn message, nil\n\t\t}, data)\n\tdefault:\n\t\terr = fmt.Errorf(\"unexpected SignatureData %T\", sigData)\n\t}\n\treturn\n}", "func VerifyRFC6979Hash(key *ecdsa.PublicKey, msgHash, sig []byte) error {\n\tif key == nil {\n\t\treturn ErrEmptyPublicKey\n\t} else if r, s, err := decodeSignature(sig); err != nil {\n\t\treturn err\n\t} else if !ecdsa.Verify(key, msgHash, r, s) {\n\t\treturn ErrWrongSignature\n\t}\n\n\treturn nil\n}", "func (sg *StorageGroup) ValidationDataHash() *pkg.Checksum {\n\treturn pkg.NewChecksumFromV2(\n\t\t(*storagegroup.StorageGroup)(sg).\n\t\t\tGetValidationHash(),\n\t)\n}", "func (m *Group) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (m *Group) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", 
"func (bc *Bcrypter) ValidateHash(hash string) error {\n\t_, err := bcrypt.Cost([]byte(hash))\n\treturn err\n}", "func IsValidPayload(secret, headerHash string, payload []byte) bool {\n\thash := HashPayload(secret, payload)\n\treturn hmac.Equal(\n\t\t[]byte(hash),\n\t\t[]byte(headerHash),\n\t)\n}", "func (r *Response) IsSignatureValid(c *sa.Client) (bool, error) {\n\tsaDate := r.HTTPResponse.Header.Get(\"X-SA-DATE\")\n\tsaSignature := r.HTTPResponse.Header.Get(\"X-SA-SIGNATURE\")\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(saDate)\n\tbuffer.WriteString(\"\\n\")\n\tbuffer.WriteString(c.AppID)\n\tbuffer.WriteString(\"\\n\")\n\tbuffer.WriteString(r.RawJSON)\n\traw := buffer.String()\n\tbyteKey, _ := hex.DecodeString(c.AppKey)\n\tbyteData := []byte(raw)\n\tsig := hmac.New(sha256.New, byteKey)\n\tsig.Write([]byte(byteData))\n\tcomputedSig := base64.StdEncoding.EncodeToString(sig.Sum(nil))\n\tif computedSig != saSignature {\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}", "func Verify(pubkey *dsa.PublicKey, hash *[32]byte, sig *Signature) (valid bool) {\n\n\treturn dsa.Verify(pubkey, hash[:], sig.R, sig.S)\n}", "func (k *PublicKeySECP256K1R) VerifyHash(hash, sig []byte) bool {\n\tfactory := FactorySECP256K1R{}\n\tpk, err := factory.RecoverHashPublicKey(hash, sig)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn k.Address().Equals(pk.Address())\n}", "func (s FileSignature) Validate() error {\n\tif s.BaseSignature != BaseSignature {\n\t\treturn ErrInvalidSignature{\"Base Signature\", s.BaseSignature, BaseSignature}\n\t}\n\tif s.SecondarySignature != SecondarySignature {\n\t\treturn ErrInvalidSignature{\"Secondary Signature\", s.SecondarySignature, SecondarySignature}\n\t}\n\tif s.MinorVersion != MinorVersion {\n\t\treturn ErrInvalidSignature{\"Minor Version\", s.MinorVersion, MinorVersion}\n\t}\n\tif s.MajorVersion != MajorVersion {\n\t\treturn ErrInvalidSignature{\"Major Version\", s.MajorVersion, MajorVersion}\n\t}\n\treturn nil\n}", "func (m *HashType) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateFunction(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMethod(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateModifier(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func VerifySignature(d interface{}, signature string, keys []*rsa.PublicKey) error {\n\thash, err := calculateHash(d)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsg, err := base64.StdEncoding.DecodeString(signature)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvalid := false\n\tfor _, key := range keys {\n\t\terr = rsa.VerifyPKCS1v15(key, crypto.SHA256, hash[:], sg)\n\t\tif err == nil {\n\t\t\tvalid = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !valid {\n\t\treturn ErrInvalidSignature\n\t}\n\n\treturn nil\n}", "func ValidFilecoinMultihash(code FilecoinMultihashCode) bool {\n\t_, ok := FilecoinMultihashNames[code]\n\treturn ok\n}", "func (d *Definition) IsValid() (bool, []string) {\n\tconditions := []validationCondition{\n\t\t{len(d.Image) == 0, \"string [image] must be specified\"},\n\t\t{len(d.GroupName) == 0, \"string [group_name] must be specified\"},\n\t\t{!validGroupName.MatchString(d.GroupName), \"Group name can only contain letters, numbers, hyphens, and underscores\"},\n\t\t{len(d.GroupName) > 255, \"Group name must be 255 characters or less\"},\n\t\t{len(d.Alias) == 0, \"string [alias] must be 
specified\"},\n\t\t{d.Memory == nil, \"int [memory] must be specified\"},\n\t\t{len(d.Command) == 0, \"string [command] must be specified\"},\n\t}\n\n\tvalid := true\n\tvar reasons []string\n\tfor _, cond := range conditions {\n\t\tif cond.condition {\n\t\t\tvalid = false\n\t\t\treasons = append(reasons, cond.reason)\n\t\t}\n\t}\n\treturn valid, reasons\n}", "func Verify(pubKey []byte, hash []byte, sig []byte) (bool, error) {\n\tif len(sig) > SigLengthInBytes {\n\t\tsig = sig[:SigLengthInBytes]\n\t}\n\treturn crypto.VerifySignature(pubKey, hash, sig), nil\n}", "func (spec *MachineSpec) HasGroup() bool {\n\tif spec.Group.Id.Valid() {\n\t\treturn true\n\t}\n\treturn len(spec.Machine.Groups) != 0 && spec.Machine.Groups[0].Id.Valid()\n}", "func IsValidModelMultihash(model interface{}, modelMultihash string) error {\n\tcode, err := GetMultihashCode(modelMultihash)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tencodedComputedMultihash, err := CalculateModelMultihash(model, uint(code))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif encodedComputedMultihash != modelMultihash {\n\t\treturn errors.New(\"supplied hash doesn't match original content\")\n\t}\n\n\treturn nil\n}", "func ValidateSigner(signBytes, sig []byte, signer ethcmn.Address) error {\n\tpk, err := ethcrypto.SigToPub(signBytes, sig)\n\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to derive public key from signature\")\n\t} else if ethcrypto.PubkeyToAddress(*pk) != signer {\n\t\treturn fmt.Errorf(\"invalid signature for signer: %s\", signer)\n\t}\n\n\treturn nil\n}", "func (s *Signature) valid() bool {\n\treturn len(s.FirstName) > 0 &&\n\t\tlen(s.LastName) > 0 &&\n\t\tlen(s.Email) > 0 &&\n\t\ts.Age >= 18 && s.Age <= 180 &&\n\t\tlen(s.Message) < 140\n}", "func (e Des3CbcSha1Kd) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool {\n\treturn rfc3961.VerifyIntegrity(protocolKey, ct, pt, usage, e)\n}", "func (this Hash) IsValid() <-chan bool {\n\tc := make(chan bool, 1)\n\tgo func() {\n\t\tdefer close(c)\n\t\tc <- (<-this.Type() == \"hash\")\n\t}()\n\treturn c\n}", "func (bh *Header) Validate(r *Record) error {\n\trp := r.AuxFields.Get(programTag)\n\tfound := false\n\tfor _, hp := range bh.Progs() {\n\t\tif hp.UID() == rp.Value() {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !found && len(bh.Progs()) != 0 {\n\t\treturn fmt.Errorf(\"sam: program uid not found: %v\", rp.Value())\n\t}\n\n\trg := r.AuxFields.Get(readGroupTag)\n\tfound = false\n\tfor _, hg := range bh.RGs() {\n\t\tif hg.Name() == rg.Value() {\n\t\t\trPlatformUnit := r.AuxFields.Get(platformUnitTag).Value()\n\t\t\tif rPlatformUnit != hg.PlatformUnit() {\n\t\t\t\treturn fmt.Errorf(\"sam: mismatched platform for read group %s: %v != %v\", hg.Name(), rPlatformUnit, hg.platformUnit)\n\t\t\t}\n\t\t\trLibrary := r.AuxFields.Get(libraryTag).Value()\n\t\t\tif rLibrary != hg.Library() {\n\t\t\t\treturn fmt.Errorf(\"sam: mismatched library for read group %s: %v != %v\", hg.Name(), rLibrary, hg.library)\n\t\t\t}\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !found && len(bh.RGs()) != 0 {\n\t\treturn fmt.Errorf(\"sam: read group not found: %v\", rg.Value())\n\t}\n\n\treturn nil\n}", "func (s Scalar) Is_valid() (bool) { // TODO test this fun\n\tif s.bint.Cmp(big.NewInt(0))>=0 && s.bint.Cmp(GROUP_ORDER)<0 {\n\t\treturn true\n\t}\n\treturn false\n}", "func IsDigestValid(decryptedCell []byte) bool {\n\n\tif len(decryptedCell) != 512 {\n\t\treturn false\n\t}\n\tdigest := DecodePayloadDigest(decryptedCell)\n\tpayload := DecodeRelayPayload(decryptedCell)\n\tsum := 
md5.Sum(payload)\n\n\tfor i := 0; i < DigestLength; i++ {\n\t\tif digest[i] != sum[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func isValidHashType(ht string) bool {\n\treturn ht == string(HashMD5) || ht == string(HashSHA1) || ht == string(HashSHA256) || ht == string(HashSHA512)\n}", "func verifySECP256K1RSignatureFormat(sig []byte) error {\n\tif len(sig) != SECP256K1RSigLen {\n\t\treturn errInvalidSigLen\n\t}\n\n\tvar s secp256k1.ModNScalar\n\ts.SetByteSlice(sig[32:64])\n\tif s.IsOverHalfOrder() {\n\t\treturn errMutatedSig\n\t}\n\treturn nil\n}", "func (sig Signature) Verify(X curve.Point, hash []byte) bool {\n\tgroup := X.Curve()\n\n\tm := curve.FromHash(group, hash)\n\tsInv := group.NewScalar().Set(sig.S).Invert()\n\tmG := m.ActOnBase()\n\tr := sig.R.XScalar()\n\trX := r.Act(X)\n\tR2 := mG.Add(rX)\n\tR2 = sInv.Act(R2)\n\treturn R2.Equal(sig.R)\n}", "func IsTSValid(hash []byte, timestamp int64) bool {\n\treturn IsTSValid_(hash, timestamp, time.Now().Unix())\n}", "func (block *Block) IsValid() bool {\n\tflagBits := util.BytesToBitField(block.Flags)\n\thashes := make([][]byte, len(block.Hashes))\n\tfor i, hash := range block.Hashes {\n\t\thashes[i] = make([]byte, len(hash))\n\t\tcopy(hashes[i], hash)\n\t\tutil.ReverseByteArray(hashes[i])\n\t}\n\ttree := NewTree(int(block.Total))\n\ttree.PopulateTree(flagBits, hashes)\n\treturn bytes.Equal(util.ReverseByteArray(tree.Root()), block.MerkleRoot[:])\n}", "func validateSignatureAgainstKey(token *jwt.Token, tokenParts []string, key interface{}) error {\n\t// jwt.SigningMethod.Verify requires signing string and signature as separate inputs\n\treturn token.Method.Verify(strings.Join(tokenParts[0:2], \".\"), token.Signature, key)\n}", "func (s *Signature) MatchesPublicKey(pub crypto.PubKey) error {\n\tpubData, err := pub.Bytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkeyMulti, err := mh.Decode(s.GetKeyMultihash())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tourMh, err := mh.Sum(pubData, keyMulti.Code, keyMulti.Length)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// TODO: find a better way to derive digest without encoding it.\n\tourMhDec, err := mh.Decode(ourMh)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif bytes.Compare(ourMhDec.Digest, keyMulti.Digest) != 0 {\n\t\tkeyMultiC, err := mh.Cast(s.GetKeyMultihash())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn errors.Errorf(\"hash mismatch: %s != %s\", ourMh.B58String(), keyMultiC.B58String())\n\t}\n\n\treturn nil\n}", "func (m *Group) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateDeadline(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateExchangeDate(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMoneyLimit(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func IsSignatureUUIDValid(promise *cAPI.Promise) (bool, bson.ObjectId) {\n\tif bson.IsObjectIdHex(promise.Context.SignatureUUID) {\n\t\treturn true, bson.ObjectIdHex(promise.Context.SignatureUUID)\n\t}\n\n\treturn false, bson.NewObjectId()\n}", "func IsValid(h H3Index) bool {\n\treturn C.h3IsValid(h) == 1\n}", "func IsValid(h H3Index) bool {\n\treturn C.h3IsValid(h) == 1\n}", "func validateSignatureVersion(signature nbv1.S3SignatureVersion, nsStoreName string) error {\n\tif signature != \"\" &&\n\t\tsignature != 
nbv1.S3SignatureVersionV2 &&\n\t\tsignature != nbv1.S3SignatureVersionV4 {\n\t\treturn util.NewPersistentError(\"InvalidSignatureVersion\",\n\t\t\tfmt.Sprintf(\"Invalid s3 signature version %q for namespace store %q\",\n\t\t\t\tsignature, nsStoreName))\n\t}\n\treturn nil\n}", "func (sig *Signature) VerifySignature(publicKey interface{}, encoding string) bool {\n\tif sig.Data == nil {\n\t\tlog.Warn(\"sig does not contain signature data\", \"sig\", sig)\n\t\treturn false\n\t}\n\tif publicKey == nil {\n\t\tlog.Warn(\"PublicKey is nil\")\n\t\treturn false\n\t}\n\tencoding += sig.GetSignatureMetaData().String()\n\tdata := []byte(encoding)\n\tswitch sig.Algorithm {\n\tcase Ed25519:\n\t\tif pkey, ok := publicKey.(ed25519.PublicKey); ok {\n\t\t\treturn ed25519.Verify(pkey, data, sig.Data.([]byte))\n\t\t}\n\t\tlog.Warn(\"Could not assert type ed25519.PublicKey\", \"publicKeyType\", fmt.Sprintf(\"%T\", publicKey))\n\tcase Ed448:\n\t\tlog.Warn(\"Ed448 not yet Supported!\")\n\tcase Ecdsa256:\n\t\tif pkey, ok := publicKey.(*ecdsa.PublicKey); ok {\n\t\t\tif sig, ok := sig.Data.([]*big.Int); ok && len(sig) == 2 {\n\t\t\t\thash := sha256.Sum256(data)\n\t\t\t\treturn ecdsa.Verify(pkey, hash[:], sig[0], sig[1])\n\t\t\t}\n\t\t\tlog.Warn(\"Could not assert type []*big.Int\", \"signatureDataType\", fmt.Sprintf(\"%T\", sig.Data))\n\t\t\treturn false\n\t\t}\n\t\tlog.Warn(\"Could not assert type ecdsa.PublicKey\", \"publicKeyType\", fmt.Sprintf(\"%T\", publicKey))\n\tcase Ecdsa384:\n\t\tif pkey, ok := publicKey.(*ecdsa.PublicKey); ok {\n\t\t\tif sig, ok := sig.Data.([]*big.Int); ok && len(sig) == 2 {\n\t\t\t\thash := sha512.Sum384(data)\n\t\t\t\treturn ecdsa.Verify(pkey, hash[:], sig[0], sig[1])\n\t\t\t}\n\t\t\tlog.Warn(\"Could not assert type []*big.Int\", \"signature\", sig.Data)\n\t\t\treturn false\n\t\t}\n\t\tlog.Warn(\"Could not assert type ecdsa.PublicKey\", \"publicKeyType\", fmt.Sprintf(\"%T\", publicKey))\n\tdefault:\n\t\tlog.Warn(\"Signature algorithm type not supported\", \"type\", sig.Algorithm)\n\t}\n\treturn false\n}", "func (h *ArgonHasher) Validate(password, encoded string) (bool, error) {\n\tparams, salt, hash, err := decodeHash(encoded)\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tpasswordHash := argon2.IDKey([]byte(password), salt, params.Iterations, params.Memory, params.Threads, params.KeyLength)\n\n\tif subtle.ConstantTimeCompare(hash, passwordHash) == 0 {\n\t\treturn false, nil\n\t}\n\n\treturn true, nil\n}", "func ValidateChain() (bool, error) {\n\tn, err := findLast()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tfor n > 0 {\n\t\th, err := ftoh(n - 1)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tb, err := Read(n)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tif b.PreviousHash != h {\n\t\t\treturn false, nil\n\t\t}\n\n\t\tn--\n\t}\n\n\treturn true, nil\n}", "func (account *ED25519Account) CheckSignature(message []byte, signature Signature) error {\n\n\tif ed25519.SignatureSize != len(signature) {\n\t\treturn fault.InvalidSignature\n\t}\n\n\tif !ed25519.Verify(account.PublicKey[:], message, signature) {\n\t\treturn fault.InvalidSignature\n\t}\n\treturn nil\n}", "func (g *Group) Validate() error {\n\treturn vd.ValidateStruct(\n\t\tvd.Field(&g.Name, vd.RuneLength(0, 255)),\n\t)\n}", "func ValidateSignature(ctx context.Context, req *http.Request, v ClientStorage) error {\n\tpreq, err := newProviderRequest(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err = v.ValidateNonce(ctx, preq.clientKey, preq.nonce, preq.timestamp, req); err != nil 
{\n\t\treturn err\n\t}\n\tsigner, invalidClient := v.GetSigner(ctx, preq.clientKey, preq.signatureMethod, req)\n\n\t// Check signature even if client is invalid to prevent timing attacks.\n\tinvalidSignature := preq.checkSignature(signer)\n\tif invalidClient != nil {\n\t\treturn invalidClient\n\t}\n\treturn invalidSignature\n}", "func Verify(pub *PublicKey, hash []byte, r, s *big.Int) bool", "func ValidateSignatureValues(v byte, r, s *big.Int, homestead bool) bool {\n\tif r.Cmp(big.NewInt(1)) < 0 || s.Cmp(big.NewInt(1)) < 0 {\n\t\treturn false\n\t}\n\tcurve := DefaultCryptoType()\n\tcurve256N := curve.Params().N\n\tcurve256halfN := new(big.Int).Div(curve256N, big.NewInt(2))\n\tif homestead && s.Cmp(curve256halfN) > 0 {\n\t\treturn false\n\t}\n\t// Frontier: allow s to be in full N range\n\treturn r.Cmp(curve256N) < 0 && s.Cmp(curve256N) < 0 && (v == 0 || v == 1)\n}", "func validateSignatures(section rainslib.MessageSectionWithSig, keys map[rainslib.PublicKeyID][]rainslib.PublicKey) bool {\n\tif !rainsSiglib.CheckSectionSignatures(section, keys, sigEncoder, Config.MaxCacheValidity) {\n\t\treturn false //already logged\n\t}\n\tif section.ValidSince() == math.MaxInt64 {\n\t\tlog.Warn(\"No signature is valid before the MaxValidity date in the future.\")\n\t\treturn false\n\t}\n\treturn len(section.Sigs(rainslib.RainsKeySpace)) > 0\n}", "func (r *rsaPublicKey) CheckSignature(message []byte, sig []byte) error {\r\n\th := sha256.New()\r\n\th.Write(message)\r\n\td := h.Sum(nil)\r\n\treturn rsa.VerifyPKCS1v15(r.PublicKey, crypto.SHA256, d, sig)\r\n}", "func validShardSignatures(section *rainslib.ShardSection, keys map[rainslib.PublicKeyID][]rainslib.PublicKey) bool {\n\tif !validateSignatures(section, keys) || !validContainedAssertions(section.Content, keys) {\n\t\treturn false\n\t}\n\treturn true\n}", "func CheckIntegrity(content string, hash string) bool {\n\tif hash != GetChecksum(content) {\n\t\treturn false\n\t}\n\treturn true\n}" ]
[ "0.70170313", "0.63538384", "0.6279098", "0.6181657", "0.6105209", "0.6074831", "0.6050759", "0.5998113", "0.592034", "0.5897292", "0.583161", "0.58125925", "0.5799736", "0.579774", "0.57662517", "0.57348645", "0.5720027", "0.57123685", "0.56783986", "0.5667011", "0.5667011", "0.56623906", "0.56619674", "0.5657979", "0.5598267", "0.55913323", "0.5575956", "0.5560049", "0.55494434", "0.5505501", "0.55037713", "0.5479458", "0.54750067", "0.54705805", "0.5451857", "0.54438955", "0.5437204", "0.542399", "0.5401246", "0.5397879", "0.537511", "0.536313", "0.5357317", "0.535075", "0.53476006", "0.533479", "0.53237313", "0.53102124", "0.52868706", "0.5285716", "0.5278666", "0.5274423", "0.52719223", "0.5255848", "0.52510524", "0.52510524", "0.5237373", "0.52230215", "0.52142894", "0.52103424", "0.5196585", "0.5196364", "0.518675", "0.5173637", "0.5154142", "0.51451355", "0.51434255", "0.5132915", "0.5131605", "0.51188564", "0.5107204", "0.5099328", "0.50923944", "0.50901645", "0.5085983", "0.50760186", "0.5072307", "0.50646675", "0.50577736", "0.50547266", "0.50506365", "0.50484735", "0.50369316", "0.50262934", "0.50251013", "0.5009429", "0.5009429", "0.50038177", "0.4998897", "0.49864492", "0.49850997", "0.49849302", "0.49513486", "0.4947343", "0.49451712", "0.494039", "0.4939218", "0.49291924", "0.49282652", "0.4916734" ]
0.76344764
0
AreValid checks for groups correctness and uniqueness.
func (g Groups) AreValid(h util.Uint160) error {
	for i := range g {
		err := g[i].IsValid(h)
		if err != nil {
			return err
		}
	}
	if len(g) < 2 {
		return nil
	}
	pkeys := make(keys.PublicKeys, len(g))
	for i := range g {
		pkeys[i] = g[i].PublicKey
	}
	sort.Sort(pkeys)
	for i := range pkeys {
		if i == 0 {
			continue
		}
		if pkeys[i].Cmp(pkeys[i-1]) == 0 {
			return errors.New("duplicate group keys")
		}
	}
	return nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (g *Group) IsValid(h util.Uint160) error {\n\tif !g.PublicKey.Verify(g.Signature, hash.Sha256(h.BytesBE()).BytesBE()) {\n\t\treturn errors.New(\"incorrect group signature\")\n\t}\n\treturn nil\n}", "func (m Group) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (m *Group) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (m *Group) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func validateGroups(groupID, totalGroupsNum uint) error {\n\tif totalGroupsNum == 0 {\n\t\t// Need at least one group\n\t\treturn fmt.Errorf(errTotalGroupsZero)\n\t}\n\tif groupID >= totalGroupsNum {\n\t\t// Need reasonable groupID\n\t\treturn fmt.Errorf(errInvalidGroupsFmt, groupID, totalGroupsNum)\n\t}\n\treturn nil\n}", "func (m *Group) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateDeadline(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateExchangeDate(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMoneyLimit(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (s *mustRunAs) Validate(fldPath *field.Path, _ *api.Pod, groups []int64) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif len(groups) == 0 && len(s.ranges) > 0 {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(s.field), groups, \"unable to validate empty groups against required ranges\"))\n\t}\n\n\tfor _, group := range groups {\n\t\tif !s.isGroupValid(group) {\n\t\t\tdetail := fmt.Sprintf(\"%d is not an allowed group\", group)\n\t\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(s.field), groups, detail))\n\t\t}\n\t}\n\n\treturn allErrs\n}", "func (g Group) IsOk() bool {\n\tpaw.Logger.Debug(\"checking Group...\" + paw.Caller(1))\n\tif g > 0 && g < 4 {\n\t\treturn true\n\t}\n\treturn false\n\t// switch g {\n\t// case Grouped, GroupedR, GroupNone:\n\t// \treturn true\n\t// default:\n\t// \treturn false\n\t// }\n}", "func (c CgroupnsMode) Valid() bool {\n\treturn c.IsEmpty() || c.IsPrivate() || c.IsHost()\n}", "func (dtk *DcmTagKey) HasValidGroup() bool {\n\tif ((dtk.group & 1) != 0) && ((dtk.group <= 7) || (dtk.group == 0xFFFF)) {\n\t\treturn false\n\t}\n\treturn true\n}", "func ValidateGroups(groupID, totalGroupsNum uint) error {\n\tif totalGroupsNum == 0 {\n\t\t// Need at least one group\n\t\treturn fmt.Errorf(errTotalGroupsZero)\n\t}\n\tif groupID >= totalGroupsNum {\n\t\t// Need reasonable groupID\n\t\treturn fmt.Errorf(errInvalidGroupsFmt, groupID, totalGroupsNum)\n\t}\n\treturn nil\n}", "func (c CgroupSpec) Valid() bool {\n\t// TODO(thaJeztah): align with PidMode, and consider container-mode without a container name/ID to be invalid.\n\treturn c == \"\" || c.IsContainer()\n}", "func (g *Group) Validate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.Validate(\n\t\t&validators.StringIsPresent{Field: g.Name, Name: \"Name\"},\n\t), nil\n}", "func (d *Definition) IsValid() (bool, []string) {\n\tconditions := []validationCondition{\n\t\t{len(d.Image) == 0, \"string [image] must be specified\"},\n\t\t{len(d.GroupName) == 0, \"string [group_name] must be specified\"},\n\t\t{!validGroupName.MatchString(d.GroupName), \"Group name can only contain letters, numbers, hyphens, and underscores\"},\n\t\t{len(d.GroupName) > 255, \"Group name must be 255 characters or 
less\"},\n\t\t{len(d.Alias) == 0, \"string [alias] must be specified\"},\n\t\t{d.Memory == nil, \"int [memory] must be specified\"},\n\t\t{len(d.Command) == 0, \"string [command] must be specified\"},\n\t}\n\n\tvalid := true\n\tvar reasons []string\n\tfor _, cond := range conditions {\n\t\tif cond.condition {\n\t\t\tvalid = false\n\t\t\treasons = append(reasons, cond.reason)\n\t\t}\n\t}\n\treturn valid, reasons\n}", "func (s Scalar) Is_valid() (bool) { // TODO test this fun\n\tif s.bint.Cmp(big.NewInt(0))>=0 && s.bint.Cmp(GROUP_ORDER)<0 {\n\t\treturn true\n\t}\n\treturn false\n}", "func (g *Group) Validate() error {\n\treturn vd.ValidateStruct(\n\t\tvd.Field(&g.Name, vd.RuneLength(0, 255)),\n\t)\n}", "func validateAuthGroup(name string, groups map[string]*protocol.AuthGroup) error {\n\tg := groups[name]\n\n\tfor _, ident := range g.Members {\n\t\tif _, err := identity.MakeIdentity(ident); err != nil {\n\t\t\treturn fmt.Errorf(\"auth: invalid identity %q in group %q - %s\", ident, name, err)\n\t\t}\n\t}\n\n\tfor _, glob := range g.Globs {\n\t\tif _, err := identity.MakeGlob(glob); err != nil {\n\t\t\treturn fmt.Errorf(\"auth: invalid glob %q in group %q - %s\", glob, name, err)\n\t\t}\n\t}\n\n\tfor _, nested := range g.Nested {\n\t\tif groups[nested] == nil {\n\t\t\treturn fmt.Errorf(\"auth: unknown nested group %q in group %q\", nested, name)\n\t\t}\n\t}\n\n\tif cycle := findGroupCycle(name, groups); len(cycle) != 0 {\n\t\treturn fmt.Errorf(\"auth: dependency cycle found - %v\", cycle)\n\t}\n\n\treturn nil\n}", "func (m *ModelsGroupRule) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateGroupCustomRule(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateGroupPredefinedRules(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *AutoscalingGroup) Validate() error {\n\treturn m.validate(false)\n}", "func (m *ConsistencyGroup) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateCluster(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateConsistencyGroupSnapshots(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDescription(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateEntityAsyncStatus(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateIscsiLuns(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateLabels(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateLocalCreatedAt(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateLocalID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateName(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNamespaces(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateUniqueSize(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o Request) Valid() error {\n\tif len(o.GroupBy) == 0 {\n\t\treturn skerr.Fmt(\"at least one GroupBy value must be supplied.\")\n\t}\n\n\tvalid := false\n\tfor _, op := range AllOperations {\n\t\tif op == o.Operation {\n\t\t\tvalid = 
true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !valid {\n\t\treturn skerr.Fmt(\"invalid Operation value: %q\", o.Operation)\n\t}\n\n\tvalid = false\n\tfor _, incomingOp := range o.Summary {\n\t\tfor _, op := range AllOperations {\n\t\t\tif op == incomingOp {\n\t\t\t\tvalid = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !valid {\n\t\t\treturn skerr.Fmt(\"invalid Summary value: %q\", incomingOp)\n\t\t}\n\t}\n\treturn nil\n}", "func (g *Groups) GetValidGroups() Groups {\n\tvalid := Groups{}\n\tfor _, group := range *g {\n\t\tif !group.IsFull() {\n\t\t\tvalid = append(valid, group)\n\t\t}\n\t}\n\treturn valid\n}", "func (s *service) validateClaGroupInput(ctx context.Context, input *models.CreateClaGroupInput) (bool, error) {\n\tif input.FoundationSfid == nil {\n\t\treturn false, fmt.Errorf(\"missing foundation ID parameter\")\n\t}\n\tif input.ClaGroupName == nil {\n\t\treturn false, fmt.Errorf(\"missing CLA Group parameter\")\n\t}\n\n\tfoundationSFID := *input.FoundationSfid\n\tclaGroupName := *input.ClaGroupName\n\n\tf := logrus.Fields{\n\t\t\"functionName\": \"v2.cla_groups.helpers.validateClaGroupInput\",\n\t\tutils.XREQUESTID: ctx.Value(utils.XREQUESTID),\n\t\t\"ClaGroupName\": claGroupName,\n\t\t\"ClaGroupDescription\": input.ClaGroupDescription,\n\t\t\"FoundationSfid\": foundationSFID,\n\t\t\"IclaEnabled\": *input.IclaEnabled,\n\t\t\"CclaEnabled\": *input.CclaEnabled,\n\t\t\"CclaRequiresIcla\": *input.CclaRequiresIcla,\n\t\t\"ProjectSfidList\": strings.Join(input.ProjectSfidList, \",\"),\n\t\t\"templateID\": input.TemplateFields.TemplateID,\n\t}\n\n\tlog.WithFields(f).Debug(\"validating CLA Group input...\")\n\n\tif input.TemplateFields.TemplateID == \"\" {\n\t\tmsg := \"missing CLA Group template ID value\"\n\t\tlog.WithFields(f).Warn(msg)\n\t\treturn false, errors.New(msg)\n\t}\n\tif !s.v1TemplateService.CLAGroupTemplateExists(ctx, input.TemplateFields.TemplateID) {\n\t\tmsg := \"invalid template ID\"\n\t\tlog.WithFields(f).Warn(msg)\n\t\treturn false, errors.New(msg)\n\t}\n\t// First, check that all the required flags are set and make sense\n\tif foundationSFID == \"\" {\n\t\tmsg := \"bad request: foundation_sfid cannot be empty\"\n\t\tlog.WithFields(f).Warn(msg)\n\t\treturn false, errors.New(msg)\n\t}\n\tif !*input.IclaEnabled && !*input.CclaEnabled {\n\t\tmsg := \"bad request: can not create cla group with both icla and ccla disabled\"\n\t\tlog.WithFields(f).Warn(msg)\n\t\treturn false, errors.New(msg)\n\t}\n\tif *input.CclaRequiresIcla {\n\t\tif !(*input.IclaEnabled && *input.CclaEnabled) {\n\t\t\tmsg := \"bad request: ccla_requires_icla can not be enabled if one of icla/ccla is disabled\"\n\t\t\tlog.WithFields(f).Warn(msg)\n\t\t\treturn false, errors.New(msg)\n\t\t}\n\t}\n\n\t// Ensure we don't have a duplicate CLA Group Name\n\tlog.WithFields(f).Debug(\"checking for duplicate CLA Group name...\")\n\tclaGroupModel, err := s.v1ProjectService.GetCLAGroupByName(ctx, claGroupName)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif claGroupModel != nil {\n\t\treturn false, fmt.Errorf(\"bad request: cla_group with name '%s' already exists\", claGroupName)\n\t}\n\n\tlog.WithFields(f).Debug(\"looking up project in project service by Foundation SFID...\")\n\t// Use the Platform Project Service API to lookup the Foundation details\n\tpsc := v2ProjectService.GetClient()\n\tfoundationProjectDetails, err := psc.GetProject(foundationSFID)\n\tif err != nil {\n\t\tif _, ok := err.(*psproject.GetProjectNotFound); ok {\n\t\t\treturn false, fmt.Errorf(\"bad request: invalid foundation_sfid - unable to locate 
foundation by ID: %s\", foundationSFID)\n\t\t}\n\t\treturn false, err\n\t}\n\n\t// Is our parent the LF project?\n\tlog.WithFields(f).Debugf(\"looking up LF parent project record...\")\n\tisLFParent := false\n\tif utils.IsProjectHaveParent(foundationProjectDetails) {\n\t\tisLFParent, err = psc.IsTheLinuxFoundation(utils.GetProjectParentSFID(foundationProjectDetails))\n\t\tif err != nil {\n\t\t\tlog.WithFields(f).WithError(err).Warnf(\"validation failure - unable to lookup parent project by SFID: %s\", utils.GetProjectParentSFID(foundationProjectDetails))\n\t\t\treturn false, err\n\t\t}\n\t}\n\n\t// If the foundation details in the platform project service indicates that this foundation has no parent or no\n\t// children/sub-project... (stand alone project situation)\n\tlog.WithFields(f).Debug(\"checking to see if we have a standalone project...\")\n\tif isLFParent && len(foundationProjectDetails.Projects) == 0 {\n\t\tlog.WithFields(f).Debug(\"we have a standalone project...\")\n\t\t// Did the user actually pass in any projects? If none - add the foundation ID to the list and return to\n\t\t// indicate it is a \"standalone project\"\n\t\tif len(input.ProjectSfidList) == 0 {\n\t\t\t// Add the foundation ID into the project list - caller should do this, but we'll add for compatibility\n\t\t\tlog.WithFields(f).Debug(\"no projects provided - adding foundation ID to the list of projects\")\n\t\t\tinput.ProjectSfidList = append(input.ProjectSfidList, foundationSFID)\n\t\t\tlog.WithFields(f).Debug(\"foundation doesn't have a parent or any children project in SF - this is a standalone project\")\n\t\t\treturn true, nil\n\t\t}\n\n\t\t// If they provided a project in the list - this is ok, as long as it's the foundation ID\n\t\tif len(input.ProjectSfidList) == 1 && isFoundationIDInList(foundationSFID, input.ProjectSfidList) {\n\t\t\tlog.WithFields(f).Debug(\"foundation doesn't have a parent or any children project in SF - this is a standalone project\")\n\t\t\treturn true, nil\n\t\t}\n\n\t\t// oops, not allowed - send error\n\t\tlog.WithFields(f).Warn(\"this project does not have subprojects defined in SF but some are provided as input\")\n\t\treturn false, fmt.Errorf(\"bad request: invalid project_sfid_list. 
This project does not have subprojects defined in SF but some are provided as input\")\n\t}\n\n\tprojectLevelCLA := true\n\tif isFoundationIDInList(*input.FoundationSfid, input.ProjectSfidList) {\n\t\tprojectLevelCLA = false\n\t}\n\n\terr = s.validateEnrollProjectsInput(ctx, foundationSFID, input.ProjectSfidList, projectLevelCLA, []string{})\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn false, nil\n}", "func (k *KEPHandler) validateGroups(p *Proposal) []error {\n\tvar errs []error\n\tvalidGroups := make(map[string]bool)\n\tfor _, g := range k.Groups {\n\t\tvalidGroups[g] = true\n\t}\n\tfor _, g := range p.ParticipatingSIGs {\n\t\tif _, ok := validGroups[g]; !ok {\n\t\t\terrs = append(errs, fmt.Errorf(\"invalid participating-sig: %s\", g))\n\t\t}\n\t}\n\tif _, ok := validGroups[p.OwningSIG]; !ok {\n\t\terrs = append(errs, fmt.Errorf(\"invalid owning-sig: %s\", p.OwningSIG))\n\t}\n\treturn errs\n}", "func (collection *Collection) Valid() bool {\n\t// Collection must contain at least one GameTree\n\tif len(collection.GameTrees) == 0 {\n\t\treturn false\n\t}\n\n\t// Check GameTrees\n\tfor _, gameTree := range collection.GameTrees {\n\t\tif !validGameTree(gameTree) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (m *DeviceGroupData) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func ValidateGroupView(result *GroupView) (err error) {\n\tif result.ID == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"id\", \"result\"))\n\t}\n\tif result.Name == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"name\", \"result\"))\n\t}\n\treturn\n}", "func (gs *GameSpec) Validate() bool {\n\tnbUsers := len(gs.Players)\n\n\t// games are either public or private. In the later case they must have 1+\n\t// players.\n\tif (gs.Public && nbUsers > 0) || (!gs.Public && nbUsers == 0) {\n\t\treturn false\n\t}\n\n\t// Note: the API accepts empty teasers\n\n\tif !paceRange.Include(gs.Pace) ||\n\t\t!turnsRange.Include(gs.Turns) ||\n\t\t!antsPerPlayerRange.Include(gs.AntsPerPlayer) ||\n\t\t!playersRange.Include(gs.MinPlayers) ||\n\t\t!playersRange.Include(gs.MaxPlayers) ||\n\t\tgs.MinPlayers > gs.MaxPlayers ||\n\t\t!initialEnergyRange.Include(gs.InitialEnergy) ||\n\t\t!initialAcidRange.Include(gs.InitialAcid) {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (oa *offsetAdmin) Valid() bool {\n\tif oa.grp == \"\" || oa.top == \"\" {\n\t\treturn false\n\t}\n\treturn true\n}", "func (m *JGroup) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateAllowedDomains(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateCustomize(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDefaultChannels(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStackTemplates(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTitle(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *UserPermissionsGroupsItems0) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateAclurl(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateActions(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDescription(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := 
m.validateGid(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMembershipurl(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateRid(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *Manifest) IsValid(hash util.Uint160) bool {\n\tif m.ABI.Hash != hash {\n\t\treturn false\n\t}\n\tfor _, g := range m.Groups {\n\t\tif !g.IsValid(hash) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (r addReq) Validate() error {\n\tif len(r.ProjectID) == 0 || len(r.Body.Name) == 0 || len(r.Body.Group) == 0 {\n\t\treturn fmt.Errorf(\"the name, project ID and group cannot be empty\")\n\t}\n\n\tfor _, existingGroupPrefix := range serviceAccountGroupsPrefixes {\n\t\tif existingGroupPrefix == r.Body.Group {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"invalid group name %s\", r.Body.Group)\n}", "func (f Format) Valid() bool {\n\tfor _, valid := range Formats {\n\t\tif valid == f {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (id GID) Valid() bool {\n\treturn len(id) == 12\n}", "func (id GroupID) Validate() error {\n\tif err := id.DeploymentID().Validate(); err != nil {\n\t\treturn sdkerrors.Wrap(err, \"GroupID: Invalid DeploymentID\")\n\t}\n\tif id.GSeq == 0 {\n\t\treturn sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, \"GroupID: Invalid Group Sequence\")\n\t}\n\treturn nil\n}", "func (m *DeviceGroup) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateAwsTestResult(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateAzureTestResult(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateCustomProperties(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateGcpTestResult(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateName(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSubGroups(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (gist *Gist) Validate() bool {\n\tgist.Errors = make(map[string]string)\n\n\tif gist.Title == \"\" {\n\t\tgist.Errors[\"Title\"] = \"You must provide a title.\"\n\t}\n\n\tif gist.Content == \"\" {\n\t\tgist.Errors[\"Content\"] = \"You must provide content.\"\n\t}\n\n\treturn len(gist.Errors) == 0\n}", "func (m *CreateTechAdGroupReqData) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (m *DashboardGroup) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateLinks(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDashboardEntries(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (validation *Validation) Valid() bool {\n\treturn validation.valid\n}", "func (m *AutoscalingGroupSize) Validate() error {\n\treturn m.validate(false)\n}", "func (v *Validation) Valid() bool {\n\treturn len(*v) == 0\n}", "func (o *GetUniverseGroupsGroupIDOKBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateCategoryID(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateGroupID(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err 
:= o.validateName(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validatePublished(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateTypes(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m Matches) IsValid() *ValidationError {\n\terrs := &ValidationError{}\n\n\tseenMatch := map[string]bool{}\n\tfor _, e := range m {\n\t\tif seenMatch[e.Key()] {\n\t\t\terrs.AddNew(ErrorCase{\"\", \"duplicate match found \" + e.Key()})\n\t\t}\n\t\terrs.MergePrefixed(e.IsValid(), fmt.Sprintf(\"matches[%v]\", e.Key()))\n\t\tseenMatch[e.Key()] = true\n\t}\n\n\treturn errs.OrNil()\n}", "func (t *OpenconfigQos_Qos_ForwardingGroups) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_ForwardingGroups\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (r *Group) Validate() error {\n\n\tR := *r\n\tif len(R) != 1 {\n\t\treturn errors.New(\"Group must (only) contain one element\")\n\t}\n\ts, ok := R[0].(string)\n\tif !ok {\n\t\treturn errors.New(\"Group element must be a string\")\n\t}\n\tif e := ValidName(s); e != nil {\n\t\treturn fmt.Errorf(\"error in name of Group: %v\", e)\n\t}\n\treturn nil\n}", "func (validation *Validation) IsValid(name string) bool {\n\tif _, isNotValid := validation.errors[name]; isNotValid {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func ValidateMultipleRequest(requests []Request) (bool, error) {\n\tgroupNames := []string{}\n\tvar validationError ValidationError\n\n\tfor _, request := range requests {\n\t\tgroupNames = append(groupNames, request.Group)\n\t}\n\n\t// Check that all group names are unique.\n\tif !StringsUnique(groupNames) {\n\t\tvalidationError.Add(groupsSameNameError)\n\t}\n\n\t// Check that group names are not prefixes of each other.\n\tif StringsSharePrefix(groupNames) {\n\t\tvalidationError.Add(groupsArePrefixError)\n\t}\n\n\tif len(validationError.CausingErrors) != 0 {\n\t\treturn false, validationError\n\t}\n\treturn true, nil\n}", "func (v MetricCustomSpaceAggregation) IsValid() bool {\n\tfor _, existing := range allowedMetricCustomSpaceAggregationEnumValues {\n\t\tif existing == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (ap *AttackPattern) Valid() (valid bool, errs []error) {\n\tov, err := ap.Object.Valid()\n\tif !ov {\n\t\terrs = append(errs, fmt.Errorf(\"Invalid Object: %v\", err))\n\t}\n\n\tif ap.Type != attackPatternType {\n\t\terrs = append(errs, fmt.Errorf(\"Field 'type' is %s, should be %s\", ap.Type, attackPatternType))\n\t}\n\n\tif ap.Name == \"\" {\n\t\terrs = append(errs, fmt.Errorf(\"Field 'name' required for %s\", attackPatternType))\n\t}\n\n\tfor _, kcp := range ap.KillChainPhases {\n\t\t_, newErrs := kcp.Valid()\n\t\tif len(newErrs) != 0 {\n\t\t\terrs = append(errs, newErrs...)\n\t\t}\n\t}\n\n\tif len(errs) == 0 {\n\t\tvalid = true\n\t}\n\treturn\n}", "func (m *RecipientGroup) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateGroupName(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateRecipients(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (entity *Collection) Valid() (bool, error) {\n\tvar stack model.ErrorStack\n\n\t// Iterate through each property in `Items` 
validating them as we go.\n\tfor _, item := range entity.Items {\n\t\tif ok, err := item.Valid(); !ok {\n\t\t\tstack.Append(\"Item\", err)\n\t\t}\n\t}\n\n\t// Custom errors\n\tif entity.PayloadBranch.Type != \"\" {\n\t\tif ok, err := entity.Branch.Valid(); !ok {\n\t\t\tstack.Append(\"Item\", err)\n\t\t} else {\n\t\t\tif entity.Branch.Value != \"No\" {\n\t\t\t\tstack.Append(\"Collection\", model.ErrFieldInvalid{\"Collection branch value is required\"})\n\t\t\t}\n\t\t}\n\t}\n\n\treturn !stack.HasErrors(), stack\n}", "func (gs GenesisState) Validate() error {\n\tseenDelegations := make(map[string]bool)\n\tseenMissCounters := make(map[string]bool)\n\tseenAggregates := make(map[string]bool)\n\n\tfor i, feederDelegation := range gs.FeederDelegations {\n\t\tif seenDelegations[feederDelegation.Validator] {\n\t\t\treturn fmt.Errorf(\"duplicated feeder delegation for validator %s at index %d\", feederDelegation.Validator, i)\n\t\t}\n\n\t\tdelegateAddr, err := sdk.AccAddressFromBech32(feederDelegation.Delegate)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"invalid feeder delegate at index %d: %w\", i, err)\n\t\t}\n\n\t\tvalidatorAddr, err := sdk.ValAddressFromBech32(feederDelegation.Validator)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"invalid feeder validator at index %d: %w\", i, err)\n\t\t}\n\n\t\tif delegateAddr.Equals(validatorAddr) {\n\t\t\treturn fmt.Errorf(\"delegate address %s cannot be equal to validator address %s\", feederDelegation.Delegate, feederDelegation.Validator)\n\t\t}\n\t\tseenDelegations[feederDelegation.Validator] = true\n\t}\n\n\tfor i, missCounter := range gs.MissCounters {\n\t\tif seenMissCounters[missCounter.Validator] {\n\t\t\treturn fmt.Errorf(\"duplicated miss counter for validator %s at index %d\", missCounter.Validator, i)\n\t\t}\n\n\t\tif missCounter.Misses < 0 {\n\t\t\treturn fmt.Errorf(\"miss counter for validator %s cannot be negative: %d\", missCounter.Validator, missCounter.Misses)\n\t\t}\n\n\t\tif _, err := sdk.ValAddressFromBech32(missCounter.Validator); err != nil {\n\t\t\treturn fmt.Errorf(\"invalid feeder at index %d: %w\", i, err)\n\t\t}\n\n\t\tseenMissCounters[missCounter.Validator] = true\n\t}\n\n\tif err := gs.Params.ValidateBasic(); err != nil {\n\t\treturn err\n\t}\n\n\tsupportedTypes := make(map[string]bool)\n\n\tfor _, dataType := range gs.Params.DataTypes {\n\t\tsupportedTypes[dataType] = true\n\t}\n\n\tfor _, aggregate := range gs.Aggregates {\n\t\tif aggregate.Height < 1 {\n\t\t\treturn sdkerrors.Wrapf(sdkerrors.ErrInvalidHeight, \"height (%d) cannot be zero or negative\", aggregate.Height)\n\t\t}\n\n\t\toracleData := aggregate.Data\n\t\tdataID := oracleData.GetID()\n\n\t\tif seenAggregates[dataID] {\n\t\t\treturn sdkerrors.Wrap(ErrDuplicatedOracleData, dataID)\n\t\t}\n\n\t\tif !supportedTypes[oracleData.Type()] {\n\t\t\treturn sdkerrors.Wrap(ErrUnsupportedDataType, oracleData.Type())\n\t\t}\n\n\t\tif err := oracleData.Validate(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tseenAggregates[dataID] = true\n\t}\n\n\treturn nil\n}", "func (v *StringIsUserGroupOrWhitelisted) Validate(e *validator.Errors) {\n\tif IsGroupIsUserGroupOrWhitelisted(v.Field, v.Whitelist...) 
{\n\t\treturn\n\t}\n\n\te.Add(v.Name, StringIsUserGroupOrWhitelistedError(v))\n}", "func (m *GroupSearch) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\t// no validation rules for BaseDN\n\n\t// no validation rules for Filter\n\n\tif _, ok := _GroupSearch_Scope_InLookup[m.GetScope()]; !ok {\n\t\treturn GroupSearchValidationError{\n\t\t\tfield: \"Scope\",\n\t\t\treason: \"value must be in list [ sub one]\",\n\t\t}\n\t}\n\n\t// no validation rules for UserAttr\n\n\t// no validation rules for GroupAttr\n\n\t// no validation rules for NameAttr\n\n\treturn nil\n}", "func (c *hostNameFormatConfig) IsValid(name string) bool {\n\tfor _, validator := range c.validators {\n\t\tif !validator.IsValid(name) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (c *giteaConnector) groupsRequired() bool {\n\treturn len(c.orgs) > 0 || c.loadAllGroups\n}", "func (vr ValidateResult) Valid() bool {\n\treturn len(vr.errors) == 0\n}", "func (n name) IsValid() bool {\n\tfor _, supn := range supportedConditions {\n\t\tif n == supn {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (t *OpenconfigSystem_System_Aaa_ServerGroups_ServerGroup) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigSystem_System_Aaa_ServerGroups_ServerGroup\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (entity *MilitarySelective) Valid() (bool, error) {\n\tvar stack model.ErrorStack\n\n\tif ok, err := entity.WasBornAfter.Valid(); !ok {\n\t\tstack.Append(\"MilitarySelective\", err)\n\t}\n\n\tif entity.WasBornAfter.Value == \"Yes\" {\n\t\tif ok, err := entity.HasRegistered.Valid(); !ok {\n\t\t\tstack.Append(\"MilitarySelective\", err)\n\t\t} else {\n\t\t\tif entity.HasRegistered.Value == \"Yes\" {\n\t\t\t\tif ok, err := entity.RegistrationNumber.Valid(); !ok {\n\t\t\t\t\tstack.Append(\"MilitarySelective\", err)\n\t\t\t\t}\n\t\t\t} else if entity.HasRegistered.Value == \"No\" {\n\t\t\t\tif ok, err := entity.Explanation.Valid(); !ok {\n\t\t\t\t\tstack.Append(\"MilitarySelective\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn !stack.HasErrors(), stack\n}", "func (g *Group) Check(resource.Renderer) (resource.TaskStatus, error) {\n\tvar (\n\t\tgroupByGid *user.Group\n\t\tgidErr error\n\t\tgroupByNewName *user.Group\n\t\tnewNameErr error\n\t)\n\n\t// lookup the group by name and lookup the group by gid\n\t// the lookups return ErrUnsupported if the system is not supported\n\t// LookupGroup returns user.UnknownGroupError if the group is not found\n\t// LookupGroupID returns user.UnknownGroupIdError if the gid is not found\n\tgroupByName, nameErr := g.system.LookupGroup(g.Name)\n\tif g.GID != \"\" {\n\t\tgroupByGid, gidErr = g.system.LookupGroupID(g.GID)\n\t}\n\tif g.NewName != \"\" {\n\t\tgroupByNewName, newNameErr = g.system.LookupGroup(g.NewName)\n\t}\n\n\tstatus := &resource.Status{}\n\n\tif nameErr == ErrUnsupported {\n\t\tstatus.RaiseLevel(resource.StatusFatal)\n\t\treturn status, ErrUnsupported\n\t}\n\n\tswitch g.State {\n\tcase StatePresent:\n\t\tswitch {\n\t\tcase g.GID == \"\":\n\t\t\t_, nameNotFound := nameErr.(user.UnknownGroupError)\n\n\t\t\tswitch {\n\t\t\tcase g.NewName == \"\":\n\t\t\t\tswitch {\n\t\t\t\tcase nameNotFound:\n\t\t\t\t\tstatus.RaiseLevel(resource.StatusWillChange)\n\t\t\t\t\tstatus.Output = append(status.Output, \"add group\")\n\t\t\t\t\tstatus.AddDifference(\"group\", string(StateAbsent), fmt.Sprintf(\"group %s\", g.Name), \"\")\n\t\t\t\tcase groupByName != nil:\n\t\t\t\t\tstatus.Output = 
append(status.Output, fmt.Sprintf(\"group add: group %s already exists\", g.Name))\n\t\t\t\t}\n\t\t\tcase g.NewName != \"\":\n\t\t\t\t_, newNameNotFound := newNameErr.(user.UnknownGroupError)\n\n\t\t\t\tswitch {\n\t\t\t\tcase nameNotFound:\n\t\t\t\t\tstatus.RaiseLevel(resource.StatusCantChange)\n\t\t\t\t\tstatus.Output = append(status.Output, fmt.Sprintf(\"group modify: group %s does not exist\", g.Name))\n\t\t\t\t\treturn status, errors.New(\"cannot modify group\")\n\t\t\t\tcase newNameNotFound:\n\t\t\t\t\tstatus.RaiseLevel(resource.StatusWillChange)\n\t\t\t\t\tstatus.Output = append(status.Output, \"modify group name\")\n\t\t\t\t\tstatus.AddDifference(\"group\", fmt.Sprintf(\"group %s\", g.Name), fmt.Sprintf(\"group %s\", g.NewName), \"\")\n\t\t\t\tcase groupByNewName != nil:\n\t\t\t\t\tstatus.RaiseLevel(resource.StatusCantChange)\n\t\t\t\t\tstatus.Output = append(status.Output, fmt.Sprintf(\"group modify: group %s already exists\", g.NewName))\n\t\t\t\t\treturn status, errors.New(\"cannot modify group\")\n\t\t\t\t}\n\t\t\t}\n\t\tcase g.GID != \"\":\n\t\t\t_, nameNotFound := nameErr.(user.UnknownGroupError)\n\t\t\t_, gidNotFound := gidErr.(user.UnknownGroupIdError)\n\n\t\t\tswitch {\n\t\t\tcase g.NewName == \"\":\n\t\t\t\tswitch {\n\t\t\t\tcase nameNotFound && gidNotFound:\n\t\t\t\t\tstatus.RaiseLevel(resource.StatusWillChange)\n\t\t\t\t\tstatus.Output = append(status.Output, \"add group with gid\")\n\t\t\t\t\tstatus.AddDifference(\"group\", string(StateAbsent), fmt.Sprintf(\"group %s with gid %s\", g.Name, g.GID), \"\")\n\t\t\t\tcase nameNotFound:\n\t\t\t\t\tstatus.RaiseLevel(resource.StatusCantChange)\n\t\t\t\t\tstatus.Output = append(status.Output, fmt.Sprintf(\"group add: gid %s already exists\", g.GID))\n\t\t\t\t\treturn status, errors.New(\"cannot add group\")\n\t\t\t\tcase gidNotFound:\n\t\t\t\t\tstatus.RaiseLevel(resource.StatusWillChange)\n\t\t\t\t\tstatus.Output = append(status.Output, \"modify group gid\")\n\t\t\t\t\tstatus.AddDifference(\"group\", fmt.Sprintf(\"group %s with gid %s\", g.Name, groupByName.Gid), fmt.Sprintf(\"group %s with gid %s\", g.Name, g.GID), \"\")\n\t\t\t\tcase groupByName != nil && groupByGid != nil && groupByName.Name != groupByGid.Name || groupByName.Gid != groupByGid.Gid:\n\t\t\t\t\tstatus.RaiseLevel(resource.StatusCantChange)\n\t\t\t\t\tstatus.Output = append(status.Output, fmt.Sprintf(\"group add/modify: group %s and gid %s belong to different groups\", g.Name, g.GID))\n\t\t\t\t\treturn status, errors.New(\"cannot add or modify group\")\n\t\t\t\tcase groupByName != nil && groupByGid != nil && *groupByName == *groupByGid:\n\t\t\t\t\tstatus.RaiseLevel(resource.StatusCantChange)\n\t\t\t\t\tstatus.Output = append(status.Output, fmt.Sprintf(\"group add/modify: group %s with gid %s already exists\", g.Name, g.GID))\n\t\t\t\t\treturn status, errors.New(\"cannot add or modify group\")\n\t\t\t\t}\n\t\t\tcase g.NewName != \"\":\n\t\t\t\t_, newNameNotFound := newNameErr.(user.UnknownGroupError)\n\n\t\t\t\tswitch {\n\t\t\t\tcase newNameNotFound && gidNotFound:\n\t\t\t\t\tstatus.RaiseLevel(resource.StatusWillChange)\n\t\t\t\t\tstatus.Output = append(status.Output, \"modify group name and gid\")\n\t\t\t\t\tstatus.AddDifference(\"group\", fmt.Sprintf(\"group %s with gid %s\", g.Name, groupByName.Gid), fmt.Sprintf(\"group %s with gid %s\", g.NewName, g.GID), \"\")\n\t\t\t\tcase gidNotFound:\n\t\t\t\t\tstatus.RaiseLevel(resource.StatusCantChange)\n\t\t\t\t\tstatus.Output = append(status.Output, fmt.Sprintf(\"group modify: group %s already exists\", 
g.NewName))\n\t\t\t\t\treturn status, errors.New(\"cannot modify group\")\n\t\t\t\tcase newNameNotFound:\n\t\t\t\t\tstatus.RaiseLevel(resource.StatusCantChange)\n\t\t\t\t\tstatus.Output = append(status.Output, fmt.Sprintf(\"group modify: gid %s already exists\", g.GID))\n\t\t\t\t\treturn status, errors.New(\"cannot modify group\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\tcase StateAbsent:\n\t\tswitch {\n\t\tcase g.GID == \"\":\n\t\t\t_, nameNotFound := nameErr.(user.UnknownGroupError)\n\n\t\t\tswitch {\n\t\t\tcase nameNotFound:\n\t\t\t\tstatus.Output = append(status.Output, fmt.Sprintf(\"group delete: group %s does not exist\", g.Name))\n\t\t\tcase groupByName != nil:\n\t\t\t\tstatus.RaiseLevel(resource.StatusWillChange)\n\t\t\t\tstatus.Output = append(status.Output, \"delete group\")\n\t\t\t\tstatus.AddDifference(\"group\", fmt.Sprintf(\"group %s\", g.Name), string(StateAbsent), \"\")\n\t\t\t}\n\t\tcase g.GID != \"\":\n\t\t\t_, nameNotFound := nameErr.(user.UnknownGroupError)\n\t\t\t_, gidNotFound := gidErr.(user.UnknownGroupIdError)\n\n\t\t\tswitch {\n\t\t\tcase nameNotFound && gidNotFound:\n\t\t\t\tstatus.Output = append(status.Output, fmt.Sprintf(\"group delete: group %s and gid %s do not exist\", g.Name, g.GID))\n\t\t\tcase nameNotFound:\n\t\t\t\tstatus.RaiseLevel(resource.StatusCantChange)\n\t\t\t\tstatus.Output = append(status.Output, fmt.Sprintf(\"group delete: group %s does not exist\", g.Name))\n\t\t\t\treturn status, errors.New(\"cannot delete group\")\n\t\t\tcase gidNotFound:\n\t\t\t\tstatus.RaiseLevel(resource.StatusCantChange)\n\t\t\t\tstatus.Output = append(status.Output, fmt.Sprintf(\"group delete: gid %s does not exist\", g.GID))\n\t\t\t\treturn status, errors.New(\"cannot delete group\")\n\t\t\tcase groupByName != nil && groupByGid != nil && groupByName.Name != groupByGid.Name || groupByName.Gid != groupByGid.Gid:\n\t\t\t\tstatus.RaiseLevel(resource.StatusCantChange)\n\t\t\t\tstatus.Output = append(status.Output, fmt.Sprintf(\"group delete: group %s and gid %s belong to different groups\", g.Name, g.GID))\n\t\t\t\treturn status, errors.New(\"cannot delete group\")\n\t\t\tcase groupByName != nil && groupByGid != nil && *groupByName == *groupByGid:\n\t\t\t\tstatus.RaiseLevel(resource.StatusWillChange)\n\t\t\t\tstatus.Output = append(status.Output, \"delete group with gid\")\n\t\t\t\tstatus.AddDifference(\"group\", fmt.Sprintf(\"group %s with gid %s\", g.Name, g.GID), string(StateAbsent), \"\")\n\t\t\t}\n\t\t}\n\tdefault:\n\t\tstatus.RaiseLevel(resource.StatusFatal)\n\t\treturn status, fmt.Errorf(\"group: unrecognized state %s\", g.State)\n\t}\n\n\treturn status, nil\n}", "func (r *Resource) Valid() bool {\n\tif r.Name == \"\" {\n\t\tfmt.Println(\"no resource spec label\")\n\t\treturn false\n\t}\n\n\tfor _, c := range r.Credentials {\n\t\tif !c.Valid() {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (i *Instance) IsValid() bool {\n\tif i.IP == \"\" || i.Name == \"\" || i.Role == \"\" {\n\t\treturn false // empty fields\n\t}\n\tmatched, err := regexp.MatchString(`[a-zA-Z][a-zA-Z0-9_-]*-\\d+`, i.Name)\n\tif err != nil || !matched {\n\t\treturn false // invalid instance name\n\t}\n\n\tif _, exists := AvailableRoles[i.Role]; !exists {\n\t\treturn false // invalid role\n\t}\n\n\t// TODO: validate ip\n\treturn true\n}", "func validateAffinityGroupDuplicate(agList []ovirt.AffinityGroup) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tfor i, ag1 := range agList {\n\t\tfor _, ag2 := range agList[i+1:] {\n\t\t\tif ag1.Name == ag2.Name {\n\t\t\t\tif ag1.Priority != ag2.Priority 
||\n\t\t\t\t\tag1.Description != ag2.Description ||\n\t\t\t\t\tag1.Enforcing != ag2.Enforcing {\n\t\t\t\t\tallErrs = append(\n\t\t\t\t\t\tallErrs,\n\t\t\t\t\t\t&field.Error{\n\t\t\t\t\t\t\tType: field.ErrorTypeDuplicate,\n\t\t\t\t\t\t\tBadValue: errors.Errorf(\"Error validating affinity groups: found same \"+\n\t\t\t\t\t\t\t\t\"affinity group defined twice with different fields %v anf %v\", ag1, ag2)})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn allErrs\n}", "func (v FormulaAndFunctionApmDependencyStatName) IsValid() bool {\n\tfor _, existing := range allowedFormulaAndFunctionApmDependencyStatNameEnumValues {\n\t\tif existing == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (m *CreateTechAdGroupReq) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateData(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (gvk GVK) Validate() error {\n\t// Check if the qualified group has a valid DNS1123 subdomain value\n\tif gvk.QualifiedGroup() == \"\" {\n\t\treturn fmt.Errorf(groupRequired)\n\t}\n\tif err := validation.IsDNS1123Subdomain(gvk.QualifiedGroup()); err != nil {\n\t\t// NOTE: IsDNS1123Subdomain returns a slice of strings instead of an error, so no wrapping\n\t\treturn fmt.Errorf(\"either Group or Domain is invalid: %s\", err)\n\t}\n\n\t// Check if the version follows the valid pattern\n\tif gvk.Version == \"\" {\n\t\treturn fmt.Errorf(versionRequired)\n\t}\n\tif !versionRegex.MatchString(gvk.Version) {\n\t\treturn fmt.Errorf(\"Version must match %s (was %s)\", versionPattern, gvk.Version)\n\t}\n\n\t// Check if kind has a valid DNS1035 label value\n\tif gvk.Kind == \"\" {\n\t\treturn fmt.Errorf(kindRequired)\n\t}\n\tif errors := validation.IsDNS1035Label(strings.ToLower(gvk.Kind)); len(errors) != 0 {\n\t\t// NOTE: IsDNS1035Label returns a slice of strings instead of an error, so no wrapping\n\t\treturn fmt.Errorf(\"invalid Kind: %#v\", errors)\n\t}\n\n\t// Require kind to start with an uppercase character\n\t// NOTE: previous validation already fails for empty strings, gvk.Kind[0] will not panic\n\tif string(gvk.Kind[0]) == strings.ToLower(string(gvk.Kind[0])) {\n\t\treturn fmt.Errorf(\"invalid Kind: must start with an uppercase character\")\n\t}\n\n\treturn nil\n}", "func (r *ResourceSpec) Valid() bool {\n\tif r.Name == \"\" {\n\t\tfmt.Println(\"no resource spec label\")\n\t\treturn false\n\t}\n\n\tfor _, c := range r.Credentials {\n\t\tif !c.Valid() {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (provider *AlertProvider) IsValid() bool {\n\treturn len(provider.Token) > 0 && len(provider.ID) > 0\n}", "func (s *Kalman1State) Valid() (ok bool) {\n\treturn true\n}", "func (t *OpenconfigQos_Qos_ForwardingGroups_ForwardingGroup) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_ForwardingGroups_ForwardingGroup\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (t *DeviceGroup_DeviceGroup) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"DeviceGroup_DeviceGroup\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (m *SanNewIgroups) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateName(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOsType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := 
m.validateProtocol(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSanNewIgroupsInlineIgroups(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSanNewIgroupsInlineInitiatorObjects(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func constraintSupportsGroup(group string, constraintGroups []string) bool {\n\tfor _, g := range constraintGroups {\n\t\tif g == group {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (area *Circle) Valid() bool {\n\tif area.Radius <= 0 {\n\t\treturn false\n\t}\n\n\tif area.Lat() < -90.0 || area.Lat() > 90.0 {\n\t\treturn false\n\t}\n\n\tif area.Lng() < -180.0 || area.Lng() > 180.0 {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (g *Group) ValidateCreate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.NewErrors(), nil\n}", "func (box BoundingBox) IsValid() bool {\n\treturn box.Min.X < box.Max.X && box.Min.Y < box.Max.Y && box.Min.Z < box.Max.Z\n}", "func (m *OpenStackInstanceGroupV4Parameters) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (o *GetUniverseGroupsGroupIDNotFoundBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateError(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *Permissao) HasIsGroup() bool {\n\tif o != nil && o.IsGroup != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (v UtilizadorEstados) IsValid() bool {\n\tfor _, existing := range allowedUtilizadorEstadosEnumValues {\n\t\tif existing == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (entity *MilitaryDisciplinary) Valid() (bool, error) {\n\tvar stack model.ErrorStack\n\n\tif ok, err := entity.HasDisciplinary.Valid(); !ok {\n\t\tstack.Append(\"MilitaryDisciplinary\", err)\n\t}\n\n\tif entity.HasDisciplinary.Value == \"Yes\" {\n\t\tif ok, err := entity.List.Valid(); !ok {\n\t\t\tstack.Append(\"MilitaryDisciplinary\", err)\n\t\t}\n\t}\n\n\treturn !stack.HasErrors(), stack\n}", "func (m *Measurement) ValidateGroupBy(stmt *influxql.SelectStatement) error {\n\tfor _, d := range stmt.Dimensions {\n\t\tswitch e := d.Expr.(type) {\n\t\tcase *influxql.VarRef:\n\t\t\tif !m.HasTagKey(e.Val) {\n\t\t\t\treturn fmt.Errorf(\"can not use field in GROUP BY clause: %s\", e.Val)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (c *Check) Valid() bool {\n\n\tif c.Name == \"\" || c.Command == \"\" {\n\t\treturn false\n\t}\n\treturn true\n}", "func (perm OpPermRole) Valid() bool {\n\tswitch perm {\n\tcase opPermRoleRead, opPermRoleWrite, opPermRoleAssignedOnly:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func (c ConvertTransformFormat) IsValid() bool {\n\tswitch c {\n\tcase ConvertTransformFormatNone, ConvertTransformFormatQuantity, ConvertTransformFormatJSON:\n\t\treturn true\n\t}\n\treturn false\n}", "func (v *Validator) IsValid() bool {\n\treturn len(v.Errors) == 0\n}", "func (m *PortGroup) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// validation for a type composition with InventoryBase\n\tif err := m.InventoryBase.Validate(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateEquipmentSwitchCard(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateEthernetPorts(formats); err != nil {\n\t\tres = append(res, 
err)\n\t}\n\n\tif err := m.validateFcPorts(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateRegisteredDevice(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSubGroups(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (kind Kind) IsValid() bool {\n\treturn kind&Invalid == 0\n}", "func (t *OpenconfigSystem_System_Aaa_ServerGroups) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigSystem_System_Aaa_ServerGroups\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (mod ModGenerational) Validate() error {\n\t// Check the selection method presence\n\tif mod.Selector == nil {\n\t\treturn errors.New(\"'Selector' cannot be nil\")\n\t}\n\t// Check the crossover method presence\n\tif mod.Crossover == nil {\n\t\treturn errors.New(\"'Crossover' cannot be nil\")\n\t}\n\t// Check the mutation rate in the presence of a mutator\n\tif mod.Mutator != nil && (mod.MutRate < 0 || mod.MutRate > 1) {\n\t\treturn errors.New(\"'MutRate' should belong to the [0, 1] interval\")\n\t}\n\treturn nil\n}", "func checkGroup(group []string, value string) bool {\n\tfor _, v := range group {\n\t\tif v == value {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (d UserData) HasGroups() bool {\n\treturn d.ModelData.Has(models.NewFieldName(\"Groups\", \"group_ids\"))\n}", "func (b *Biphasic) IsValid(validator structure.Validator) bool {\n\treturn !(validator.HasError())\n}", "func (m *NetStatusIPGroup) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (m *JGroupCustomize) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (s *OpenconfigOfficeAp_System_Aaa_ServerGroups) Validate() error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigOfficeAp_System_Aaa_ServerGroups\"], s); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func ValidateNetworkGroupID(input interface{}, key string) (warnings []string, errors []error) {\n\tv, ok := input.(string)\n\tif !ok {\n\t\terrors = append(errors, fmt.Errorf(\"expected %q to be a string\", key))\n\t\treturn\n\t}\n\n\tif _, err := ParseNetworkGroupID(v); err != nil {\n\t\terrors = append(errors, err)\n\t}\n\n\treturn\n}" ]
[ "0.6658368", "0.6658297", "0.6622691", "0.6622691", "0.6617413", "0.65091795", "0.6458095", "0.64095837", "0.6326981", "0.63195777", "0.63174385", "0.62626475", "0.6220965", "0.6184991", "0.6175797", "0.6126297", "0.605352", "0.60421586", "0.6037481", "0.59436566", "0.5935652", "0.593151", "0.58979034", "0.589736", "0.5837236", "0.5779377", "0.5778347", "0.576137", "0.5756037", "0.5751909", "0.57339984", "0.5726274", "0.57235026", "0.5713847", "0.5707384", "0.56888336", "0.5668168", "0.56616366", "0.5656071", "0.5641168", "0.5625542", "0.5625514", "0.5619304", "0.56126654", "0.5589566", "0.55832297", "0.55796427", "0.5578311", "0.55597466", "0.55533123", "0.553891", "0.55370486", "0.55357814", "0.55280375", "0.5523519", "0.5499317", "0.5496424", "0.54838264", "0.5478927", "0.54735607", "0.54644835", "0.5461121", "0.5450441", "0.54417944", "0.5432601", "0.54272276", "0.5420762", "0.5417541", "0.5411955", "0.5410611", "0.54098153", "0.54015094", "0.53934574", "0.53896993", "0.53893965", "0.5383327", "0.5375151", "0.5374232", "0.53684103", "0.5363466", "0.5355224", "0.5340016", "0.5338072", "0.5337801", "0.5335798", "0.5334794", "0.5334771", "0.5334279", "0.5333926", "0.53301305", "0.5330022", "0.5317153", "0.5311465", "0.5308187", "0.5307489", "0.5301717", "0.5299502", "0.52990705", "0.52946824", "0.52892196" ]
0.77380604
0
MarshalJSON implements json.Marshaler interface.
func (g *Group) MarshalJSON() ([]byte, error) {
	aux := &groupAux{
		PublicKey: hex.EncodeToString(g.PublicKey.Bytes()),
		Signature: g.Signature,
	}
	return json.Marshal(aux)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Marshal(v Marshaler) ([]byte, error) {\n\tw := jwriter.Writer{}\n\tv.MarshalEasyJSON(&w)\n\treturn w.BuildBytes()\n}", "func Marshal(v interface{}) ([]byte, error) {\n\tif ImplementsPreJSONMarshaler(v) {\n\t\terr := v.(PreJSONMarshaler).PreMarshalJSON()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn json.Marshal(v)\n}", "func JSONMarshal(data interface{}) ([]byte, error) {\n\tvar b []byte\n\tvar err error\n\n\tb, err = json.MarshalIndent(data, \"\", \" \")\n\n\treturn b, err\n}", "func JsonMarshal(t interface{}) ([]byte, error) {\n\tbuffer := &bytes.Buffer{}\n\tenc := json.NewEncoder(buffer)\n\tenc.SetEscapeHTML(false)\n\terr := enc.Encode(t)\n\treturn buffer.Bytes(), err\n}", "func Marshal(v Marshaler) ([]byte, error) {\n\tif isNilInterface(v) {\n\t\treturn nullBytes, nil\n\t}\n\n\tw := jwriter.Writer{}\n\tv.MarshalTinyJSON(&w)\n\treturn w.BuildBytes()\n}", "func (j *JSON) Marshal(target interface{}) (output interface{}, err error) {\n\treturn jsonEncoding.Marshal(target)\n}", "func Marshal(v interface{}) ([]byte, error) {\n\treturn json.Marshal(v)\n}", "func jsonMarshal(t interface{}) ([]byte, error) {\n\tvar buffer bytes.Buffer\n\tencoder := json.NewEncoder(&buffer)\n\tencoder.SetEscapeHTML(false)\n\tif err := encoder.Encode(t); err != nil {\n\t\treturn nil, err\n\t}\n\t// Prettify\n\tvar out bytes.Buffer\n\tif err := json.Indent(&out, buffer.Bytes(), \"\", \"\\t\"); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn out.Bytes(), nil\n}", "func jsonMarshal(t interface{}) ([]byte, error) {\n\tbuffer := &bytes.Buffer{}\n\tencoder := json.NewEncoder(buffer)\n\tencoder.SetEscapeHTML(false)\n\tencoder.SetIndent(\"\", \" \")\n\terr := encoder.Encode(t)\n\treturn buffer.Bytes(), err\n}", "func (c *JSONCodec) Marshal(v interface{}) ([]byte, error) {\n\treturn json.Marshal(v)\n}", "func (c *JsonCodec) Marshal(object interface{}, options map[string]interface{}) ([]byte, error) {\n\treturn jsonEncoding.Marshal(object)\n}", "func marshal() {\n\tfmt.Println(\"=== json.marshal ===\")\n\tryan := &Person{\"Ryan\", 25}\n\twire, err := json.Marshal(ryan)\n\tcheck(err)\n\tfmt.Println(string(wire))\n}", "func Marshal(v interface{}) ([]byte, error) {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn canonicaljson.Transform(b)\n}", "func (s *Serializer) Marshal(v interface{}) ([]byte, error) {\n\treturn jsoniter.Marshal(v)\n}", "func (j Json) MarshalJSON() ([]byte, error) {\n\treturn j.ToJson()\n}", "func JSONMarshal(obj interface{}) ([]byte, error) {\n\tb := new(bytes.Buffer)\n\tenc := json.NewEncoder(b)\n\tenc.SetEscapeHTML(false)\n\terr := enc.Encode(obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// json.NewEncoder.Encode adds a final '\\n', json.Marshal does not.\n\t// Let's keep the default json.Marshal behaviour.\n\tres := b.Bytes()\n\tif len(res) >= 1 && res[len(res)-1] == '\\n' {\n\t\tres = res[:len(res)-1]\n\t}\n\treturn res, nil\n}", "func (j *JsonlMarshaler) Marshal(v interface{}) ([]byte, error) {\n\treturn json.Marshal(v)\n}", "func (j *JSON) Marshal(obj interface{}) error {\n\tres, err := json.Marshal(obj)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Call our implementation of\n\t// JSON UnmarshalJSON through json.Unmarshal\n\t// to set the result to the JSON object\n\treturn json.Unmarshal(res, j)\n}", "func NewJSONMarshaler() Marshaler {\n\treturn newJSONMarshaler()\n}", "func JsonMarshal(val any) ([]byte, error) {\n\tbuf := &bytes.Buffer{}\n\tencoder := json.NewEncoder(buf)\n\tencoder.SetEscapeHTML(false)\n\tif err := 
encoder.Encode(val); err != nil {\n\t\treturn nil, err\n\t}\n\t// Return without a trailing line feed.\n\tlineTerminatedJson := buf.Bytes()\n\treturn bytes.TrimSuffix(lineTerminatedJson, []byte(\"\\n\")), nil\n}", "func JSONMarshal(v interface{}) ([]byte, error) {\n\tb, err := json.MarshalIndent(v, \"\", \" \")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn b, err\n\t}\n\tb = bytes.Replace(b, []byte(\"\\\\u003c\"), []byte(\"<\"), -1)\n\tb = bytes.Replace(b, []byte(\"\\\\u003e\"), []byte(\">\"), -1)\n\tb = bytes.Replace(b, []byte(\"\\\\u0026\"), []byte(\"&\"), -1)\n\treturn b, err\n}", "func MarshalJSON(v interface{}, config MarshalConfig) ([]byte, error) {\n\tres, err := Marshal(v, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn json.Marshal(res)\n}", "func (j *JsonMarshaler) Marshal(v interface{}) ([]byte, error) {\n\tswitch v.(type) {\n\tcase *distribute.GetResponse:\n\t\tvalue, err := protobuf.MarshalAny(v.(*distribute.GetResponse).Fields)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn json.Marshal(\n\t\t\tmap[string]interface{}{\n\t\t\t\t\"fields\": value,\n\t\t\t},\n\t\t)\n\tcase *distribute.SearchResponse:\n\t\tvalue, err := protobuf.MarshalAny(v.(*distribute.SearchResponse).SearchResult)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn json.Marshal(\n\t\t\tmap[string]interface{}{\n\t\t\t\t\"search_result\": value,\n\t\t\t},\n\t\t)\n\tdefault:\n\t\treturn json.Marshal(v)\n\t}\n}", "func (p *HJSON) Marshal(o map[string]interface{}) ([]byte, error) {\n\treturn hjson.Marshal(o)\n}", "func (j *TextMarshaler) Marshal(v interface{}) ([]byte, error) {\n\treturn json.Marshal(v)\n}", "func JSONEncoder() Encoder { return jsonEncoder }", "func (j JSON) MarshalJSON() ([]byte, error) {\n\tif j.Valid {\n\t\treturn json.Marshal(j.Map)\n\t}\n\n\treturn json.Marshal(nil)\n}", "func marshalJSON(i *big.Int) ([]byte, error) {\n\ttext, err := i.MarshalText()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn json.Marshal(string(text))\n}", "func Marshal(p Payload) ([]byte, error) {\n\treturn json.Marshal(p)\n}", "func JSONMarshal(content interface{}, escape bool) ([]byte, error) {\n\tvar buf bytes.Buffer\n\tenc := json.NewEncoder(&buf)\n\tenc.SetEscapeHTML(escape)\n\tif err := enc.Encode(content); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func (m Json) MarshalJSON() ([]byte, error) {\n\tif m == nil {\n\t\treturn []byte(\"null\"), nil\n\t}\n\treturn m, nil\n}", "func Marshal(v interface{}) ([]byte, error) {\n\trv := reflect.ValueOf(v)\n\tif rv.Kind() != reflect.Slice {\n\t\treturn nil, &InvalidMarshalError{rv.Kind()}\n\t}\n\n\tvar buf bytes.Buffer\n\tencoder := json.NewEncoder(&buf)\n\tfor i := 0; i < rv.Len(); i++ {\n\t\tif err := encoder.Encode(rv.Index(i).Interface()); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn buf.Bytes(), nil\n}", "func (v Posts) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson783c1624EncodeGithubComGobwasVk(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}", "func Marshal(v interface{}) ([]byte, error) {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\n\treturn replaceUnicodeConversion(b), err\n}", "func (jz JSONGzipEncoding) Marshal(v interface{}) ([]byte, error) {\n\tbuf, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// var bufSizeBefore = len(buf)\n\n\tbuf, err = GzipEncode(buf)\n\t// coloredoutput.Infof(\"gzip_json_compress_ratio=%d/%d=%.2f\",\n\t// bufSizeBefore, len(buf), 
float64(bufSizeBefore)/float64(len(buf)))\n\treturn buf, err\n}", "func (v Post) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson783c1624EncodeGithubComGobwasVk7(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}", "func (j *jsonNative) MarshalJSON() ([]byte, error) {\n\tvar buf fflib.Buffer\n\tif j == nil {\n\t\tbuf.WriteString(\"null\")\n\t\treturn buf.Bytes(), nil\n\t}\n\terr := j.MarshalJSONBuf(&buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}", "func (s *ServiceSecrets) MarshalJson() ([]byte, error) {\n\treturn json.Marshal(s)\n}", "func (sc *Contract) Marshal() ([]byte, error) {\n\treturn json.Marshal(sc)\n}", "func Marshal(object interface{}) (data string, err error) {\n\tif t, err := json.Marshal(object); err != nil {\n\t\tdata = \"\"\n\t} else {\n\t\tdata = string(t)\n\t}\n\treturn\n}", "func (c *Codec) Marshal(v interface{}) ([]byte, error) {\n\tresult, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn nil, xerrors.New(err.Error())\n\t}\n\n\treturn result, nil\n}", "func (o *Object) MarshalJSON() ([]byte, error) {\n\tctx := _builtinJSON_stringifyContext{\n\t\tr: o.runtime,\n\t}\n\tex := o.runtime.vm.try(func() {\n\t\tif !ctx.do(o) {\n\t\t\tctx.buf.WriteString(\"null\")\n\t\t}\n\t})\n\tif ex != nil {\n\t\treturn nil, ex\n\t}\n\treturn ctx.buf.Bytes(), nil\n}", "func (v ExportItem) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonB83d7b77EncodeGoplaygroundMyjson1(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}", "func (js JSONSerializable) MarshalJSON() ([]byte, error) {\n\tif !js.Valid {\n\t\treturn json.Marshal(nil)\n\t}\n\tjsWithHex := replaceBytesWithHex(js.Val)\n\treturn json.Marshal(jsWithHex)\n}", "func MarshalJSON(v interface{}) []byte {\n\tdata, err := json.Marshal(v)\n\tAbortIf(err)\n\treturn data\n}", "func Marshal(in interface{}) ([]byte, error) {\n\tres, err := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(in)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"marshaling error: %w\", err)\n\t}\n\treturn res, nil\n}", "func MarshalJSON(v interface{}) string {\n\tcontents, _ := json.MarshalIndent(v, \"\", \" \")\n\treturn string(contents)\n}", "func (f *Formatter) Marshal(v interface{}) ([]byte, error) {\n\tdata, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn f.Format(data)\n}", "func (v BlitzedItemResponse) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson6a975c40EncodeJsonBenchmark4(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}", "func (v Join) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer21(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}", "func (d *Dump) MarshalJSON() ([]byte, error) {\n\td.mutex.RLock()\n\tdefer d.mutex.RUnlock()\n\n\tvar buffer bytes.Buffer\n\n\tbuffer.WriteString(`[`)\n\tfor i, item := range d.items {\n\t\tda, err := item.MarshalJSON()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbuffer.Write(da)\n\t\tif i != len(d.items)-1 {\n\t\t\tbuffer.WriteString(`,`)\n\t\t}\n\t}\n\tbuffer.WriteString(`]`)\n\n\treturn buffer.Bytes(), nil\n}", "func (p PatchObject) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"tags\", p.Tags)\n\treturn json.Marshal(objectMap)\n}", "func (o *ExportData) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(o.ToMap())\n}", "func (v PbTestObject) MarshalJSON() ([]byte, error) {\n\tw := 
jwriter.Writer{}\n\teasyjson5fcf962eEncodeGithubComJsonIteratorGoBenchmarkWith10IntFields(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}", "func (j *Publisher) MarshalJSON() ([]byte, error) {\n\tvar buf fflib.Buffer\n\tif j == nil {\n\t\tbuf.WriteString(\"null\")\n\t\treturn buf.Bytes(), nil\n\t}\n\terr := j.MarshalJSONBuf(&buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}", "func (v Post) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson5a72dc82EncodeGithubComTimRazumovTechnoparkDBAppModels6(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}", "func DefaultMarshalJSON(obj interface{}) ([]byte, error) {\n\treturn json.MarshalIndent(obj, \"\", \" \")\n}", "func (p Payload) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(p.Map())\n}", "func (v Posts) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson5a72dc82EncodeGithubComTimRazumovTechnoparkDBAppModels5(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}", "func (v BindParams) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonC5a4559bEncodeGithubComChromedpCdprotoTethering2(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}", "func (n *Node) Marshal() ([]byte, error) {\n\treturn json.Marshal(n)\n}", "func (n *Node) Marshal() ([]byte, error) {\n\treturn json.Marshal(n)\n}", "func (s SyncIdentityProviderProperties) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"resources\", s.Resources)\n\treturn json.Marshal(objectMap)\n}", "func (o *ExportDataPartial) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(o.ToMap())\n}", "func (v UsersHandler) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson84c0690eEncodeMainHandlers(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}", "func (obj *miner) MarshalJSON() ([]byte, error) {\n\tins := createJSONMinerFromMiner(obj)\n\treturn json.Marshal(ins)\n}", "func MarshalToWriter(v Marshaler, w io.Writer) (written int, err error) {\n\tif isNilInterface(v) {\n\t\treturn w.Write(nullBytes)\n\t}\n\n\tjw := jwriter.Writer{}\n\tv.MarshalTinyJSON(&jw)\n\treturn jw.DumpTo(w)\n}", "func (s *HTTPServer) marshalJSON(req *http.Request, obj interface{}) ([]byte, error) {\n\tif _, ok := req.URL.Query()[\"pretty\"]; ok || s.agent.config.DevMode {\n\t\tbuf, err := json.MarshalIndent(obj, \"\", \" \")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbuf = append(buf, \"\\n\"...)\n\t\treturn buf, nil\n\t}\n\n\tbuf, err := json.Marshal(obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf, err\n}", "func (j *Producer) MarshalJSON() ([]byte, error) {\n\tvar buf fflib.Buffer\n\tif j == nil {\n\t\tbuf.WriteString(\"null\")\n\t\treturn buf.Bytes(), nil\n\t}\n\terr := j.MarshalJSONBuf(&buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}", "func (v PostParams) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonC80ae7adEncodeGithubComDeiklovTechDbRomanovAndrGolangModels10(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}", "func Marshal(obj interface{}) ([]byte, error) {\n\treturn mgobson.Marshal(obj)\n}", "func EncodeJson(v interface{}) ([]byte, error) {\n\treturn json.ConfigCompatibleWithStandardLibrary.Marshal(v)\n}", "func (spbi SuccessfulPropertyBatchInfo) MarshalJSON() ([]byte, error) {\n\tspbi.Kind = KindSuccessful\n\tobjectMap := make(map[string]interface{})\n\tif spbi.Properties != nil {\n\t\tobjectMap[\"Properties\"] = spbi.Properties\n\t}\n\tif spbi.Kind != \"\" {\n\t\tobjectMap[\"Kind\"] = 
spbi.Kind\n\t}\n\treturn json.Marshal(objectMap)\n}", "func (m Message) Marshal() ([]byte, error) {\n\treturn jsoniter.Marshal(m)\n}", "func (v FetchMessages) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer24(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}", "func (s SyncStorageKeysInput) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"id\", s.ID)\n\treturn json.Marshal(objectMap)\n}", "func (v Stash) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonD2b7633eEncodeDrhyuComIndexerModels(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}", "func (a AppPatch) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"identity\", a.Identity)\n\tpopulate(objectMap, \"properties\", a.Properties)\n\tpopulate(objectMap, \"sku\", a.SKU)\n\tpopulate(objectMap, \"tags\", a.Tags)\n\treturn json.Marshal(objectMap)\n}", "func JSONEncode(data interface{}) string {\n\tbt, _ := json.Marshal(data)\n\treturn string(bt)\n}", "func (v SignInData) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonD2b7633eEncode20191OPGPlus2InternalPkgModels6(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}", "func (v Boo) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson42239ddeEncodeMsgpJson(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}", "func Marshal(data interface{}) ([]byte, error) {\n\tdocument, err := MarshalToStruct(data, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn json.Marshal(document)\n}", "func (pbi PropertyBatchInfo) MarshalJSON() ([]byte, error) {\n\tpbi.Kind = KindPropertyBatchInfo\n\tobjectMap := make(map[string]interface{})\n\tif pbi.Kind != \"\" {\n\t\tobjectMap[\"Kind\"] = pbi.Kind\n\t}\n\treturn json.Marshal(objectMap)\n}", "func (v InfoUser) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson6601e8cdEncodeJsongen3(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}", "func (r *Anilist) Marshal() ([]byte, error) {\n\treturn json.Marshal(r)\n}", "func jsonify(v interface{}) string { return string(mustMarshalJSON(v)) }", "func (v DocumentResponse) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson6a975c40EncodeJsonBenchmark3(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}", "func (m *Marshaler) JSON(v interface{}) ([]byte, error) {\n\tif _, ok := v.(proto.Message); ok {\n\t\tvar buf bytes.Buffer\n\t\tjm := &jsonpb.Marshaler{}\n\t\tjm.OrigName = true\n\t\tif err := jm.Marshal(&buf, v.(proto.Message)); err != nil {\n\t\t\treturn []byte{}, err\n\t\t}\n\n\t\tif m.FilterProtoJson {\n\t\t\treturn m.FilterJsonWithStruct(buf.Bytes(), v)\n\t\t}\n\t\treturn buf.Bytes(), nil\n\t}\n\treturn json.Marshal(v)\n}", "func (v SwapFillsInfo) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson25363b2dEncodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi32(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}", "func (o *Echo) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(o.ToMap())\n}", "func (v JSONB) MarshalJSON() ([]byte, error) {\n\tif bytes.Equal(v, []byte{}) || bytes.Equal(v, []byte(\"null\")) {\n\t\treturn []byte(\"{}\"), nil\n\t}\n\n\treturn v, nil\n}", "func (v Fruit) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonD2b7633eEncodeBackendInternalModels11(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}", "func (m *JSONMarshaller) Marshal(message proto.Message) ([]byte, error) {\n\treturn 
m.marshaller.MarshalResource(message)\n}", "func (s SyncIdentityProviderUpdate) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"properties\", s.Properties)\n\tpopulate(objectMap, \"systemData\", s.SystemData)\n\treturn json.Marshal(objectMap)\n}", "func marshal(v interface{}) string {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn string(b)\n}", "func (v AddScriptToEvaluateOnNewDocumentReturns) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonC5a4559bEncodeGithubComChromedpCdprotoPage105(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}", "func (k Kitten) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"eatsMiceYet\", k.EatsMiceYet)\n\tpopulate(objectMap, \"hisses\", k.Hisses)\n\tpopulate(objectMap, \"likesMilk\", k.LikesMilk)\n\tpopulate(objectMap, \"meows\", k.Meows)\n\tpopulate(objectMap, \"name\", k.Name)\n\treturn json.Marshal(objectMap)\n}", "func (s SyncIdentityProvider) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"id\", s.ID)\n\tpopulate(objectMap, \"name\", s.Name)\n\tpopulate(objectMap, \"properties\", s.Properties)\n\tpopulate(objectMap, \"systemData\", s.SystemData)\n\tpopulate(objectMap, \"type\", s.Type)\n\treturn json.Marshal(objectMap)\n}", "func (v Post) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonC80ae7adEncodeGithubComDeiklovTechDbRomanovAndrGolangModels11(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}", "func (m MigratedPools) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"migratedPools\", m.MigratedPools)\n\treturn json.Marshal(objectMap)\n}", "func (v item) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjsonD2b7633eEncodeGithubComZhekabyGoGeneratorMongoRequestwrapperTests(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}" ]
[ "0.76480657", "0.7543959", "0.7473467", "0.7430187", "0.7421684", "0.7403892", "0.73858833", "0.7313465", "0.72521377", "0.72313905", "0.7208603", "0.71704865", "0.71544856", "0.7066681", "0.70452994", "0.70277417", "0.70267886", "0.69951296", "0.69946826", "0.69923913", "0.69906896", "0.698712", "0.6982909", "0.6956585", "0.69414854", "0.6940962", "0.6932542", "0.6932474", "0.69294894", "0.69260335", "0.6921052", "0.69127536", "0.69094753", "0.6907121", "0.6875743", "0.6847798", "0.68401104", "0.67866564", "0.6782746", "0.67739207", "0.67660534", "0.6764664", "0.6761667", "0.6759137", "0.6741082", "0.6733371", "0.67190796", "0.67181057", "0.6700351", "0.66854936", "0.66831654", "0.6673261", "0.66658384", "0.66555274", "0.6653536", "0.6638429", "0.6637122", "0.662265", "0.661582", "0.66040504", "0.6590129", "0.6590129", "0.65872246", "0.65861446", "0.657899", "0.6572133", "0.65583825", "0.655834", "0.65572655", "0.6555868", "0.6555037", "0.65528667", "0.6546261", "0.6540513", "0.65404207", "0.65329766", "0.6531766", "0.6530692", "0.6528798", "0.65279204", "0.65278274", "0.65264696", "0.6525193", "0.6524901", "0.6524398", "0.65181106", "0.651717", "0.6514824", "0.651336", "0.651134", "0.6510136", "0.6507388", "0.65024835", "0.6500173", "0.64974135", "0.6494878", "0.64947814", "0.6494008", "0.6492304", "0.6488762", "0.6487041" ]
0.0
-1
UnmarshalJSON implements json.Unmarshaler interface.
func (g *Group) UnmarshalJSON(data []byte) error {
	aux := new(groupAux)
	if err := json.Unmarshal(data, aux); err != nil {
		return err
	}
	b, err := hex.DecodeString(aux.PublicKey)
	if err != nil {
		return err
	}
	pub := new(keys.PublicKey)
	if err := pub.DecodeBytes(b); err != nil {
		return err
	}
	g.PublicKey = pub
	if len(aux.Signature) != keys.SignatureLen {
		return errors.New("wrong signature length")
	}
	g.Signature = aux.Signature
	return nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (j *jsonNative) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (j *Data) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (j *Response) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (j *Json) UnmarshalJSON(b []byte) error {\n\tr, err := loadContentWithOptions(b, Options{\n\t\tType: ContentTypeJson,\n\t\tStrNumber: true,\n\t})\n\tif r != nil {\n\t\t// Value copy.\n\t\t*j = *r\n\t}\n\treturn err\n}", "func UnmarshalJSON(data []byte, v interface{}) {\n\terr := json.Unmarshal(data, v)\n\tAbortIf(err)\n}", "func (v *OneLike) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\tdecodeOneLike(&r, v)\n\treturn r.Error()\n}", "func (v *BlitzedItemResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson6a975c40DecodeJsonBenchmark4(&r, v)\n\treturn r.Error()\n}", "func (j *Json) UnmarshalJSON(data []byte) error {\n\terr := json.Unmarshal(data, &j.data)\n\n\tj.exists = (err == nil)\n\treturn err\n}", "func unmarshalJSON(j extv1.JSON, output *any) error {\n\tif len(j.Raw) == 0 {\n\t\treturn nil\n\t}\n\treturn json.Unmarshal(j.Raw, output)\n}", "func UnmarshalJSON(body io.Reader, v interface{}) error {\n\tdecoder := json.NewDecoder(body)\n\treturn decoder.Decode(v)\n}", "func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error {\n\td := json.NewDecoder(r)\n\tfor _, opt := range opts {\n\t\td = opt(d)\n\t}\n\tif err := d.Decode(&o); err != nil {\n\t\treturn fmt.Errorf(\"while decoding JSON: %v\", err)\n\t}\n\treturn nil\n}", "func (j *Type) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (j *Packet) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (j *GetMessagesResponse) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (j *Publisher) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (j *Message) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (j *JSON) Unmarshal(input, target interface{}) error {\n\t// take the input and convert it to target\n\treturn jsonEncoding.Unmarshal(input.([]byte), target)\n}", "func (j *User) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (j *RespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (j *RunRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (j *Server) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (j *PublishMessagesResponse) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (j 
*Balance) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (v *ItemCheckResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson6a975c40DecodeJsonBenchmark2(&r, v)\n\treturn r.Error()\n}", "func (v *UnloadCheckResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson6a975c40DecodeJsonBenchmark(&r, v)\n\treturn r.Error()\n}", "func (j *LuaFunction) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (v *Element) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson2(&r, v)\n\treturn r.Error()\n}", "func (j *ThirdParty) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (this *Simple) UnmarshalJSON(b []byte) error {\n\treturn TypesUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}", "func (j *ModifyQueueResponse) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (j *Event) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (v *OneUpdateLike) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\tdecodeOneUpdateLike(&r, v)\n\treturn r.Error()\n}", "func (j *Error) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (u *Unstructured) UnmarshalJSON(b []byte) error {\n\t_, _, err := UnstructuredJSONScheme.Decode(b, nil, u)\n\treturn err\n}", "func (j *RegisterRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func UnmarshalFromJSON(data []byte, target interface{}) error {\n\tvar ctx map[string]interface{}\n\terr := json.Unmarshal(data, &ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn Unmarshal(ctx, target)\n}", "func (j *RunPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (resp *Response) JSONUnmarshal(v interface{}) error {\n\tif resp == nil {\n\t\treturn errors.New(\"empty response\")\n\t}\n\n\tif resp.Err != nil {\n\t\treturn resp.Err\n\t}\n\n\t// get data from resp\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer resp.Body.Close()\n\terr = json.Unmarshal(data, &v)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (j *UnInstallRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (j *Producer) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (receiver *Type) UnmarshalJSON(src []byte) error {\n\tif nil == receiver {\n\t\treturn errNilReceiver\n\t}\n\n\tvar s string\n\tif err := json.Unmarshal(src, &s); nil != err {\n\t\treturn err\n\t}\n\n\tif err := receiver.Scan(s); nil != err {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (w *Entry) UnmarshalJSON(bb []byte) error {\n\t<<!!YOUR_CODE!!>>\n}", "func (r *RawJSON) UnmarshalJSON(data []byte) error {\n\t*r = 
RawJSON(data)\n\treturn nil\n}", "func (j *PurgeQueueResponse) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (t *Type) UnmarshalJSON(b []byte) error {\n\tvar text string\n\tif err := json.Unmarshal(b, &text); err != nil {\n\t\treturn err\n\t}\n\n\treturn t.UnmarshalText([]byte(text))\n}", "func (v *Raw) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer10(&r, v)\n\treturn r.Error()\n}", "func (i *Transform) UnmarshalJSON(data []byte) error {\n\tvar s string\n\tif err := json.Unmarshal(data, &s); err != nil {\n\t\treturn fmt.Errorf(\"Transform should be a string, got %[1]s\", data)\n\t}\n\n\tvar err error\n\t*i, err = ParseTransformString(s)\n\treturn err\n}", "func (v *User) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson9e1087fdDecodeHw3Bench(&r, v)\n\treturn r.Error()\n}", "func (v *GetUserResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson84c0690eDecodeMainHandlers1(&r, v)\n\treturn r.Error()\n}", "func (v *OrderCheckResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson6a975c40DecodeJsonBenchmark1(&r, v)\n\treturn r.Error()\n}", "func UnmarshalJSON(b []byte) (dgo.Value, error) {\n\tdec := json.NewDecoder(bytes.NewReader(b))\n\tdec.UseNumber()\n\treturn jsonDecodeValue(dec)\n}", "func (t *BlockTest) UnmarshalJSON(in []byte) error {\n\treturn json.Unmarshal(in, &t.Json)\n}", "func (j *Regulations) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (j *qProxyClient) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (t *Tag) UnmarshalJSON(dat []byte) error {\n\t// get string\n\tvar str string\n\terr := json.Unmarshal(dat, &str)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// parse tag\n\t*t = ParseTag(str)\n\n\treturn nil\n}", "func unmarshal() {\n\tfmt.Println(\"=== json.unmarshal ===\")\n\tvar jsonBlob = []byte(`[\n\t\t{\"name\": \"Bill\", \"age\": 109},\n\t\t{\"name\": \"Bob\", \"age\": 5}\n\t]`)\n\n\tvar persons []Person\n\terr := json.Unmarshal(jsonBlob, &persons)\n\tcheck(err)\n\n\tfmt.Printf(\"%+v\\n\", persons)\n}", "func (j *JsonMarshaler) Unmarshal(data []byte, v interface{}) error {\n\treturn json.Unmarshal(data, v)\n}", "func (j *DeleteQueueResponse) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (j *GetQueueResponse) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (j *UnInstallPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (j *HealthcheckResponse) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (v *BidResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson326edDecodeGithubComApplifierGoOpenrtbOpenrtb2(&r, v)\n\treturn r.Error()\n}", "func (v *DocumentResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson6a975c40DecodeJsonBenchmark3(&r, v)\n\treturn r.Error()\n}", "func (j 
*FactoryPluginRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func UnmarshalJSON(b []byte, discriminator string, f Factory) (interface{}, error) {\n\tm := make(map[string]interface{})\n\terr := json.Unmarshal(b, &m)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn Decode(m, discriminator, f)\n}", "func (j *BootInitiationRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (v *VisitArray) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonE564fc13DecodeGithubComLa0rgHighloadcupModel(&r, v)\n\treturn r.Error()\n}", "func (m *MapTransform) UnmarshalJSON(b []byte) error {\n\treturn json.Unmarshal(b, &m.Pairs)\n}", "func (v *BidResponse) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson326edDecodeGithubComMxmCherryOpenrtb(&r, v)\n\treturn r.Error()\n}", "func (v *User) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson2bc03518DecodeLangTaskOnBench(&r, v)\n\treturn r.Error()\n}", "func jsonDecode(reader io.ReadCloser, v interface{}) error {\n\tdecoder := json.NewDecoder(reader)\n\terr := decoder.Decode(v)\n\treturn err\n}", "func (v *ExportItem) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson1(&r, v)\n\treturn r.Error()\n}", "func (v *Foo) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonAbe23ddeDecodeGithubComUberZanzibarExamplesExampleGatewayBuildGenCodeClientsCorgeCorge1(&r, v)\n\treturn r.Error()\n}", "func (j *LuaTable) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (v *FormulaAndFunctionResponseFormat) UnmarshalJSON(src []byte) error {\n\tvar value string\n\terr := json.Unmarshal(src, &value)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*v = FormulaAndFunctionResponseFormat(value)\n\treturn nil\n}", "func (j *CreateQueueResponse) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (this *Service) UnmarshalJSON(b []byte) error {\n\treturn CommonUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}", "func (j *PeerInfo) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (j *AckMessagesResponse) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (j *LuaInt) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func JSONDecode(ctx context.Context, data []byte, obj interface{}) error {\n\treturn json.Unmarshal(data, obj)\n}", "func (s *Serializer) Unmarshal(data []byte, v interface{}) error {\n\treturn jsoniter.Unmarshal(data,v)\n}", "func (this *QJson) UnmarshalJSON(p []byte) error {\n\tdec := json.NewDecoder(bytes.NewBuffer(p))\n\tdec.UseNumber()\n\tvar obj interface{}\n\terr := dec.Decode(&obj)\n\tif err != nil {\n\t\treturn err\n\t}\n\tswitch val := obj.(type) {\n\tcase []interface{}:\n\t\tthis.data = NewjsonArray(val)\n\tcase map[string]interface{}:\n\t\tthis.data = NewjsonObject(val)\n\tdefault:\n\t\tthis.data = obj\n\t}\n\treturn nil\n}", "func (v *TransactionResponse) 
UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonE82c8e88DecodeGithubComKamaiuOandaGoModel5(&r, v)\n\treturn r.Error()\n}", "func (j *MessageReceipt) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (j *LuaString) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (t *Transfer) UnmarshalJSON(data []byte) error {\n\tif id, ok := ParseID(data); ok {\n\t\tt.ID = id\n\t\treturn nil\n\t}\n\n\ttype transfer Transfer\n\tvar v transfer\n\tif err := json.Unmarshal(data, &v); err != nil {\n\t\treturn err\n\t}\n\n\t*t = Transfer(v)\n\treturn nil\n}", "func (m *Listener) UnmarshalJSON(b []byte) error {\n\treturn ListenerJSONUnmarshaler.Unmarshal(bytes.NewReader(b), m)\n}", "func (v *Visit) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonE564fc13DecodeGithubComLa0rgHighloadcupModel1(&r, v)\n\treturn r.Error()\n}", "func (j JSON) Unmarshal(dest interface{}) error {\n\tif dest == nil {\n\t\treturn errors.New(\"destination is nil, not a valid pointer to an object\")\n\t}\n\n\t// Call our implementation of\n\t// JSON MarshalJSON through json.Marshal\n\t// to get the value of the JSON object\n\tres, err := json.Marshal(j)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn json.Unmarshal(res, dest)\n}", "func (m *Raw) UnmarshalJSON(data []byte) error {\n\t*m = data\n\treturn nil\n}", "func (m *gohaiMarshaler) UnmarshalJSON(bytes []byte) error {\n\tfirstUnmarshall := \"\"\n\terr := json.Unmarshal(bytes, &firstUnmarshall)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal([]byte(firstUnmarshall), &(m.gohai))\n\treturn err\n}", "func (d *Data) UnmarshalJSON(data []byte) error {\n\ttemp := struct {\n\t\tObject string `json:\"object\"`\n\t}{}\n\tif err := json.Unmarshal(data, &temp); err != nil {\n\t\treturn err\n\t}\n\tif temp.Object == \"card\" {\n\t\tvar c Card\n\t\tif err := json.Unmarshal(data, &c); err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.Card = &c\n\t\td.BankAccount = nil\n\t} else if temp.Object == \"bank_account\" {\n\t\tvar ba BankAccount\n\t\tif err := json.Unmarshal(data, &ba); err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.BankAccount = &ba\n\t\td.Card = nil\n\t} else {\n\t\treturn errors.New(\"Invalid object value\")\n\t}\n\treturn nil\n}", "func (j *FF_BidRequest) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (v *Fruit) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonD2b7633eDecodeBackendInternalModels11(&r, v)\n\treturn r.Error()\n}", "func (j *EventMsg) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}", "func (v *UsersHandler) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson84c0690eDecodeMainHandlers(&r, v)\n\treturn r.Error()\n}", "func (v *Post) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson783c1624DecodeGithubComGobwasVk7(&r, v)\n\treturn r.Error()\n}", "func (v *CBPerson) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonE242b40eDecodeGithubComExampleSample2(&r, v)\n\treturn r.Error()\n}", "func (v *FetchMessages) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: 
data}\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer24(&r, v)\n\treturn r.Error()\n}", "func (m *Manifest) UnmarshalJSON(data []byte) error {\n\ttype tmp Manifest\n\tvar t tmp\n\tif err := json.Unmarshal(data, &t); err != nil {\n\t\treturn err\n\t}\n\t*m = Manifest(t)\n\treturn m.Verify()\n}" ]
[ "0.74120563", "0.7275186", "0.7274783", "0.72390145", "0.72233593", "0.7127497", "0.70391864", "0.7021014", "0.7002435", "0.69946486", "0.698654", "0.6940797", "0.692335", "0.6918099", "0.6912977", "0.69057155", "0.68950945", "0.68927187", "0.68840563", "0.6883446", "0.6873066", "0.68662053", "0.6864303", "0.6854807", "0.68486434", "0.684757", "0.68323874", "0.6830249", "0.6803853", "0.6792638", "0.6788284", "0.6786422", "0.67841005", "0.6774881", "0.6761799", "0.676135", "0.6756674", "0.675211", "0.6734719", "0.6731663", "0.67307556", "0.672757", "0.6721059", "0.67208624", "0.6716941", "0.6716182", "0.67118144", "0.67000794", "0.6695617", "0.66943914", "0.66929764", "0.66795", "0.6668189", "0.6663166", "0.6661734", "0.66557336", "0.665345", "0.6641579", "0.6635959", "0.66295487", "0.66149294", "0.6603051", "0.6600649", "0.65997356", "0.6599716", "0.65980417", "0.6590869", "0.658715", "0.6581786", "0.6578255", "0.6577799", "0.6576928", "0.65691215", "0.6565638", "0.6564801", "0.6561339", "0.65560853", "0.65554833", "0.6554537", "0.6553924", "0.6552233", "0.6550794", "0.6549934", "0.6536469", "0.6532494", "0.65286946", "0.6525166", "0.6524922", "0.652255", "0.65193224", "0.65193135", "0.65151155", "0.6514324", "0.6505949", "0.65058815", "0.65048563", "0.6500065", "0.6498455", "0.64954054", "0.6491076", "0.6485235" ]
0.0
-1
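
The negatives in the record above are all variants of one Go idiom: a type satisfies json.Unmarshaler by defining UnmarshalJSON and delegating the actual decoding to a generated lexer or to json.Unmarshal. A minimal self-contained sketch of that pattern using only the standard library; the Point type and its fields are illustrative assumptions, not taken from any record:

package main

import (
	"encoding/json"
	"fmt"
)

// Point implements json.Unmarshaler. Decoding goes through an alias
// type with no methods, which avoids recursing back into UnmarshalJSON.
type Point struct {
	X, Y int
}

func (p *Point) UnmarshalJSON(data []byte) error {
	type alias Point
	var a alias
	if err := json.Unmarshal(data, &a); err != nil {
		return err
	}
	*p = Point(a)
	return nil
}

func main() {
	var p Point
	if err := json.Unmarshal([]byte(`{"X":1,"Y":2}`), &p); err != nil {
		panic(err)
	}
	fmt.Println(p) // prints {1 2}
}
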
ToStackItem converts Group to stackitem.Item.
func (g *Group) ToStackItem() stackitem.Item { return stackitem.NewStruct([]stackitem.Item{ stackitem.NewByteArray(g.PublicKey.Bytes()), stackitem.NewByteArray(g.Signature), }) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (g *Group) FromStackItem(item stackitem.Item) error {\n\tif item.Type() != stackitem.StructT {\n\t\treturn errors.New(\"invalid Group stackitem type\")\n\t}\n\tgroup := item.Value().([]stackitem.Item)\n\tif len(group) != 2 {\n\t\treturn errors.New(\"invalid Group stackitem length\")\n\t}\n\tpKey, err := group[0].TryBytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\tg.PublicKey, err = keys.NewPublicKeyFromBytes(pKey, elliptic.P256())\n\tif err != nil {\n\t\treturn err\n\t}\n\tsig, err := group[1].TryBytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(sig) != keys.SignatureLen {\n\t\treturn errors.New(\"wrong signature length\")\n\t}\n\tg.Signature = sig\n\treturn nil\n}", "func (self *GameObjectCreator) Group2O(parent interface{}, name string, addToStage bool) *Group{\n return &Group{self.Object.Call(\"group\", parent, name, addToStage)}\n}", "func (d *Service) GroupRetroItem(RetroID string, ItemId string, GroupId string) ([]*thunderdome.RetroItem, error) {\n\tif _, err := d.DB.Exec(\n\t\t`UPDATE thunderdome.retro_item SET group_id = $3 WHERE retro_id = $1 AND id = $2;`,\n\t\tRetroID, ItemId, GroupId,\n\t); err != nil {\n\t\td.Logger.Error(\"update retro item error\", zap.Error(err))\n\t}\n\n\titems := d.GetRetroItems(RetroID)\n\n\treturn items, nil\n}", "func ToGroupMsg(in *antreatypes.Group, out *controlplane.Group, includeBody bool) {\n\tout.UID = in.UID\n\tout.Name = in.Name\n\tif !includeBody {\n\t\treturn\n\t}\n\tfor _, member := range in.GroupMembers {\n\t\tout.GroupMembers = append(out.GroupMembers, *member)\n\t}\n}", "func (sg *StorageGroup) ToV2() *storagegroup.StorageGroup {\n\treturn (*storagegroup.StorageGroup)(sg)\n}", "func ConvertGroupType(azsg *azsresources.Group) *armhelper.Group {\n\treturn &armhelper.Group{\n\t\tName: azsg.Name,\n\t\tLocation: azsg.Location,\n\t\tManagedBy: azsg.ManagedBy,\n\t\tTags: azsg.Tags,\n\t}\n}", "func NewGroup()(*Group) {\n m := &Group{\n DirectoryObject: *NewDirectoryObject(),\n }\n odataTypeValue := \"#microsoft.graph.group\";\n m.SetOdataType(&odataTypeValue);\n return m\n}", "func (self *GameObjectCreator) GroupI(args ...interface{}) *Group{\n return &Group{self.Object.Call(\"group\", args)}\n}", "func (self *GameObjectCreator) Group(parent interface{}) *Group{\n return &Group{self.Object.Call(\"group\", parent)}\n}", "func (s *ItemStack) Push(t Item) {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\ts.items = append(s.items, t)\n}", "func (client *ClientImpl) MoveGroupToPage(ctx context.Context, args MoveGroupToPageArgs) (*Group, error) {\n\tif args.Group == nil {\n\t\treturn nil, &azuredevops.ArgumentNilError{ArgumentName: \"args.Group\"}\n\t}\n\trouteValues := make(map[string]string)\n\tif args.ProcessId == nil {\n\t\treturn nil, &azuredevops.ArgumentNilError{ArgumentName: \"args.ProcessId\"}\n\t}\n\trouteValues[\"processId\"] = (*args.ProcessId).String()\n\tif args.WitRefName == nil || *args.WitRefName == \"\" {\n\t\treturn nil, &azuredevops.ArgumentNilOrEmptyError{ArgumentName: \"args.WitRefName\"}\n\t}\n\trouteValues[\"witRefName\"] = *args.WitRefName\n\tif args.PageId == nil || *args.PageId == \"\" {\n\t\treturn nil, &azuredevops.ArgumentNilOrEmptyError{ArgumentName: \"args.PageId\"}\n\t}\n\trouteValues[\"pageId\"] = *args.PageId\n\tif args.SectionId == nil || *args.SectionId == \"\" {\n\t\treturn nil, &azuredevops.ArgumentNilOrEmptyError{ArgumentName: \"args.SectionId\"}\n\t}\n\trouteValues[\"sectionId\"] = *args.SectionId\n\tif args.GroupId == nil || *args.GroupId == \"\" {\n\t\treturn nil, 
&azuredevops.ArgumentNilOrEmptyError{ArgumentName: \"args.GroupId\"}\n\t}\n\trouteValues[\"groupId\"] = *args.GroupId\n\n\tqueryParams := url.Values{}\n\tif args.RemoveFromPageId == nil {\n\t\treturn nil, &azuredevops.ArgumentNilError{ArgumentName: \"removeFromPageId\"}\n\t}\n\tqueryParams.Add(\"removeFromPageId\", *args.RemoveFromPageId)\n\tif args.RemoveFromSectionId == nil {\n\t\treturn nil, &azuredevops.ArgumentNilError{ArgumentName: \"removeFromSectionId\"}\n\t}\n\tqueryParams.Add(\"removeFromSectionId\", *args.RemoveFromSectionId)\n\tbody, marshalErr := json.Marshal(*args.Group)\n\tif marshalErr != nil {\n\t\treturn nil, marshalErr\n\t}\n\tlocationId, _ := uuid.Parse(\"766e44e1-36a8-41d7-9050-c343ff02f7a5\")\n\tresp, err := client.Client.Send(ctx, http.MethodPut, locationId, \"6.0-preview.1\", routeValues, queryParams, bytes.NewReader(body), \"application/json\", \"application/json\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar responseValue Group\n\terr = client.Client.UnmarshalBody(resp, &responseValue)\n\treturn &responseValue, err\n}", "func (s *itemStack) Push(t Item) {\n\ts.lock.Lock()\n\ts.lock.Unlock()\n\ts.items = append(s.items, t)\n}", "func (g *Group) Pack(intent Intent, operation OperationType) (*Message, error) {\n\tblob, err := json.Marshal(g)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Error marshalling group while packing\")\n\t}\n\n\treturn NewMessage(\n\t\tg.ID,\n\t\tintent,\n\t\tModelType,\n\t\t\"group\",\n\t\toperation,\n\t\tblob,\n\t\tnil), nil\n}", "func (s *Stack) Push(value []byte) (*Item, error) {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\t// Check if stack is closed.\n\tif !s.isOpen {\n\t\treturn nil, ErrDBClosed\n\t}\n\n\t// Create new Item.\n\titem := &Item{\n\t\tID: s.head + 1,\n\t\tKey: idToKey(s.head + 1),\n\t\tValue: value,\n\t}\n\n\t// Add it to the stack.\n\tif err := s.db.Update(func(txn *badger.Txn) error {\n\t\terr := txn.Set(item.Key, item.Value)\n\t\treturn err\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Increment head position.\n\ts.head++\n\n\treturn item, nil\n}", "func NewStack() *itemStack {\n\ts := &itemStack{}\n\ts.items = []Item{}\n\treturn s\n}", "func (g *Group) TOMLValue() interface{} {\n\treturn &GroupTOML{}\n}", "func NewStack() *ItemStack {\n\ts := &ItemStack{}\n\ts.items = []Items{}\n\treturn s\n}", "func (m *TeamItemRequestBuilder) Group()(*i8a1cdbeac728d5d9d3409d0d7085c53384ad37435e0292d966ed94bbc4155a05.GroupRequestBuilder) {\n return i8a1cdbeac728d5d9d3409d0d7085c53384ad37435e0292d966ed94bbc4155a05.NewGroupRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (j *GroupPush) GroupPush(req *PushRequest) (map[string]PushResponse, error) {\n\turl := j.GetURL(\"push\") + \"grouppush\"\n\tbuf, err := json.Marshal(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := j.request(\"POST\", url, bytes.NewReader(buf), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tret := new(map[string]PushResponse)\n\terr2 := json.Unmarshal(resp, ret)\n\tif err2 != nil {\n\t\treturn nil, err\n\t}\n\treturn *ret, err\n}", "func (o *IgroupCreateResponse) ToXML() (string, error) {\n\toutput, err := xml.MarshalIndent(o, \" \", \" \")\n\tif err != nil {\n\t\tlog.Errorf(\"error: %v\", err)\n\t}\n\treturn string(output), err\n}", "func (s *Stack) Push(i int) {\n\ts.items = append(s.items, i)\n}", "func (self *GameObjectCreator) Group3O(parent interface{}, name string, addToStage bool, enableBody bool) *Group{\n return &Group{self.Object.Call(\"group\", parent, name, addToStage, enableBody)}\n}", 
"func (o GroupBadgeOutput) Group() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *GroupBadge) pulumi.StringOutput { return v.Group }).(pulumi.StringOutput)\n}", "func (s *Stack) Push(item interface{}) {\n\toldNode := s.node\n\tnewNode := nodeStack{TElement: item, Previous: &oldNode}\n\ts.node = newNode\n\ts.size++\n}", "func infoToInventory(info *resource.Info) (*Inventory, error) {\n\tif info == nil || info.Object == nil {\n\t\treturn nil, fmt.Errorf(\"Empty resource.Info can not calculate as inventory.\\n\")\n\t}\n\tobj := info.Object\n\tgk := obj.GetObjectKind().GroupVersionKind().GroupKind()\n\treturn createInventory(info.Namespace, info.Name, gk)\n}", "func (me *XsdGoPkgHasElem_ItemIconObjectExtensionGroup) Walk() (err error) {\r\n\tif fn := WalkHandlers.XsdGoPkgHasElem_ItemIconObjectExtensionGroup; me != nil {\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\treturn\r\n}", "func (s *GroupsService) RestoreGroup(gid interface{}, options ...RequestOptionFunc) (*Group, *Response, error) {\n\tgroup, err := parseID(gid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"groups/%s/restore\", PathEscape(group))\n\n\treq, err := s.client.NewRequest(http.MethodPost, u, nil, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tg := new(Group)\n\tresp, err := s.client.Do(req, g)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn g, resp, nil\n}", "func (iu *ItemUpdate) ClearItemGroup() *ItemUpdate {\n\tiu.mutation.ClearItemGroup()\n\treturn iu\n}", "func (m *ItemTermStoreGroupsGroupItemRequestBuilder) Get(ctx context.Context, requestConfiguration *ItemTermStoreGroupsGroupItemRequestBuilderGetRequestConfiguration)(ia3c27b33aa3d3ed80f9de797c48fbb8ed73f13887e301daf51f08450e9a634a3.Groupable, error) {\n requestInfo, err := m.ToGetRequestInformation(ctx, requestConfiguration);\n if err != nil {\n return nil, err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n }\n res, err := m.BaseRequestBuilder.RequestAdapter.Send(ctx, requestInfo, ia3c27b33aa3d3ed80f9de797c48fbb8ed73f13887e301daf51f08450e9a634a3.CreateGroupFromDiscriminatorValue, errorMapping)\n if err != nil {\n return nil, err\n }\n if res == nil {\n return nil, nil\n }\n return res.(ia3c27b33aa3d3ed80f9de797c48fbb8ed73f13887e301daf51f08450e9a634a3.Groupable), nil\n}", "func (q *Stack) Push(val interface{}) {\n\tq.Items.Append(val)\n}", "func (o *IgroupAddResponse) ToXML() (string, error) {\n\toutput, err := xml.MarshalIndent(o, \" \", \" \")\n\tif err != nil {\n\t\tlog.Errorf(\"error: %v\", err)\n\t}\n\treturn string(output), err\n}", "func (_IFactorySpace *IFactorySpaceTransactor) CreateGroup(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _IFactorySpace.contract.Transact(opts, \"createGroup\")\n}", "func (o *IgroupCreateResponseResult) ToXML() (string, error) {\n\toutput, err := xml.MarshalIndent(o, \" \", \" \")\n\tif err != nil {\n\t\tlog.Errorf(\"error: %v\", err)\n\t}\n\treturn string(output), err\n}", 
"func (ss *SliceStack) Push(item adts.ContainerElement) bool {\n\treturn ss.Add(item)\n}", "func (v *SeasonResDB) ToItem() *SeasonResItem {\n\tres := &SeasonResItem{\n\t\tCTime: v.CTime.Time().Format(_TimeFormat),\n\t\tSeasonRes: v.SeasonRes,\n\t}\n\tswitch v.Check {\n\tcase 0: // reject\n\t\tres.Check = 2\n\tcase 1: // passed\n\t\tres.Check = 1\n\tdefault:\n\t\tres.Check = 0\n\t}\n\tif v.InjectTime > 0 {\n\t\tres.InjectTime = v.InjectTime.Time().Format(_TimeFormat)\n\t}\n\treturn res\n}", "func (iggb *ItemGroupGroupBy) String(ctx context.Context) (_ string, err error) {\n\tvar v []string\n\tif v, err = iggb.Strings(ctx); err != nil {\n\t\treturn\n\t}\n\tswitch len(v) {\n\tcase 1:\n\t\treturn v[0], nil\n\tcase 0:\n\t\terr = &NotFoundError{itemgroup.Label}\n\tdefault:\n\t\terr = fmt.Errorf(\"ent: ItemGroupGroupBy.Strings returned %d results when one was expected\", len(v))\n\t}\n\treturn\n}", "func (s *BaseMySqlParserListener) EnterGroupByItem(ctx *GroupByItemContext) {}", "func (ls *ListStack) Push(item adts.ContainerElement) bool {\n\treturn ls.Add(item)\n}", "func (s *GroupsService) TransferGroup(gid interface{}, pid interface{}, options ...RequestOptionFunc) (*Group, *Response, error) {\n\tgroup, err := parseID(gid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"groups/%s/projects/%s\", PathEscape(group), PathEscape(project))\n\n\treq, err := s.client.NewRequest(http.MethodPost, u, nil, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tg := new(Group)\n\tresp, err := s.client.Do(req, g)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn g, resp, nil\n}", "func (iuo *ItemUpdateOne) ClearItemGroup() *ItemUpdateOne {\n\tiuo.mutation.ClearItemGroup()\n\treturn iuo\n}", "func (c *StackCollection) makeNodeGroupStackName(name string) string {\n\treturn fmt.Sprintf(\"eksctl-%s-nodegroup-%s\", c.spec.Metadata.Name, name)\n}", "func (s *Stack) Push(i interface{}) {\n\tsn := &StackNode{\n\t\tData: i,\n\t}\n\n\tif s.Top == nil {\n\t\ts.Top = sn\n\t\treturn\n\t}\n\n\tsn.Next = s.Top\n\ts.Top = sn\n}", "func (s *Slot) push(item Item) error {\n\tif s.item != Empty {\n\t\treturn errors.New(\"slot already contains an item\")\n\t}\n\ts.item = item\n\treturn nil\n}", "func (g *Group) Group(i int) *Group {\n\treturn g.groups[i]\n}", "func (group *ResourceGroup) ConvertTo(hub conversion.Hub) error {\n\tdestination, ok := hub.(*v20200601s.ResourceGroup)\n\tif !ok {\n\t\treturn fmt.Errorf(\"expected resources/v1api20200601storage/ResourceGroup but received %T instead\", hub)\n\t}\n\n\treturn group.AssignProperties_To_ResourceGroup(destination)\n}", "func (ds *DrawStack) Push(a Stackable) {\n\tds.toPush = append(ds.toPush, a)\n\n}", "func (c *container) Group() *Group {\n\tg := &Group{container: container{name: \"g\"}}\n\tc.contents = append(c.contents, g)\n\n\treturn g\n}", "func (sp *StackPackage) AddGroup(path string, sg StackGroup) {\n\tsp.Groups[path] = sg\n}", "func (s *CompanyPropertiesService) CreateGroup(group ItemPropertyGroup) (*ItemPropertyGroup, error) {\n\turl := \"/properties/v1/companies/groups\"\n\tres := new(ItemPropertyGroup)\n\terr := s.client.RunPost(url, group, res)\n\treturn res, err\n}", "func (self *GameObjectCreator) Group1O(parent interface{}, name string) *Group{\n return &Group{self.Object.Call(\"group\", parent, name)}\n}", "func (s Group) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Group) String() string {\n\treturn 
awsutil.Prettify(s)\n}", "func Convert2Group(node plannercore.LogicalPlan) *Group {\n\te := Convert2GroupExpr(node)\n\tg := NewGroupWithSchema(e, node.Schema())\n\t// Stats property for `Group` would be computed after exploration phase.\n\treturn g\n}", "func XPriCompGroupItemByNameXPriCompGroupID(db XODB, name string, xPriCompGroupID uint) (*XPriCompGroupItem, error) {\n\tvar err error\n\n\t// sql query\n\tconst sqlstr = `SELECT ` +\n\t\t`id, name, display_name, type_cd, primary_flag, seq_num, x_pri_comp_group_id, created_by, updated_by, created_at, updated_at ` +\n\t\t`FROM x_showroom.x_pri_comp_group_item ` +\n\t\t`WHERE name = ? AND x_pri_comp_group_id = ?`\n\n\t// run query\n\tXOLog(sqlstr, name, xPriCompGroupID)\n\txpcgi := XPriCompGroupItem{\n\t\t_exists: true,\n\t}\n\n\terr = db.QueryRow(sqlstr, name, xPriCompGroupID).Scan(&xpcgi.ID, &xpcgi.Name, &xpcgi.DisplayName, &xpcgi.TypeCd, &xpcgi.PrimaryFlag, &xpcgi.SeqNum, &xpcgi.XPriCompGroupID, &xpcgi.CreatedBy, &xpcgi.UpdatedBy, &xpcgi.CreatedAt, &xpcgi.UpdatedAt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &xpcgi, nil\n}", "func (s *orderedItems) Push(x interface{}) {\n\t// Push and Pop use pointer receivers because they modify the slice's length,\n\t// not just its contents.\n\t*s = append(*s, x.(Item))\n}", "func (o *IgroupAddResponseResult) ToXML() (string, error) {\n\toutput, err := xml.MarshalIndent(o, \" \", \" \")\n\tif err != nil {\n\t\tlog.Errorf(\"error: %v\", err)\n\t}\n\treturn string(output), err\n}", "func createItem(name string) Item {\n\tnewItem := Item{}\n\tnewItem.Id = createXid()\n\tnewItem.Name = name\n\titems = append(items, newItem)\n\treturn newItem\n}", "func (l *GroupLookup) createOrSetInGroup(index int, key flux.GroupKey, value interface{}) {\n\t// If this index is at -1, then we are inserting a value with a smaller key\n\t// than every group and we need to create a new group to insert it at the\n\t// beginning.\n\tif index == -1 {\n\t\tl.groups = append(l.groups, nil)\n\t\tcopy(l.groups[1:], l.groups[:])\n\t\tl.groups[0] = l.newKeyGroup([]groupKeyListElement{\n\t\t\t{key: key, value: value},\n\t\t})\n\t\tl.lastIndex = 0\n\t\treturn\n\t}\n\n\tkg := l.groups[index]\n\n\t// Find the location where this should be inserted.\n\ti := kg.InsertAt(key)\n\n\t// If this should be inserted after the last element, do it and leave.\n\tif i == len(kg.elements) {\n\t\tkg.elements = append(kg.elements, groupKeyListElement{\n\t\t\tkey: key,\n\t\t\tvalue: value,\n\t\t})\n\t\treturn\n\t} else if kg.elements[i].key.Equal(key) {\n\t\t// If the entry already exists at this index, set the value.\n\t\tkg.set(i, value)\n\t\treturn\n\t}\n\n\t// We have to split this entry into two new elements. First, we start\n\t// by creating space for the new entry.\n\tl.groups = append(l.groups, nil)\n\tcopy(l.groups[index+2:], l.groups[index+1:])\n\t// Construct the new group entry and copy the end of the slice\n\t// into the new key group.\n\tl.groups[index+1] = func() *groupKeyList {\n\t\t// TODO(rockstar): A nice optimization here would be to prevent\n\t\t// the deleted items from being copied. 
However, this entire function\n\t\t// needs to be refactored to support that, as it's possible that *all*\n\t\t// the elements have been deleted, so no split is needed.\n\t\t// Moving currently deleted elements out of this key group, the deleted\n\t\t// count must be decremented.\n\t\tfor _, item := range kg.elements[i:] {\n\t\t\tif item.deleted {\n\t\t\t\tkg.deleted--\n\t\t\t}\n\t\t}\n\n\t\tentries := make([]groupKeyListElement, len(kg.elements[i:]))\n\t\tcopy(entries, kg.elements[i:])\n\n\t\treturn l.newKeyGroup(entries)\n\t}()\n\t// Use a slice on the key group elements to remove the extra elements.\n\t// Then append the new key group entry.\n\tkg.elements = kg.elements[:i:cap(kg.elements)]\n\tkg.elements = append(kg.elements, groupKeyListElement{\n\t\tkey: key,\n\t\tvalue: value,\n\t})\n}", "func (s *Stack) Push(i interface{}) error {\n\tif s.top != nil && reflect.TypeOf(s.top.value) != reflect.TypeOf(i) {\n\t\treturn &StackError{msg: \"type mismatch\"}\n\t}\n\tvar e *element = new(element)\n\te.value = i\n\te.under = s.top\n\ts.top = e\n\ts.count += 1\n\treturn nil\n}", "func (me *XsdGoPkgHasElems_ItemIconObjectExtensionGroup) Walk() (err error) {\r\n\tif fn := WalkHandlers.XsdGoPkgHasElems_ItemIconObjectExtensionGroup; me != nil {\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\treturn\r\n}", "func (c *ClientWithResponses) GetPeeringGroupitemsWithResponse(ctx context.Context, params *GetPeeringGroupitemsParams) (*GetPeeringGroupitemsResponse, error) {\n\trsp, err := c.GetPeeringGroupitems(ctx, params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseGetPeeringGroupitemsResponse(rsp)\n}", "func (iuo *ItemUpdateOne) AddItemGroup(i ...*ItemGroup) *ItemUpdateOne {\n\tids := make([]int, len(i))\n\tfor j := range i {\n\t\tids[j] = i[j].ID\n\t}\n\treturn iuo.AddItemGroupIDs(ids...)\n}", "func (s *layoutStack) push(layout geom.Layout) {\n\t// inBaseTypeCollection inherits from outer context.\n\tstackObj := layoutStackObj{\n\t\tlayout: layout,\n\t\tinBaseTypeCollection: s.topInBaseTypeCollection(),\n\t}\n\n\tswitch layout {\n\tcase geom.NoLayout:\n\t\tstackObj.layout = s.topLayout()\n\tcase geom.XYM, geom.XYZ, geom.XYZM:\n\t\tstackObj.inBaseTypeCollection = false\n\tdefault:\n\t\t// This should never happen.\n\t\tpanic(fmt.Sprintf(\"unknown geom.Layout %d\", layout))\n\t}\n\n\ts.data = append(s.data, stackObj)\n}", "func (_BaseGroupFactory *BaseGroupFactoryTransactor) CreateGroup(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _BaseGroupFactory.contract.Transact(opts, \"createGroup\")\n}", "func NewStackPackage() *StackPackage {\n\t// create a Stack record and populate it with the relevant package contents\n\tv, k := v1alpha1.StackGroupVersionKind.ToAPIVersionAndKind()\n\n\tsp := &StackPackage{\n\t\tStack: v1alpha1.Stack{\n\t\t\tTypeMeta: metav1.TypeMeta{APIVersion: v, Kind: k},\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tAnnotations: map[string]string{},\n\t\t\t},\n\t\t},\n\t\tCRDs: map[string]apiextensions.CustomResourceDefinition{},\n\t\tCRDPaths: map[string]string{},\n\t\tGroups: map[string]StackGroup{},\n\t\tIcons: map[string]*v1alpha1.IconSpec{},\n\t\tResources: map[string]StackResource{},\n\t\tUISpecs: map[string]string{},\n\t}\n\n\treturn sp\n}", "func 
(*authItemGroupR) NewStruct() *authItemGroupR {\n\treturn &authItemGroupR{}\n}", "func (v *RadioButton) SetGroup(group *glib.SList) {\n\tC.gtk_radio_button_set_group(v.native(), cGSList(group))\n}", "func (s *Stack) PushObject(value interface{}) (*Item, error) {\n\tvar buffer bytes.Buffer\n\tenc := gob.NewEncoder(&buffer)\n\tif err := enc.Encode(value); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.Push(buffer.Bytes())\n}", "func (iu *ItemUpdate) AddItemGroup(i ...*ItemGroup) *ItemUpdate {\n\tids := make([]int, len(i))\n\tfor j := range i {\n\t\tids[j] = i[j].ID\n\t}\n\treturn iu.AddItemGroupIDs(ids...)\n}", "func (client *ClientImpl) AddGroup(ctx context.Context, args AddGroupArgs) (*Group, error) {\n\tif args.Group == nil {\n\t\treturn nil, &azuredevops.ArgumentNilError{ArgumentName: \"args.Group\"}\n\t}\n\trouteValues := make(map[string]string)\n\tif args.ProcessId == nil {\n\t\treturn nil, &azuredevops.ArgumentNilError{ArgumentName: \"args.ProcessId\"}\n\t}\n\trouteValues[\"processId\"] = (*args.ProcessId).String()\n\tif args.WitRefName == nil || *args.WitRefName == \"\" {\n\t\treturn nil, &azuredevops.ArgumentNilOrEmptyError{ArgumentName: \"args.WitRefName\"}\n\t}\n\trouteValues[\"witRefName\"] = *args.WitRefName\n\tif args.PageId == nil || *args.PageId == \"\" {\n\t\treturn nil, &azuredevops.ArgumentNilOrEmptyError{ArgumentName: \"args.PageId\"}\n\t}\n\trouteValues[\"pageId\"] = *args.PageId\n\tif args.SectionId == nil || *args.SectionId == \"\" {\n\t\treturn nil, &azuredevops.ArgumentNilOrEmptyError{ArgumentName: \"args.SectionId\"}\n\t}\n\trouteValues[\"sectionId\"] = *args.SectionId\n\n\tbody, marshalErr := json.Marshal(*args.Group)\n\tif marshalErr != nil {\n\t\treturn nil, marshalErr\n\t}\n\tlocationId, _ := uuid.Parse(\"766e44e1-36a8-41d7-9050-c343ff02f7a5\")\n\tresp, err := client.Client.Send(ctx, http.MethodPost, locationId, \"6.0-preview.1\", routeValues, nil, bytes.NewReader(body), \"application/json\", \"application/json\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar responseValue Group\n\terr = client.Client.UnmarshalBody(resp, &responseValue)\n\treturn &responseValue, err\n}", "func (c *ClientWithResponses) GetSoundGroupitemsWithResponse(ctx context.Context, params *GetSoundGroupitemsParams) (*GetSoundGroupitemsResponse, error) {\n\trsp, err := c.GetSoundGroupitems(ctx, params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseGetSoundGroupitemsResponse(rsp)\n}", "func (_BaseContentSpace *BaseContentSpaceTransactor) CreateGroup(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _BaseContentSpace.contract.Transact(opts, \"createGroup\")\n}", "func (s *Slot) pop() Item {\n\titem := s.item\n\ts.item = Empty\n\treturn item\n}", "func (s *RuneStack) Push(item rune) {\n\tif len(s.items) > s.Length {\n\t\ts.items[s.Length] = item\n\t} else {\n\t\ts.items = append(s.items, item)\n\t}\n\ts.Length++\n}", "func DecodeBinaryStackItem(r *io.BinReader) Item {\n\tvar t = Type(r.ReadB())\n\tif r.Err != nil {\n\t\treturn nil\n\t}\n\n\tswitch t {\n\tcase ByteArrayT, BufferT:\n\t\tdata := r.ReadVarBytes(MaxSize)\n\t\treturn NewByteArray(data)\n\tcase BooleanT:\n\t\tvar b = r.ReadBool()\n\t\treturn NewBool(b)\n\tcase IntegerT:\n\t\tdata := r.ReadVarBytes(bigint.MaxBytesLen)\n\t\tnum := bigint.FromBytes(data)\n\t\treturn NewBigInteger(num)\n\tcase ArrayT, StructT:\n\t\tsize := int(r.ReadVarUint())\n\t\tarr := make([]Item, size)\n\t\tfor i := 0; i < size; i++ {\n\t\t\tarr[i] = DecodeBinaryStackItem(r)\n\t\t}\n\n\t\tif t == ArrayT {\n\t\t\treturn 
NewArray(arr)\n\t\t}\n\t\treturn NewStruct(arr)\n\tcase MapT:\n\t\tsize := int(r.ReadVarUint())\n\t\tm := NewMap()\n\t\tfor i := 0; i < size; i++ {\n\t\t\tkey := DecodeBinaryStackItem(r)\n\t\t\tvalue := DecodeBinaryStackItem(r)\n\t\t\tif r.Err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tm.Add(key, value)\n\t\t}\n\t\treturn m\n\tcase AnyT:\n\t\treturn Null{}\n\tdefault:\n\t\tr.Err = fmt.Errorf(\"unknown type: %v\", t)\n\t\treturn nil\n\t}\n}", "func (q *Stack) Push(v interface{}) *list.Element {\n\treturn q.data.PushFront(v)\n}", "func MapItem(x map[string]interface{}, k string) item.Stack {\n\tif m, ok := x[k].(map[string]interface{}); ok {\n\t\ts := readItemStack(m)\n\t\treadDamage(m, &s, true)\n\t\treadEnchantments(m, &s)\n\t\treadDisplay(m, &s)\n\t\treadDragonflyData(m, &s)\n\t\treturn s\n\t}\n\treturn item.Stack{}\n}", "func (m *DirectoryObjectItemRequestBuilder) Group()(*if26fe6278b9799c8a3493f5e61e5d6b3bc3a629544bcfc96a440c979cf445852.GroupRequestBuilder) {\n return if26fe6278b9799c8a3493f5e61e5d6b3bc3a629544bcfc96a440c979cf445852.NewGroupRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (self *GameObjectCreator) Group4O(parent interface{}, name string, addToStage bool, enableBody bool, physicsBodyType int) *Group{\n return &Group{self.Object.Call(\"group\", parent, name, addToStage, enableBody, physicsBodyType)}\n}", "func (igq *ItemGroupQuery) QueryGroupItem() *ItemQuery {\n\tquery := &ItemQuery{config: igq.config}\n\tquery.path = func(ctx context.Context) (fromU *sql.Selector, err error) {\n\t\tif err := igq.prepareQuery(ctx); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tselector := igq.sqlQuery(ctx)\n\t\tif err := selector.Err(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(itemgroup.Table, itemgroup.FieldID, selector),\n\t\t\tsqlgraph.To(item.Table, item.FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.M2M, true, itemgroup.GroupItemTable, itemgroup.GroupItemPrimaryKey...),\n\t\t)\n\t\tfromU = sqlgraph.SetNeighbors(igq.driver.Dialect(), step)\n\t\treturn fromU, nil\n\t}\n\treturn query\n}", "func pushToStack(stack *Stack, n *Node) error {\n\tif n == nil || stack == nil {\n\t\treturn errors.New(nilNodeErr)\n\t}\n\tstack.Push(*n)\n\treturn nil\n}", "func (s *BaseMySqlParserListener) ExitGroupByItem(ctx *GroupByItemContext) {}", "func (v *RadioButton) GetGroup() (*glib.SList, error) {\n\tc := C.gtk_radio_button_get_group(v.native())\n\tif c == nil {\n\t\treturn nil, nilPtrErr\n\t}\n\treturn glib.WrapSList(uintptr(unsafe.Pointer(c))), nil\n}", "func NewFromV2(aV2 *storagegroup.StorageGroup) *StorageGroup {\n\treturn (*StorageGroup)(aV2)\n}", "func (s *Stack) Push(value interface{}) {\n\ts.top = &item{\n\t\tvalue: value,\n\t\tnext: s.top,\n\t}\n\ts.size++\n}", "func (Manganese) GetGroup() string {\n\tvar g groupType = b7\n\treturn g.get()\n}", "func WithContext(ctx context.Context) (*Group, context.Context)", "func (s *stack) push(el int) {\n\ts.items = append(s.items, el)\n}", "func (stack *Stack) Push(value interface{}) {\n\tstack.top = &Item { value, stack.top }\n\tstack.size++\n}", "func (e *Element) Group() string {\n\treturn e.group\n}", "func (g Group) String() string {\n\tjg, _ := json.Marshal(g)\n\treturn string(jg)\n}", "func NewGroup() *Group {\n\treturn &Group{}\n}", "func NewGroup() *Group {\n\treturn &Group{}\n}", "func (s *stack) push(el string) {\n\ts.items = append(s.items, el)\n}", "func (oi *OutlineItem) ToPdfOutlineItem() (*PdfOutlineItem, int64) {\n\t// Create outline item.\n\tcurrItem := 
NewPdfOutlineItem()\n\tcurrItem.Title = core.MakeString(oi.Title)\n\tcurrItem.Dest = oi.Dest.ToPdfObject()\n\n\t// Create outline items.\n\tvar outlineItems []*PdfOutlineItem\n\tvar lenDescendants int64\n\tvar prev *PdfOutlineItem\n\n\tfor _, item := range oi.items {\n\t\toutlineItem, lenChildren := item.ToPdfOutlineItem()\n\t\toutlineItem.Parent = &currItem.PdfOutlineTreeNode\n\n\t\tif prev != nil {\n\t\t\tprev.Next = &outlineItem.PdfOutlineTreeNode\n\t\t\toutlineItem.Prev = &prev.PdfOutlineTreeNode\n\t\t}\n\n\t\toutlineItems = append(outlineItems, outlineItem)\n\t\tlenDescendants += lenChildren\n\t\tprev = outlineItem\n\t}\n\n\t// Add outline item linked list properties.\n\tlenOutlineItems := len(outlineItems)\n\tlenDescendants += int64(lenOutlineItems)\n\n\tif lenOutlineItems > 0 {\n\t\tcurrItem.First = &outlineItems[0].PdfOutlineTreeNode\n\t\tcurrItem.Last = &outlineItems[lenOutlineItems-1].PdfOutlineTreeNode\n\t\tcurrItem.Count = &lenDescendants\n\t}\n\n\treturn currItem, lenDescendants\n}", "func ToGroupStatus(val interface{}) (*GroupStatus, error) {\n\tvar group GroupStatus\n\tinrec, err := json.Marshal(val)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = json.Unmarshal(inrec, &group)\n\treturn &group, err\n}", "func (o BackendResponseOutput) Group() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BackendResponse) string { return v.Group }).(pulumi.StringOutput)\n}", "func (t *OpenconfigInterfaces_Interfaces_Interface_RoutedVlan_Ipv6_Addresses_Address_Vrrp) NewVrrpGroup(VirtualRouterId uint8) (*OpenconfigInterfaces_Interfaces_Interface_RoutedVlan_Ipv6_Addresses_Address_Vrrp_VrrpGroup, error){\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.VrrpGroup == nil {\n\t\tt.VrrpGroup = make(map[uint8]*OpenconfigInterfaces_Interfaces_Interface_RoutedVlan_Ipv6_Addresses_Address_Vrrp_VrrpGroup)\n\t}\n\n\tkey := VirtualRouterId\n\n\t// Ensure that this key has not already been used in the\n\t// list. Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.VrrpGroup[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list VrrpGroup\", key)\n\t}\n\n\tt.VrrpGroup[key] = &OpenconfigInterfaces_Interfaces_Interface_RoutedVlan_Ipv6_Addresses_Address_Vrrp_VrrpGroup{\n\t\tVirtualRouterId: &VirtualRouterId,\n\t}\n\n\treturn t.VrrpGroup[key], nil\n}", "func (t *OpenconfigInterfaces_Interfaces_Interface_RoutedVlan_Ipv6_Addresses_Address_Vrrp) NewVrrpGroup(VirtualRouterId uint8) (*OpenconfigInterfaces_Interfaces_Interface_RoutedVlan_Ipv6_Addresses_Address_Vrrp_VrrpGroup, error){\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.VrrpGroup == nil {\n\t\tt.VrrpGroup = make(map[uint8]*OpenconfigInterfaces_Interfaces_Interface_RoutedVlan_Ipv6_Addresses_Address_Vrrp_VrrpGroup)\n\t}\n\n\tkey := VirtualRouterId\n\n\t// Ensure that this key has not already been used in the\n\t// list. Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.VrrpGroup[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list VrrpGroup\", key)\n\t}\n\n\tt.VrrpGroup[key] = &OpenconfigInterfaces_Interfaces_Interface_RoutedVlan_Ipv6_Addresses_Address_Vrrp_VrrpGroup{\n\t\tVirtualRouterId: &VirtualRouterId,\n\t}\n\n\treturn t.VrrpGroup[key], nil\n}", "func NewGroup(system SystemUtils) *Group {\n\treturn &Group{\n\t\tsystem: system,\n\t}\n}" ]
[ "0.6048406", "0.5054484", "0.49797606", "0.49086067", "0.48558939", "0.48539013", "0.47804856", "0.47734994", "0.47266683", "0.470826", "0.47080353", "0.46998635", "0.46988836", "0.46922645", "0.46695462", "0.4576202", "0.45609567", "0.4550765", "0.45395997", "0.45349312", "0.45101464", "0.44898093", "0.4487527", "0.44689938", "0.444515", "0.44316405", "0.44273794", "0.44229397", "0.43980023", "0.43976942", "0.43850946", "0.43778944", "0.43654257", "0.43524015", "0.43484148", "0.43427733", "0.43276852", "0.4317248", "0.430607", "0.42962426", "0.42953196", "0.42899445", "0.42886916", "0.42786694", "0.42752048", "0.42641503", "0.42615253", "0.42510915", "0.42508128", "0.4250184", "0.42467776", "0.42467776", "0.42402261", "0.4228079", "0.42265776", "0.4226306", "0.4220565", "0.42100605", "0.41993824", "0.4196601", "0.4191102", "0.4189357", "0.4184023", "0.41839722", "0.41833517", "0.41778153", "0.4176754", "0.41693315", "0.41555282", "0.4151087", "0.4145824", "0.41323894", "0.41313168", "0.411537", "0.41130748", "0.41121265", "0.41114998", "0.41061485", "0.41051713", "0.40998146", "0.4094159", "0.4087808", "0.40863907", "0.40791503", "0.40784618", "0.40716565", "0.40704048", "0.40684986", "0.40660742", "0.40659377", "0.40627658", "0.40626025", "0.40626025", "0.40603963", "0.40565827", "0.40545803", "0.40524557", "0.40495226", "0.40495226", "0.40463" ]
0.7791388
0
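
The document field of the record above turns a contract manifest Group into a two-element struct stack item (public key bytes, then signature bytes). A minimal usage sketch: the import paths assume the snippet originates from nspcc-dev/neo-go, which the identifiers suggest but the record does not state, and the zero-filled signature is a placeholder with only the correct length:

package main

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest"
	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
)

func main() {
	priv, err := keys.NewPrivateKey()
	if err != nil {
		panic(err)
	}
	g := &manifest.Group{
		PublicKey: priv.PublicKey(),
		Signature: make([]byte, keys.SignatureLen), // dummy signature, right length only
	}
	item := g.ToStackItem()
	// A Struct stack item whose value is the []stackitem.Item pair.
	fmt.Println(item.Type(), len(item.Value().([]stackitem.Item))) // Struct 2
}
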
FromStackItem converts stackitem.Item to Group.
func (g *Group) FromStackItem(item stackitem.Item) error { if item.Type() != stackitem.StructT { return errors.New("invalid Group stackitem type") } group := item.Value().([]stackitem.Item) if len(group) != 2 { return errors.New("invalid Group stackitem length") } pKey, err := group[0].TryBytes() if err != nil { return err } g.PublicKey, err = keys.NewPublicKeyFromBytes(pKey, elliptic.P256()) if err != nil { return err } sig, err := group[1].TryBytes() if err != nil { return err } if len(sig) != keys.SignatureLen { return errors.New("wrong signature length") } g.Signature = sig return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (g *Group) ToStackItem() stackitem.Item {\n\treturn stackitem.NewStruct([]stackitem.Item{\n\t\tstackitem.NewByteArray(g.PublicKey.Bytes()),\n\t\tstackitem.NewByteArray(g.Signature),\n\t})\n}", "func (s *BaseMySqlParserListener) EnterGroupByItem(ctx *GroupByItemContext) {}", "func (d *Service) GroupRetroItem(RetroID string, ItemId string, GroupId string) ([]*thunderdome.RetroItem, error) {\n\tif _, err := d.DB.Exec(\n\t\t`UPDATE thunderdome.retro_item SET group_id = $3 WHERE retro_id = $1 AND id = $2;`,\n\t\tRetroID, ItemId, GroupId,\n\t); err != nil {\n\t\td.Logger.Error(\"update retro item error\", zap.Error(err))\n\t}\n\n\titems := d.GetRetroItems(RetroID)\n\n\treturn items, nil\n}", "func ConvertGroupType(azsg *azsresources.Group) *armhelper.Group {\n\treturn &armhelper.Group{\n\t\tName: azsg.Name,\n\t\tLocation: azsg.Location,\n\t\tManagedBy: azsg.ManagedBy,\n\t\tTags: azsg.Tags,\n\t}\n}", "func (iuo *ItemUpdateOne) ClearItemGroup() *ItemUpdateOne {\n\tiuo.mutation.ClearItemGroup()\n\treturn iuo\n}", "func DeserializeItem(data []byte) (Item, error) {\n\tr := io.NewBinReaderFromBuf(data)\n\titem := DecodeBinaryStackItem(r)\n\tif r.Err != nil {\n\t\treturn nil, r.Err\n\t}\n\treturn item, nil\n}", "func (iu *ItemUpdate) ClearItemGroup() *ItemUpdate {\n\tiu.mutation.ClearItemGroup()\n\treturn iu\n}", "func (self *GameObjectCreator) Group2O(parent interface{}, name string, addToStage bool) *Group{\n return &Group{self.Object.Call(\"group\", parent, name, addToStage)}\n}", "func Convert2Group(node plannercore.LogicalPlan) *Group {\n\te := Convert2GroupExpr(node)\n\tg := NewGroupWithSchema(e, node.Schema())\n\t// Stats property for `Group` would be computed after exploration phase.\n\treturn g\n}", "func (s *BaseMySqlParserListener) ExitGroupByItem(ctx *GroupByItemContext) {}", "func (igq *ItemGroupQuery) QueryGroupItem() *ItemQuery {\n\tquery := &ItemQuery{config: igq.config}\n\tquery.path = func(ctx context.Context) (fromU *sql.Selector, err error) {\n\t\tif err := igq.prepareQuery(ctx); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tselector := igq.sqlQuery(ctx)\n\t\tif err := selector.Err(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(itemgroup.Table, itemgroup.FieldID, selector),\n\t\t\tsqlgraph.To(item.Table, item.FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.M2M, true, itemgroup.GroupItemTable, itemgroup.GroupItemPrimaryKey...),\n\t\t)\n\t\tfromU = sqlgraph.SetNeighbors(igq.driver.Dialect(), step)\n\t\treturn fromU, nil\n\t}\n\treturn query\n}", "func NewStack() *itemStack {\n\ts := &itemStack{}\n\ts.items = []Item{}\n\treturn s\n}", "func SerializeItem(item Item) ([]byte, error) {\n\tw := io.NewBufBinWriter()\n\tEncodeBinaryStackItem(item, w.BinWriter)\n\tif w.Err != nil {\n\t\treturn nil, w.Err\n\t}\n\treturn w.Bytes(), nil\n}", "func (m *TeamItemRequestBuilder) Group()(*i8a1cdbeac728d5d9d3409d0d7085c53384ad37435e0292d966ed94bbc4155a05.GroupRequestBuilder) {\n return i8a1cdbeac728d5d9d3409d0d7085c53384ad37435e0292d966ed94bbc4155a05.NewGroupRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (self *GameObjectCreator) Group3O(parent interface{}, name string, addToStage bool, enableBody bool) *Group{\n return &Group{self.Object.Call(\"group\", parent, name, addToStage, enableBody)}\n}", "func MapItem(x map[string]interface{}, k string) item.Stack {\n\tif m, ok := x[k].(map[string]interface{}); ok {\n\t\ts := readItemStack(m)\n\t\treadDamage(m, &s, true)\n\t\treadEnchantments(m, &s)\n\t\treadDisplay(m, 
&s)\n\t\treadDragonflyData(m, &s)\n\t\treturn s\n\t}\n\treturn item.Stack{}\n}", "func (self *GameObjectCreator) Group(parent interface{}) *Group{\n return &Group{self.Object.Call(\"group\", parent)}\n}", "func (self *GameObjectCreator) GroupI(args ...interface{}) *Group{\n return &Group{self.Object.Call(\"group\", args)}\n}", "func (igq *ItemGroupQuery) WithGroupItem(opts ...func(*ItemQuery)) *ItemGroupQuery {\n\tquery := &ItemQuery{config: igq.config}\n\tfor _, opt := range opts {\n\t\topt(query)\n\t}\n\tigq.withGroupItem = query\n\treturn igq\n}", "func (q *QueryGVR) groupFromGroupList(groupList *metav1.APIGroupList) *metav1.APIGroup {\n\tfor i := range groupList.Groups {\n\t\tif strings.EqualFold(groupList.Groups[i].Name, q.group) {\n\t\t\treturn &groupList.Groups[i]\n\t\t}\n\t}\n\treturn nil\n}", "func (self *GameObjectCreator) Group1O(parent interface{}, name string) *Group{\n return &Group{self.Object.Call(\"group\", parent, name)}\n}", "func (m *ManagerSecurityGroupItem) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", m, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"networkGroupId\":\n\t\t\terr = unpopulate(val, \"NetworkGroupID\", &m.NetworkGroupID)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", m, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (l *Lexer) g2GroupStart() (*GroupStart, error) {\n\tstartPos := l.Pos()\n\n\tr, err := l.nextR()\n\tif err != nil && !errors.Is(err, io.EOF) {\n\t\treturn nil, err\n\t}\n\n\tif r != '(' {\n\t\treturn nil, NewPosError(l.node(), \"expected '('\")\n\t}\n\n\tgroupStart := &GroupStart{}\n\tgroupStart.Position.BeginPos = startPos\n\tgroupStart.Position.EndPos = l.pos\n\n\treturn groupStart, nil\n}", "func ToGroupMsg(in *antreatypes.Group, out *controlplane.Group, includeBody bool) {\n\tout.UID = in.UID\n\tout.Name = in.Name\n\tif !includeBody {\n\t\treturn\n\t}\n\tfor _, member := range in.GroupMembers {\n\t\tout.GroupMembers = append(out.GroupMembers, *member)\n\t}\n}", "func (g *Group) FromTOML(i interface{}) error {\n\tgt, ok := i.(*GroupTOML)\n\tif !ok {\n\t\treturn fmt.Errorf(\"grouptoml unknown\")\n\t}\n\tg.Threshold = gt.Threshold\n\n\t// migration path from < v1.4, gt.SchemeID might not be contained in the group file, in which case it's the default\n\tsch, err := crypto.GetSchemeByIDWithDefault(gt.SchemeID)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to instantiate group with crypto Scheme named '%s'\", gt.SchemeID)\n\t}\n\tg.Scheme = sch\n\n\tg.Nodes = make([]*Node, len(gt.Nodes))\n\tfor i, ptoml := range gt.Nodes {\n\t\tg.Nodes[i] = new(Node)\n\t\tg.Nodes[i].Identity = &Identity{Scheme: sch}\n\t\tif err := g.Nodes[i].FromTOML(ptoml); err != nil {\n\t\t\treturn fmt.Errorf(\"group: unwrapping node[%d]: %w\", i, err)\n\t\t}\n\t}\n\n\tif g.Threshold < dkg.MinimumT(len(gt.Nodes)) {\n\t\treturn errors.New(\"group file have threshold 0\")\n\t} else if g.Threshold > g.Len() {\n\t\treturn errors.New(\"group file threshold greater than number of participants\")\n\t}\n\n\tif gt.PublicKey != nil {\n\t\t// dist key only if dkg ran\n\t\tg.PublicKey = new(DistPublic)\n\t\tif err = g.PublicKey.FromTOML(sch, gt.PublicKey); err != nil {\n\t\t\treturn fmt.Errorf(\"group: unwrapping distributed public key: %w\", err)\n\t\t}\n\t}\n\tg.Period, err = time.ParseDuration(gt.Period)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\tif gt.CatchupPeriod == \"\" {\n\t\tg.CatchupPeriod = 0\n\t} else {\n\t\tg.CatchupPeriod, err = time.ParseDuration(gt.CatchupPeriod)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tg.GenesisTime = gt.GenesisTime\n\tif gt.TransitionTime != 0 {\n\t\tg.TransitionTime = gt.TransitionTime\n\t}\n\tif gt.GenesisSeed != \"\" {\n\t\tif g.GenesisSeed, err = hex.DecodeString(gt.GenesisSeed); err != nil {\n\t\t\treturn fmt.Errorf(\"group: decoding genesis seed %w\", err)\n\t\t}\n\t}\n\n\t// for backward compatibility we make sure to write \"default\" as beacon id if not set\n\tg.ID = commonutils.GetCanonicalBeaconID(gt.ID)\n\n\treturn nil\n}", "func Convert2GroupExpr(node plannercore.LogicalPlan) *GroupExpr {\n\te := NewGroupExpr(node)\n\te.Children = make([]*Group, 0, len(node.Children()))\n\tfor _, child := range node.Children() {\n\t\tchildGroup := Convert2Group(child)\n\t\te.Children = append(e.Children, childGroup)\n\t}\n\treturn e\n}", "func NewFromV2(aV2 *storagegroup.StorageGroup) *StorageGroup {\n\treturn (*StorageGroup)(aV2)\n}", "func (o GroupBadgeOutput) Group() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *GroupBadge) pulumi.StringOutput { return v.Group }).(pulumi.StringOutput)\n}", "func manifestGroupFromAkash(m *manifest.Group) (ManifestGroup, error) {\n\tma := ManifestGroup{\n\t\tName: m.Name,\n\t\tServices: make([]ManifestService, 0, len(m.Services)),\n\t}\n\n\tfor _, svc := range m.Services {\n\t\tservice, err := manifestServiceFromAkash(svc)\n\t\tif err != nil {\n\t\t\treturn ManifestGroup{}, err\n\t\t}\n\n\t\tma.Services = append(ma.Services, service)\n\t}\n\n\treturn ma, nil\n}", "func NewGroup()(*Group) {\n m := &Group{\n DirectoryObject: *NewDirectoryObject(),\n }\n odataTypeValue := \"#microsoft.graph.group\";\n m.SetOdataType(&odataTypeValue);\n return m\n}", "func NewGroup(system SystemUtils) *Group {\n\treturn &Group{\n\t\tsystem: system,\n\t}\n}", "func NewRuleGroupFromJSON(j []byte) (*RuleGroup, error) {\n\trg := new(RuleGroup)\n\terr := json.Unmarshal(j, rg)\n\treturn rg, err\n}", "func CreateGroupFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {\n return NewGroup(), nil\n}", "func (m *ItemTermStoreGroupsGroupItemRequestBuilder) Get(ctx context.Context, requestConfiguration *ItemTermStoreGroupsGroupItemRequestBuilderGetRequestConfiguration)(ia3c27b33aa3d3ed80f9de797c48fbb8ed73f13887e301daf51f08450e9a634a3.Groupable, error) {\n requestInfo, err := m.ToGetRequestInformation(ctx, requestConfiguration);\n if err != nil {\n return nil, err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n }\n res, err := m.BaseRequestBuilder.RequestAdapter.Send(ctx, requestInfo, ia3c27b33aa3d3ed80f9de797c48fbb8ed73f13887e301daf51f08450e9a634a3.CreateGroupFromDiscriminatorValue, errorMapping)\n if err != nil {\n return nil, err\n }\n if res == nil {\n return nil, nil\n }\n return res.(ia3c27b33aa3d3ed80f9de797c48fbb8ed73f13887e301daf51f08450e9a634a3.Groupable), nil\n}", "func (iuo *ItemUpdateOne) RemoveItemGroup(i ...*ItemGroup) *ItemUpdateOne {\n\tids := make([]int, len(i))\n\tfor j := range i {\n\t\tids[j] = i[j].ID\n\t}\n\treturn 
iuo.RemoveItemGroupIDs(ids...)\n}", "func (g *Group) Group(i int) *Group {\n\treturn g.groups[i]\n}", "func (group *ResourceGroup) ConvertFrom(hub conversion.Hub) error {\n\tsource, ok := hub.(*v20200601s.ResourceGroup)\n\tif !ok {\n\t\treturn fmt.Errorf(\"expected resources/v1api20200601storage/ResourceGroup but received %T instead\", hub)\n\t}\n\n\treturn group.AssignProperties_From_ResourceGroup(source)\n}", "func (g *Group) TOMLValue() interface{} {\n\treturn &GroupTOML{}\n}", "func (iggb *ItemGroupGroupBy) String(ctx context.Context) (_ string, err error) {\n\tvar v []string\n\tif v, err = iggb.Strings(ctx); err != nil {\n\t\treturn\n\t}\n\tswitch len(v) {\n\tcase 1:\n\t\treturn v[0], nil\n\tcase 0:\n\t\terr = &NotFoundError{itemgroup.Label}\n\tdefault:\n\t\terr = fmt.Errorf(\"ent: ItemGroupGroupBy.Strings returned %d results when one was expected\", len(v))\n\t}\n\treturn\n}", "func ToGroupStatus(val interface{}) (*GroupStatus, error) {\n\tvar group GroupStatus\n\tinrec, err := json.Marshal(val)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = json.Unmarshal(inrec, &group)\n\treturn &group, err\n}", "func (v *RadioButton) GetGroup() (*glib.SList, error) {\n\tc := C.gtk_radio_button_get_group(v.native())\n\tif c == nil {\n\t\treturn nil, nilPtrErr\n\t}\n\treturn glib.WrapSList(uintptr(unsafe.Pointer(c))), nil\n}", "func (*authItemGroupR) NewStruct() *authItemGroupR {\n\treturn &authItemGroupR{}\n}", "func (i SliceIteration) ItemToUnstruct(idx int, item interface{}) *unstructured.Unstructured {\n\tu := &unstructured.Unstructured{}\n\tu.SetUnstructuredContent(\n\t\tmap[string]interface{}{\n\t\t\t//\tThe given item is mapped against spec field\n\t\t\t\"spec\": item,\n\t\t},\n\t)\n\t// NOTE:\n\t//\n\t// \tThese values are set to avoid getting into any errors\n\t// in-case of any internal validation during parsing of\n\t// this unstructured instance\n\tu.SetKind(\"SliceItem\")\n\tu.SetAPIVersion(\"v1\")\n\tu.SetName(fmt.Sprintf(\"elem-%d\", idx))\n\treturn u\n}", "func DecodeBinaryStackItem(r *io.BinReader) Item {\n\tvar t = Type(r.ReadB())\n\tif r.Err != nil {\n\t\treturn nil\n\t}\n\n\tswitch t {\n\tcase ByteArrayT, BufferT:\n\t\tdata := r.ReadVarBytes(MaxSize)\n\t\treturn NewByteArray(data)\n\tcase BooleanT:\n\t\tvar b = r.ReadBool()\n\t\treturn NewBool(b)\n\tcase IntegerT:\n\t\tdata := r.ReadVarBytes(bigint.MaxBytesLen)\n\t\tnum := bigint.FromBytes(data)\n\t\treturn NewBigInteger(num)\n\tcase ArrayT, StructT:\n\t\tsize := int(r.ReadVarUint())\n\t\tarr := make([]Item, size)\n\t\tfor i := 0; i < size; i++ {\n\t\t\tarr[i] = DecodeBinaryStackItem(r)\n\t\t}\n\n\t\tif t == ArrayT {\n\t\t\treturn NewArray(arr)\n\t\t}\n\t\treturn NewStruct(arr)\n\tcase MapT:\n\t\tsize := int(r.ReadVarUint())\n\t\tm := NewMap()\n\t\tfor i := 0; i < size; i++ {\n\t\t\tkey := DecodeBinaryStackItem(r)\n\t\t\tvalue := DecodeBinaryStackItem(r)\n\t\t\tif r.Err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tm.Add(key, value)\n\t\t}\n\t\treturn m\n\tcase AnyT:\n\t\treturn Null{}\n\tdefault:\n\t\tr.Err = fmt.Errorf(\"unknown type: %v\", t)\n\t\treturn nil\n\t}\n}", "func (m *DirectoryObjectItemRequestBuilder) Group()(*if26fe6278b9799c8a3493f5e61e5d6b3bc3a629544bcfc96a440c979cf445852.GroupRequestBuilder) {\n return if26fe6278b9799c8a3493f5e61e5d6b3bc3a629544bcfc96a440c979cf445852.NewGroupRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (iuo *ItemUpdateOne) AddItemGroup(i ...*ItemGroup) *ItemUpdateOne {\n\tids := make([]int, len(i))\n\tfor j := range i {\n\t\tids[j] = i[j].ID\n\t}\n\treturn 
iuo.AddItemGroupIDs(ids...)\n}", "func (v *SeasonResDB) ToItem() *SeasonResItem {\n\tres := &SeasonResItem{\n\t\tCTime: v.CTime.Time().Format(_TimeFormat),\n\t\tSeasonRes: v.SeasonRes,\n\t}\n\tswitch v.Check {\n\tcase 0: // reject\n\t\tres.Check = 2\n\tcase 1: // passed\n\t\tres.Check = 1\n\tdefault:\n\t\tres.Check = 0\n\t}\n\tif v.InjectTime > 0 {\n\t\tres.InjectTime = v.InjectTime.Time().Format(_TimeFormat)\n\t}\n\treturn res\n}", "func XPriCompGroupItemByNameXPriCompGroupID(db XODB, name string, xPriCompGroupID uint) (*XPriCompGroupItem, error) {\n\tvar err error\n\n\t// sql query\n\tconst sqlstr = `SELECT ` +\n\t\t`id, name, display_name, type_cd, primary_flag, seq_num, x_pri_comp_group_id, created_by, updated_by, created_at, updated_at ` +\n\t\t`FROM x_showroom.x_pri_comp_group_item ` +\n\t\t`WHERE name = ? AND x_pri_comp_group_id = ?`\n\n\t// run query\n\tXOLog(sqlstr, name, xPriCompGroupID)\n\txpcgi := XPriCompGroupItem{\n\t\t_exists: true,\n\t}\n\n\terr = db.QueryRow(sqlstr, name, xPriCompGroupID).Scan(&xpcgi.ID, &xpcgi.Name, &xpcgi.DisplayName, &xpcgi.TypeCd, &xpcgi.PrimaryFlag, &xpcgi.SeqNum, &xpcgi.XPriCompGroupID, &xpcgi.CreatedBy, &xpcgi.UpdatedBy, &xpcgi.CreatedAt, &xpcgi.UpdatedAt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &xpcgi, nil\n}", "func CreateGroup(params types.ContextParams, clientSet apimachinery.ClientSetInterface, groupItems []metadata.Group) []Group {\n\tresults := make([]Group, 0)\n\tfor _, grp := range groupItems {\n\n\t\tresults = append(results, &group{\n\t\t\tgrp: grp,\n\t\t\tparams: params,\n\t\t\tclientSet: clientSet,\n\t\t})\n\t}\n\n\treturn results\n}", "func (item *splitItem) Clone() *splitItem {\n\treturn &splitItem{\n\t\tmetaType: item.metaType,\n\t\titemType: item.itemType,\n\t\titemID: item.itemID,\n\t\tdirection: item.direction,\n\t\toffset: item.offset,\n\t\tbit: item.bit,\n\t\tkey: item.key,\n\t\tvalue: item.value,\n\t}\n}", "func AuthItemGroupExists(ctx context.Context, exec boil.ContextExecutor, iD int) (bool, error) {\n\tvar exists bool\n\tsql := \"select exists(select 1 from `auth_item_groups` where `id`=? 
limit 1)\"\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, sql)\n\t\tfmt.Fprintln(writer, iD)\n\t}\n\trow := exec.QueryRowContext(ctx, sql, iD)\n\n\terr := row.Scan(&exists)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"models: unable to check if auth_item_groups exists\")\n\t}\n\n\treturn exists, nil\n}", "func (c *StackCollection) makeNodeGroupStackName(name string) string {\n\treturn fmt.Sprintf(\"eksctl-%s-nodegroup-%s\", c.spec.Metadata.Name, name)\n}", "func NewStack() *ItemStack {\n\ts := &ItemStack{}\n\ts.items = []Items{}\n\treturn s\n}", "func (_BaseContentSpace *BaseContentSpaceFilterer) ParseCreateGroup(log types.Log) (*BaseContentSpaceCreateGroup, error) {\n\tevent := new(BaseContentSpaceCreateGroup)\n\tif err := _BaseContentSpace.contract.UnpackLog(event, \"CreateGroup\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}", "func createFullfilItemWithRule(ty int, myTree *widgets.QTreeWidget, myRule BgpFsRule) {\n var myItem = widgets.NewQTreeWidgetItem3(myTree, ty)\n myItem.SetText(0, myRule.AddrFam)\n myItem.SetText(1, myRule.DstPrefix)\n myItem.SetText(2, myRule.SrcPrefix)\n myItem.SetText(3, myRule.Port)\n myItem.SetText(4, myRule.SrcPort)\n myItem.SetText(5, myRule.DstPort)\n myItem.SetText(6, myRule.TcpFlags)\n myItem.SetText(7, myRule.IcmpType)\n myItem.SetText(8, myRule.IcmpCode)\n myItem.SetText(9, myRule.ProtoNumber)\n myItem.SetText(10, myRule.PacketLen)\n myItem.SetText(11, myRule.Dscp)\n myItem.SetText(12, myRule.IpFrag)\n myItem.SetText(13, myRule.Action)\n myItem.SetText(14, myRule.ActSisterValue)\n}", "func (q authItemGroupQuery) One(ctx context.Context, exec boil.ContextExecutor) (*AuthItemGroup, error) {\n\to := &AuthItemGroup{}\n\n\tqueries.SetLimit(q.Query, 1)\n\n\terr := q.Bind(ctx, exec, o)\n\tif err != nil {\n\t\tif errors.Cause(err) == sql.ErrNoRows {\n\t\t\treturn nil, sql.ErrNoRows\n\t\t}\n\t\treturn nil, errors.Wrap(err, \"models: failed to execute a one query for auth_item_groups\")\n\t}\n\n\tif err := o.doAfterSelectHooks(ctx, exec); err != nil {\n\t\treturn o, err\n\t}\n\n\treturn o, nil\n}", "func NewGroup() *Group {\n\treturn &Group{}\n}", "func NewGroup() *Group {\n\treturn &Group{}\n}", "func (s *Stack) Push(item interface{}) {\n\toldNode := s.node\n\tnewNode := nodeStack{TElement: item, Previous: &oldNode}\n\ts.node = newNode\n\ts.size++\n}", "func XPriCompGroupItemByID(db XODB, id uint) (*XPriCompGroupItem, error) {\n\tvar err error\n\n\t// sql query\n\tconst sqlstr = `SELECT ` +\n\t\t`id, name, display_name, type_cd, primary_flag, seq_num, x_pri_comp_group_id, created_by, updated_by, created_at, updated_at ` +\n\t\t`FROM x_showroom.x_pri_comp_group_item ` +\n\t\t`WHERE id = ?`\n\n\t// run query\n\tXOLog(sqlstr, id)\n\txpcgi := XPriCompGroupItem{\n\t\t_exists: true,\n\t}\n\n\terr = db.QueryRow(sqlstr, id).Scan(&xpcgi.ID, &xpcgi.Name, &xpcgi.DisplayName, &xpcgi.TypeCd, &xpcgi.PrimaryFlag, &xpcgi.SeqNum, &xpcgi.XPriCompGroupID, &xpcgi.CreatedBy, &xpcgi.UpdatedBy, &xpcgi.CreatedAt, &xpcgi.UpdatedAt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &xpcgi, nil\n}", "func (inp itemFrom) itemPipeEnter(wg itemWaiter) (out itemFrom) {\n\tcha := make(chan item)\n\tgo inp.pipeitemEnter(cha, wg)\n\treturn cha\n}", "func (self *GameObjectCreator) Group4O(parent interface{}, name string, addToStage bool, enableBody bool, physicsBodyType int) *Group{\n return &Group{self.Object.Call(\"group\", parent, name, addToStage, enableBody, physicsBodyType)}\n}", 
"func (s *BasesiciListener) EnterItem(ctx *ItemContext) {}", "func (sg *StorageGroup) ToV2() *storagegroup.StorageGroup {\n\treturn (*storagegroup.StorageGroup)(sg)\n}", "func (p *DefaultParser) NamedGroup(field string) string {\n\tr := strings.NewReplacer(\" \", \"\", \":\", \"\", \",\", \"\")\n\treturn r.Replace(strings.ToLower(field))\n}", "func (l *GroupLookup) createOrSetInGroup(index int, key flux.GroupKey, value interface{}) {\n\t// If this index is at -1, then we are inserting a value with a smaller key\n\t// than every group and we need to create a new group to insert it at the\n\t// beginning.\n\tif index == -1 {\n\t\tl.groups = append(l.groups, nil)\n\t\tcopy(l.groups[1:], l.groups[:])\n\t\tl.groups[0] = l.newKeyGroup([]groupKeyListElement{\n\t\t\t{key: key, value: value},\n\t\t})\n\t\tl.lastIndex = 0\n\t\treturn\n\t}\n\n\tkg := l.groups[index]\n\n\t// Find the location where this should be inserted.\n\ti := kg.InsertAt(key)\n\n\t// If this should be inserted after the last element, do it and leave.\n\tif i == len(kg.elements) {\n\t\tkg.elements = append(kg.elements, groupKeyListElement{\n\t\t\tkey: key,\n\t\t\tvalue: value,\n\t\t})\n\t\treturn\n\t} else if kg.elements[i].key.Equal(key) {\n\t\t// If the entry already exists at this index, set the value.\n\t\tkg.set(i, value)\n\t\treturn\n\t}\n\n\t// We have to split this entry into two new elements. First, we start\n\t// by creating space for the new entry.\n\tl.groups = append(l.groups, nil)\n\tcopy(l.groups[index+2:], l.groups[index+1:])\n\t// Construct the new group entry and copy the end of the slice\n\t// into the new key group.\n\tl.groups[index+1] = func() *groupKeyList {\n\t\t// TODO(rockstar): A nice optimization here would be to prevent\n\t\t// the deleted items from being copied. 
However, this entire function\n\t\t// needs to be refactored to support that, as it's possible that *all*\n\t\t// the elements have been deleted, so no split is needed.\n\t\t// Moving currently deleted elements out of this key group, the deleted\n\t\t// count must be decremented.\n\t\tfor _, item := range kg.elements[i:] {\n\t\t\tif item.deleted {\n\t\t\t\tkg.deleted--\n\t\t\t}\n\t\t}\n\n\t\tentries := make([]groupKeyListElement, len(kg.elements[i:]))\n\t\tcopy(entries, kg.elements[i:])\n\n\t\treturn l.newKeyGroup(entries)\n\t}()\n\t// Use a slice on the key group elements to remove the extra elements.\n\t// Then append the new key group entry.\n\tkg.elements = kg.elements[:i:cap(kg.elements)]\n\tkg.elements = append(kg.elements, groupKeyListElement{\n\t\tkey: key,\n\t\tvalue: value,\n\t})\n}", "func (m *StoreItemRequestBuilder) Groups()(*i943058511484df328af302c39ee5cb2c343138e90a60de21e94bad79f3a4f322.GroupsRequestBuilder) {\n return i943058511484df328af302c39ee5cb2c343138e90a60de21e94bad79f3a4f322.NewGroupsRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (p *Parser) parseGroupedExpression() asti.ExpressionI {\n\tp.nextToken()\n\n\texp := p.parseExpression(precedence.LOWEST)\n\tif !p.expectPeek(tokentype.RPAREN) {\n\t\treturn nil\n\t}\n\treturn exp\n}", "func ToGroupConfig(val interface{}) (*GroupConfig, error) {\n\tvar group GroupConfig\n\tinrec, err := json.Marshal(val)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = json.Unmarshal(inrec, &group)\n\treturn &group, err\n}", "func (c *container) Group() *Group {\n\tg := &Group{container: container{name: \"g\"}}\n\tc.contents = append(c.contents, g)\n\n\treturn g\n}", "func (iu *ItemUpdate) RemoveItemGroup(i ...*ItemGroup) *ItemUpdate {\n\tids := make([]int, len(i))\n\tfor j := range i {\n\t\tids[j] = i[j].ID\n\t}\n\treturn iu.RemoveItemGroupIDs(ids...)\n}", "func AuthItemGroups(mods ...qm.QueryMod) authItemGroupQuery {\n\tmods = append(mods, qm.From(\"`auth_item_groups`\"))\n\treturn authItemGroupQuery{NewQuery(mods...)}\n}", "func WithContext(ctx context.Context) (*Group, context.Context)", "func NewGroup(m *algebra.Matrix) *Group {\n\tmat := m\n\tif m == nil || len(m.Get()) != 4 || len(m.Get()[0]) != 4 {\n\t\tmat = algebra.IdentityMatrix(4)\n\t}\n\temptyShapes := make([]Shape, 0, 0)\n\treturn &Group{transform: mat, parent: nil, shapes: emptyShapes, bounds: [2]*algebra.Vector{}}\n}", "func ParseGroupData(rawData interface{}, group *Group) {\n\tif error := json.Unmarshal([]byte(rawData.(string)), &group); error != nil {\n\t\treturn\n\t}\n}", "func (s *Slot) push(item Item) error {\n\tif s.item != Empty {\n\t\treturn errors.New(\"slot already contains an item\")\n\t}\n\ts.item = item\n\treturn nil\n}", "func (client *ClientImpl) MoveGroupToPage(ctx context.Context, args MoveGroupToPageArgs) (*Group, error) {\n\tif args.Group == nil {\n\t\treturn nil, &azuredevops.ArgumentNilError{ArgumentName: \"args.Group\"}\n\t}\n\trouteValues := make(map[string]string)\n\tif args.ProcessId == nil {\n\t\treturn nil, &azuredevops.ArgumentNilError{ArgumentName: \"args.ProcessId\"}\n\t}\n\trouteValues[\"processId\"] = (*args.ProcessId).String()\n\tif args.WitRefName == nil || *args.WitRefName == \"\" {\n\t\treturn nil, &azuredevops.ArgumentNilOrEmptyError{ArgumentName: \"args.WitRefName\"}\n\t}\n\trouteValues[\"witRefName\"] = *args.WitRefName\n\tif args.PageId == nil || *args.PageId == \"\" {\n\t\treturn nil, &azuredevops.ArgumentNilOrEmptyError{ArgumentName: \"args.PageId\"}\n\t}\n\trouteValues[\"pageId\"] = *args.PageId\n\tif 
args.SectionId == nil || *args.SectionId == \"\" {\n\t\treturn nil, &azuredevops.ArgumentNilOrEmptyError{ArgumentName: \"args.SectionId\"}\n\t}\n\trouteValues[\"sectionId\"] = *args.SectionId\n\tif args.GroupId == nil || *args.GroupId == \"\" {\n\t\treturn nil, &azuredevops.ArgumentNilOrEmptyError{ArgumentName: \"args.GroupId\"}\n\t}\n\trouteValues[\"groupId\"] = *args.GroupId\n\n\tqueryParams := url.Values{}\n\tif args.RemoveFromPageId == nil {\n\t\treturn nil, &azuredevops.ArgumentNilError{ArgumentName: \"removeFromPageId\"}\n\t}\n\tqueryParams.Add(\"removeFromPageId\", *args.RemoveFromPageId)\n\tif args.RemoveFromSectionId == nil {\n\t\treturn nil, &azuredevops.ArgumentNilError{ArgumentName: \"removeFromSectionId\"}\n\t}\n\tqueryParams.Add(\"removeFromSectionId\", *args.RemoveFromSectionId)\n\tbody, marshalErr := json.Marshal(*args.Group)\n\tif marshalErr != nil {\n\t\treturn nil, marshalErr\n\t}\n\tlocationId, _ := uuid.Parse(\"766e44e1-36a8-41d7-9050-c343ff02f7a5\")\n\tresp, err := client.Client.Send(ctx, http.MethodPut, locationId, \"6.0-preview.1\", routeValues, queryParams, bytes.NewReader(body), \"application/json\", \"application/json\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar responseValue Group\n\terr = client.Client.UnmarshalBody(resp, &responseValue)\n\treturn &responseValue, err\n}", "func (p ByName) GroupName() string { return p.groupName }", "func NewItem(n string, p string, s int) Item {\n\tprice, err := decimal.NewFromString(p)\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not create new items\")\n\t}\n\treturn Item{\n\t\tselector: s,\n\t\tname: n,\n\t\tprice: price,\n\t}\n}", "func New(app, account, region, stack, cluster string) InstanceGroup {\n\treturn group{\n\t\tapp: app,\n\t\taccount: account,\n\t\tregion: region,\n\t\tstack: stack,\n\t\tcluster: cluster,\n\t}\n}", "func (m *Message) FromGroup() bool {\n\treturn m.Chat.Type == ChatGroup || m.Chat.Type == ChatSuperGroup\n}", "func (ib *defaultItemBackupper) backupItem(logger *logrus.Entry, obj runtime.Unstructured, groupResource schema.GroupResource) error {\n\tmetadata, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnamespace := metadata.GetNamespace()\n\tname := metadata.GetName()\n\n\tlog := logger.WithField(\"name\", name)\n\tif namespace != \"\" {\n\t\tlog = log.WithField(\"namespace\", namespace)\n\t}\n\n\t// NOTE: we have to re-check namespace & resource includes/excludes because it's possible that\n\t// backupItem can be invoked by a custom action.\n\tif namespace != \"\" && !ib.namespaces.ShouldInclude(namespace) {\n\t\tlog.Info(\"Excluding item because namespace is excluded\")\n\t\treturn nil\n\t}\n\n\t// NOTE: we specifically allow namespaces to be backed up even if IncludeClusterResources is\n\t// false.\n\tif namespace == \"\" && groupResource != namespacesGroupResource && ib.backup.Spec.IncludeClusterResources != nil && !*ib.backup.Spec.IncludeClusterResources {\n\t\tlog.Info(\"Excluding item because resource is cluster-scoped and backup.spec.includeClusterResources is false\")\n\t\treturn nil\n\t}\n\n\tif !ib.resources.ShouldInclude(groupResource.String()) {\n\t\tlog.Info(\"Excluding item because resource is excluded\")\n\t\treturn nil\n\t}\n\n\tkey := itemKey{\n\t\tresource: groupResource.String(),\n\t\tnamespace: namespace,\n\t\tname: name,\n\t}\n\n\tif _, exists := ib.backedUpItems[key]; exists {\n\t\tlog.Info(\"Skipping item because it's already been backed up.\")\n\t\treturn nil\n\t}\n\tib.backedUpItems[key] = struct{}{}\n\n\tlog.Info(\"Backing up 
resource\")\n\n\titem := obj.UnstructuredContent()\n\t// Never save status\n\tdelete(item, \"status\")\n\n\tif err := ib.itemHookHandler.handleHooks(log, groupResource, obj, ib.resourceHooks); err != nil {\n\t\treturn err\n\t}\n\n\tif action, found := ib.actions[groupResource]; found {\n\t\tlog.Info(\"Executing custom action\")\n\n\t\tif additionalItemIdentifiers, err := action.Execute(log, obj, ib.backup); err == nil {\n\t\t\tfor _, additionalItem := range additionalItemIdentifiers {\n\t\t\t\tgvr, resource, err := ib.discoveryHelper.ResourceFor(additionalItem.GroupResource.WithVersion(\"\"))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tclient, err := ib.dynamicFactory.ClientForGroupVersionResource(gvr.GroupVersion(), resource, additionalItem.Namespace)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tadditionalItem, err := client.Get(additionalItem.Name, metav1.GetOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tib.additionalItemBackupper.backupItem(log, additionalItem, gvr.GroupResource())\n\t\t\t}\n\t\t} else {\n\t\t\treturn errors.Wrap(err, \"error executing custom action\")\n\t\t}\n\t}\n\n\tvar filePath string\n\tif namespace != \"\" {\n\t\tfilePath = filepath.Join(api.ResourcesDir, groupResource.String(), api.NamespaceScopedDir, namespace, name+\".json\")\n\t} else {\n\t\tfilePath = filepath.Join(api.ResourcesDir, groupResource.String(), api.ClusterScopedDir, name+\".json\")\n\t}\n\n\titemBytes, err := json.Marshal(item)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\thdr := &tar.Header{\n\t\tName: filePath,\n\t\tSize: int64(len(itemBytes)),\n\t\tTypeflag: tar.TypeReg,\n\t\tMode: 0755,\n\t\tModTime: time.Now(),\n\t}\n\n\tif err := ib.tarWriter.WriteHeader(hdr); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tif _, err := ib.tarWriter.Write(itemBytes); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\treturn nil\n}", "func (iu *ItemUpdate) AddItemGroup(i ...*ItemGroup) *ItemUpdate {\n\tids := make([]int, len(i))\n\tfor j := range i {\n\t\tids[j] = i[j].ID\n\t}\n\treturn iu.AddItemGroupIDs(ids...)\n}", "func (group *ResourceGroup) ConvertTo(hub conversion.Hub) error {\n\tdestination, ok := hub.(*v20200601s.ResourceGroup)\n\tif !ok {\n\t\treturn fmt.Errorf(\"expected resources/v1api20200601storage/ResourceGroup but received %T instead\", hub)\n\t}\n\n\treturn group.AssignProperties_To_ResourceGroup(destination)\n}", "func (me *XsdGoPkgHasElem_ItemIconObjectExtensionGroup) Walk() (err error) {\r\n\tif fn := WalkHandlers.XsdGoPkgHasElem_ItemIconObjectExtensionGroup; me != nil {\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\treturn\r\n}", "func (o *Permissao) SetIsGroup(v bool) {\n\to.IsGroup = &v\n}", "func (group *ResourceGroup_Spec) ConvertSpecFrom(source genruntime.ConvertibleSpec) error {\n\tif source == group {\n\t\treturn errors.New(\"attempted conversion between unrelated implementations of github.com/Azure/azure-service-operator/v2/pkg/genruntime/ConvertibleSpec\")\n\t}\n\n\treturn source.ConvertSpecTo(group)\n}", "func processEnumItem(body string) string {\n\tenumItemTags := map[string]string{\"itemize\": \"ul\",\n\t\t\t\t\t\t\t\t\t\t\"enumerate\": \"ol\",\n\t\t\t\t\t\t\t\t\t\t\"item\": 
\"li\"} \n\n\treg := regexp.MustCompile(`\\\\begin{itemize}` +\n\t\t\t\t\t\t\t\t`|\\\\end{itemize}` +\n\t\t\t\t\t\t\t\t`|\\\\begin{enumerate}` + \n\t\t\t\t\t\t\t\t`|\\\\end{enumerate}` +\n\t\t\t\t\t\t\t\t`|\\\\item`)\t\t\t\t\t\t\t\n\ttextBlocks := reg.Split(body,-1)\n\tcontrolTags := reg.FindAllString(body, -1)\n\n\tnewBody := textBlocks[0]\n\tfor i,tag := range controlTags{\n\n\t\t// handle \"\\item\" tage seperately\n\t\tif tag == \"\\\\item\" {\n\t\t\ttagType := \"item\"\n\t\t\tnewBody += \"<\" + enumItemTags[tagType] + \">\" + textBlocks[i+1] + \"</\" + enumItemTags[tagType] + \">\"\n\t\t\tcontinue\n\t\t}\n\n\t\tparsed := regexp.MustCompile(`\\\\|{|}`).Split(tag,-1)\n\t\ttagType := parsed[1]\n\t\ttagValue := parsed[2]\n\t\tif tagType == \"begin\"{\n\t\t\tnewBody += \"<\" + enumItemTags[tagValue] + \">\" \n\t\t}else{\n\t\t\tnewBody += \"</\" + enumItemTags[tagValue] + \">\" \n\t\t}\n\t\t \n\t\tnewBody += textBlocks[i+1]\n\t\t\n\t}\n\treturn newBody\n}", "func (s *Server) makeGroup(cmd string, phone string) (Group, error) {\n\n\tstartIndex := strings.Index(cmd, \"<\")\n\tendIndex := strings.Index(cmd, \">\") + 1\n\n\tscannerEngine := &scanner.GScanner{\n\t\tInput: cmd[startIndex:endIndex],\n\t\tTokens: []string{\"<\", \">\", \":\"},\n\t\tIncludeTokensInOutput: false,\n\t}\n\n\toutput := scannerEngine.Scan()\n\n\tif len(output) == 3 {\n\t\tcommand := output[0]\n\t\tgrpName := strings.TrimSpace(output[1])\n\t\talias := strings.TrimSpace(output[2])\n\n\t\tif command != GroupMakeCommand[1:len(GroupMakeCommand)] {\n\t\t\treturn Group{}, errors.New(\"Bad command for making group. Please use the syntax: \" + GroupMakeCommand)\n\t\t}\n\t\tif len(alias) > GroupAliasMaxLen {\n\t\t\treturn Group{}, errors.New(\"Error. Your group alias cannot be more than \" + strconv.Itoa(GroupAliasMaxLen) + \" characters\")\n\t\t}\n\t\tif strings.Contains(alias, \" \") || strings.Contains(alias, \"\\n\") || strings.Contains(alias, \"\\t\") {\n\t\t\treturn Group{}, errors.New(\"Error: Your group alias must have no white spaces. 
\")\n\t\t}\n\n\t\tgrp := &Group{\n\t\t\tID: utils.GenUlid(),\n\t\t\tName: grpName,\n\t\t\tAlias: alias,\n\t\t\tAdminPhone: phone,\n\t\t\tMembers: make([]string, 0),\n\t\t}\n\n\t\t//Ensure that none of the user's groups has either of the alias or name given here\n\t\tif s.userHasGroupByName(phone, grp.Name) {\n\t\t\treturn Group{}, errors.New(\"Error: The Group, \" + grp.Name + \" is already amongst your groups!\")\n\t\t}\n\n\t\treturn *grp, nil\n\t}\n\n\terr := errors.New(\"The syntax of your command i.e `\" + cmd + \"` is wrong!\\n Please use `<grpmk:grpName>` to create a new group\")\n\n\treturn Group{}, err\n}", "func New(dir string) *Group {\n\tg := &Group{\n\t\tdir: dir,\n\t}\n\tg.Clear()\n\treturn g\n}", "func ParseItem(pobItem PobItem) (Item, error) {\n\titem := Item{}\n\n\thasType := false\n\n\tlines := strings.Split(pobItem.Text, \"\\n\")\n\n\t//remove empty lines and trim spaces, and other special modifies and meta info like {stuff}\n\tcbracketsRegexp := regexp.MustCompile(`\\{(.*?)\\}`)\n\tcleanLines := []string{}\n\tfor _, line := range lines {\n\t\ttmpLine := strings.TrimSpace(line)\n\t\tif len(tmpLine) > 0 {\n\t\t\ttmpLine = cbracketsRegexp.ReplaceAllString(tmpLine, \"\")\n\t\t\tcleanLines = append(cleanLines, tmpLine)\n\t\t}\n\t}\n\n\t//validate a min required lines to be an \"item\"\n\tif len(cleanLines) < 2 {\n\t\treturn Item{}, errors.New(\"Too few lines, unsupported item format\")\n\t}\n\n\t//parse rarity\n\tif !isItemProperty(cleanLines[0]) {\n\t\treturn Item{}, errors.New(\"Unable to extract rarity, unsupported item format\")\n\t}\n\n\trarityKey, rarityValue, extracted := parseItemPropety(cleanLines[0])\n\tif !extracted || rarityKey != \"Rarity\" || len(rarityValue) == 0 {\n\t\treturn Item{}, errors.New(\"Unable to extract rarity, unsupported item format\")\n\t}\n\titem.Rarity = strings.Title(strings.ToLower(rarityValue))\n\n\t//set name\n\titem.Name = cleanLines[1]\n\n\t//type might not be provided, need to validate\n\tif len(cleanLines) > 2 {\n\t\tif !isItemProperty(cleanLines[2]) {\n\t\t\thasType = true\n\t\t\titem.Type = cleanLines[2]\n\t\t}\n\t}\n\n\t//parse properties and modifiers\n\tproperties := map[string]string{}\n\tmodifiers := []string{}\n\n\tstartFromIndex := 2\n\tif hasType {\n\t\tstartFromIndex = 3\n\t}\n\n\tfor index, line := range cleanLines[startFromIndex:] {\n\t\tif isItemProperty(line) {\n\t\t\tkey, value, success := parseItemPropety(line)\n\t\t\tif !success || len(key) == 0 || len(value) == 0 {\n\t\t\t\tlog.Println(\"Couldn't extract property from line:\", line, \"at index: \", index)\n\t\t\t} else {\n\t\t\t\tproperties[key] = value\n\t\t\t}\n\t\t} else {\n\t\t\tmodifiers = append(modifiers, line)\n\t\t}\n\t}\n\n\titem.Properties = properties\n\titem.Modifiers = modifiers\n\n\treturn item, nil\n}", "func NewGroup(field string) Group {\n\treturn Group{\n\t\tField: field,\n\t}\n}", "func (m *ItemTermStoreGroupsGroupItemRequestBuilder) Patch(ctx context.Context, body ia3c27b33aa3d3ed80f9de797c48fbb8ed73f13887e301daf51f08450e9a634a3.Groupable, requestConfiguration *ItemTermStoreGroupsGroupItemRequestBuilderPatchRequestConfiguration)(ia3c27b33aa3d3ed80f9de797c48fbb8ed73f13887e301daf51f08450e9a634a3.Groupable, error) {\n requestInfo, err := m.ToPatchRequestInformation(ctx, body, requestConfiguration);\n if err != nil {\n return nil, err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": 
ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n }\n res, err := m.BaseRequestBuilder.RequestAdapter.Send(ctx, requestInfo, ia3c27b33aa3d3ed80f9de797c48fbb8ed73f13887e301daf51f08450e9a634a3.CreateGroupFromDiscriminatorValue, errorMapping)\n if err != nil {\n return nil, err\n }\n if res == nil {\n return nil, nil\n }\n return res.(ia3c27b33aa3d3ed80f9de797c48fbb8ed73f13887e301daf51f08450e9a634a3.Groupable), nil\n}", "func (e *Element) Group() string {\n\treturn e.group\n}", "func NewGroupsFromReader(r io.Reader, bad BadLineHandler) (*HTGroup, error) {\n\thtGroup := HTGroup{}\n\n\treadFileErr := htGroup.ReloadGroupsFromReader(r, bad)\n\tif readFileErr != nil {\n\t\treturn nil, readFileErr\n\t}\n\n\treturn &htGroup, nil\n}", "func ToGroupMode(fluxMode flux.GroupMode) GroupMode {\n\tswitch fluxMode {\n\tcase flux.GroupModeNone:\n\t\treturn GroupModeNone\n\tcase flux.GroupModeBy:\n\t\treturn GroupModeBy\n\tdefault:\n\t\tpanic(fmt.Sprint(\"unknown group mode: \", fluxMode))\n\t}\n}", "func NewItem() Item {\n\treturn Item{make(map[string]interface{})}\n}", "func NewItem(svc iaas.Service, path string) (*Item, error) {\n\tfold, err := NewFolder(svc, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttheItem := &Item{\n\t\tfolder: fold,\n\t\tpayload: nil,\n\t\tlock: &sync.Mutex{},\n\t}\n\n\treturn theItem, nil\n}", "func (m *ItemItemsItemWorkbookWorksheetsItemChartsItemSeriesWorkbookChartSeriesItemRequestBuilder) Format()(*ItemItemsItemWorkbookWorksheetsItemChartsItemSeriesItemFormatRequestBuilder) {\n return NewItemItemsItemWorkbookWorksheetsItemChartsItemSeriesItemFormatRequestBuilderInternal(m.pathParameters, m.requestAdapter)\n}", "func (n *aggregateFuncExpr) getAggItem() *aggItem {\n\tif n.currentGroup == nil {\n\t\tn.currentGroup = singleGroupKey\n\t}\n\tif n.contextPerGroupMap == nil {\n\t\tn.contextPerGroupMap = make(map[string](*aggItem))\n\t}\n\tif _, ok := n.contextPerGroupMap[string(n.currentGroup)]; !ok {\n\t\tn.contextPerGroupMap[string(n.currentGroup)] = &aggItem{}\n\t}\n\treturn n.contextPerGroupMap[string(n.currentGroup)]\n}" ]
[ "0.6390705", "0.5216318", "0.49234468", "0.48573193", "0.48340288", "0.4817603", "0.4816735", "0.47445422", "0.4733563", "0.46748108", "0.4618605", "0.45001128", "0.44741407", "0.44484565", "0.443607", "0.4425097", "0.44242656", "0.44081646", "0.43769795", "0.43498576", "0.43455714", "0.43282694", "0.43267494", "0.43207732", "0.43169326", "0.43066716", "0.42992863", "0.42922297", "0.4274572", "0.42339373", "0.4226747", "0.4212116", "0.4209302", "0.42044497", "0.4141091", "0.4140794", "0.4136158", "0.41303107", "0.41099218", "0.4094889", "0.40891752", "0.4088892", "0.40853047", "0.40826094", "0.4046882", "0.40461916", "0.4041731", "0.40399212", "0.40275648", "0.40257287", "0.40181378", "0.40128523", "0.4011474", "0.40086088", "0.40076685", "0.40064093", "0.39944425", "0.39944425", "0.39930546", "0.39927524", "0.3992458", "0.39909756", "0.3989829", "0.39896327", "0.39837763", "0.39827508", "0.39803395", "0.39671007", "0.39629668", "0.39598787", "0.39518464", "0.39397278", "0.3936133", "0.3924741", "0.39244843", "0.3920348", "0.3913821", "0.3909048", "0.38958287", "0.38894978", "0.388832", "0.3887046", "0.38858768", "0.38815874", "0.38806632", "0.38781652", "0.38718647", "0.38668624", "0.386495", "0.3862647", "0.38506737", "0.38482636", "0.38478822", "0.38404357", "0.38373473", "0.38362077", "0.38310313", "0.38258076", "0.38248134", "0.38183627" ]
0.7810027
0
Begin starts a chain, reading the files located in the source directory as input.
func Begin(sourceDir string) *Goldsmith {
	goldsmith := &Goldsmith{
		sourceDir:     sourceDir,
		contextHasher: crc32.NewIEEE(),
		fileRefs:      make(map[string]bool),
	}
	goldsmith.Chain(new(loader))
	return goldsmith
}
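A minimal usage sketch of the chain that Begin opens, for orientation only; it is not part of this record. The End(targetDir) finalizer and the somePlugin value are assumptions in the style of Goldsmith-like pipelines, not taken from this dataset.
// Sketch under stated assumptions (not from this record): assumes the
// package also exposes End(targetDir string) []error to drain the chain,
// as Goldsmith-style pipelines typically do, and that somePlugin
// satisfies the Plugin interface used by Chain. Assumes `import "log"`.
func runPipeline(somePlugin Plugin) {
	errs := Begin("content"). // load files under ./content
		Chain(somePlugin).    // pass them through one processing stage
		End("build")          // assumed finalizer writing results to ./build
	for _, err := range errs {
		log.Println(err) // report any per-file pipeline errors
	}
}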
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Begin(sourceDir string) *Goldsmith {\n\tgoldsmith := &Goldsmith{sourceDir: sourceDir}\n\tgoldsmith.Chain(&loader{})\n\treturn goldsmith\n}", "func (fnc *FileNameConsumer) Start() {\n\tgo func() {\n\t\tfnc.wg.Add(1)\n\t\tdefer fnc.wg.Done()\n\n\t\tfor filename := range fnc.incoming {\n\t\t\tfnc.wg.Add(1)\n\t\t\tgo fnc.consume(filename)\n\t\t}\n\t}()\n}", "func (p *Parser) Start(r io.Reader, pkgdir string) ([]byte, error) {\n\n\tif r == nil {\n\t\treturn []byte{}, errors.New(\"input is empty\")\n\t}\n\n\tvar in io.Reader\n\tvar err error\n\n\tin, err = ToScssReader(r)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp.Line = make(map[int]string)\n\n\t// Setup paths\n\tif p.MainFile == \"\" {\n\t\tp.MainFile = \"stdin\"\n\t}\n\tif p.BuildDir == \"\" {\n\t\tp.BuildDir = pkgdir\n\t}\n\tif p.SassDir == \"\" {\n\t\tp.SassDir = pkgdir\n\t}\n\tbuf := bytes.NewBuffer(make([]byte, 0, bytes.MinRead))\n\tif in == nil {\n\t\treturn []byte{}, fmt.Errorf(\"input is empty\")\n\t}\n\t_, err = buf.ReadFrom(in)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\n\t// Send original byte slice\n\tp.Output = buf.Bytes() //[]byte(p.Input)\n\n\treturn p.Output, nil\n}", "func (t *Tailer) Start(offset int64, whence int) error {\n\terr := t.setup(offset, whence)\n\tif err != nil {\n\t\tt.source.Status.Error(err)\n\t\treturn err\n\t}\n\tt.source.Status.Success()\n\tt.source.AddInput(t.path)\n\n\tgo t.forwardMessages()\n\tt.decoder.Start()\n\tgo t.readForever()\n\n\treturn nil\n}", "func (w *Walker) startProcessing() {\n\tdoStart := false\n\tw.pipe.RLock()\n\tif w.pipe.filters == nil { // no processing up to now => start with initial node\n\t\tw.pipe.pushSync(w.initial, 0) // input is buffered, will return immediately\n\t\tdoStart = true // yes, we will have to start the pipeline\n\t}\n\tw.pipe.RUnlock()\n\tif doStart { // ok to be outside mutex as other goroutines will check pipe.empty()\n\t\tw.pipe.startProcessing() // must be outside of mutex lock\n\t}\n}", "func main() {\n\n\t//Command line praser init\n\tvar src = flag.String(\"src\", \"\", \"Location of BDData Directory\")\n\tvar des = flag.String(\"des\", \"\", \"Location where recoverd files will be stored\")\n\tflag.Parse()\n\tif *src == \"\" || *des == \"\" {\n\t\tfmt.Println(\"Usage: recovery -src <BDData Dir> -des <Recovery Dir> \")\n\t}\n\n\t//Creates a new path struct to hold destination and source\n\tpaths := &Path{}\n\tpaths.SetPath(*src, *des)\n\tfiles := paths.GlobIt()\n\n\t//Init for FCSFile - reads the TEXT header\n\t//Init for FCSInto - stores relevant info from TEXT header\n\tnewFile := &FCSFile{}\n\tfileInfo := &FCSInfo{}\n\n\t//Loops through the files in the source directory.\n\tfor _, fileName := range files {\n\n\t\tnewFile.InitFCS(fileName)\n\t\tfileInfo.InitFCSInfo(newFile)\n\t\tpaths.RenameMove(fileInfo)\n\t}\n\n}", "func (w *BaseWorker) Chain(nextWorker Worker) Worker {\n\tif w.output == nil {\n\t\tw.output = NewFileStream()\n\t}\n\t// override the input in the worker\n\tnextWorker.SetInput(w.output)\n\n\tif o := nextWorker.Output(); o == nil {\n\t\tnextWorker.SetOutput(NewFileStream())\n\t}\n\tw.nextWorkers = append(w.nextWorkers, nextWorker)\n\tnextWorker.Start()\n\treturn nextWorker\n}", "func (r *InMemorySourceReader) Begin() {\n\tr.iStack.PushBack(r.i)\n}", "func (s *Basegff3Listener) EnterSource(ctx *SourceContext) {}", "func (P *Flow) Start() error {\n\terr := P.run()\n\t// fmt.Println(\"Start().Err\", P.Orig.Path(), err)\t\n\treturn err\n}", "func (m *SequentialMaster) Start() {\n\tm.active = true\n\n\tw := 
*NewWorker(m.JobName, m.MapF, m.ReduceF)\n\n\tfor i, file := range m.InputFileNames {\n\t\tw.DoMap(file, uint(i), m.NumReducers);\n\t}\n\n\tfor i := uint(0); i < m.NumReducers; i++ {\n\t\tw.DoReduce(i, uint(len(m.InputFileNames)))\n\t}\n}", "func (s *SyncTask) Start(input io.Reader, synced, failed io.Writer) error {\n\n\tstart := time.Now()\n\n\tkeysIn := make(chan s3.Key, s.SyncPara*BufferFactor)\n\tkeysOk := make(chan s3.Key, s.SyncPara*BufferFactor)\n\tkeysFail := make(chan s3.Key, s.SyncPara*BufferFactor)\n\n\tdecoders := make(chan []byte, s.DecodePara*BufferFactor)\n\n\t// start JSON decoders\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"key_decoders\": s.DecodePara,\n\t\t\"buffer_size\": cap(decoders),\n\t}).Info(\"starting key decoders\")\n\n\tdecGroup := sync.WaitGroup{}\n\tfor i := 0; i < s.DecodePara; i++ {\n\t\tdecGroup.Add(1)\n\t\tgo s.decode(&decGroup, decoders, keysIn)\n\t}\n\n\t// start S3 sync workers\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"sync_workers\": s.SyncPara,\n\t\t\"buffer_size\": cap(keysIn),\n\t}).Info(\"starting key sync workers\")\n\tsyncGroup := sync.WaitGroup{}\n\tfor i := 0; i < s.SyncPara; i++ {\n\t\tsyncGroup.Add(1)\n\t\tgo s.syncKey(&syncGroup, s.src, s.dst, keysIn, keysOk, keysFail)\n\t}\n\n\t// track keys that have been sync'd, and those that we failed to sync.\n\tlogrus.Info(\"starting to write progress\")\n\tencGroup := sync.WaitGroup{}\n\tencGroup.Add(2)\n\tgo s.encode(&encGroup, synced, keysOk)\n\tgo s.encode(&encGroup, failed, keysFail)\n\n\t// feed the pipeline by reading the listing file\n\tlogrus.Info(\"starting to read key listing file\")\n\terr := s.readLines(input, decoders)\n\n\t// when done reading the source file, wait until the decoders\n\t// are done.\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"since_start\": time.Since(start),\n\t\t\"line_count\": metrics.fileLines.String(),\n\t}).Info(\"done reading lines from sync list\")\n\tclose(decoders)\n\tdecGroup.Wait()\n\n\t// when the decoders are all done, wait for the sync workers to finish\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"since_start\": time.Since(start),\n\t\t\"line_count\": metrics.decodedKeys.String(),\n\t}).Info(\"done decoding keys from sync list\")\n\n\tclose(keysIn)\n\tsyncGroup.Wait()\n\n\tclose(keysOk)\n\tclose(keysFail)\n\n\tencGroup.Wait()\n\n\t// the source file is read, all keys were decoded and sync'd. we're done.\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"since_start\": time.Since(start),\n\t\t\"sync_ok\": metrics.syncOk.String(),\n\t\t\"sync_fail\": metrics.syncAbandoned.String(),\n\t}).Info(\"done syncing keys\")\n\n\treturn err\n}", "func (w *Worker) startReader() {\n\tdump, err := os.Open(w.InputFile)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdecoder := xml.NewDecoder(dump)\n\n\tfor {\n\t\tt, _ := decoder.Token()\n\t\tif t == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t// Inspect the type of the token just read.\n\t\tswitch se := t.(type) {\n\t\tcase xml.StartElement:\n\t\t\tif se.Name.Local == \"page\" {\n\t\t\t\tvar p Page\n\t\t\t\tdecoder.DecodeElement(&p, &se)\n\n\t\t\t\tfound := find(seen, p.Title)\n\t\t\t\tif found {\n\t\t\t\t\tlog.Printf(\"Duplicate title: %s. 
Skipping...\", p.Title)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tw.InPage <- &p\n\t\t\t}\n\t\t}\n\t}\n\n\t// Close the channels associated with reading/writing\n\tclose(w.InPage)\n\tlog.Println(\"Reader done\")\n}", "func (p *Pipe) start() {\n\tp.cancel = make(chan struct{})\n\terrcList := make([]<-chan error, 0, 1+len(p.processors)+len(p.sinks))\n\t// start pump\n\tout, errc := p.pump.run(p.cancel, p.ID(), p.provide, p.consume, p.sampleRate, p.metric)\n\terrcList = append(errcList, errc)\n\n\t// start chained processesing\n\tfor _, proc := range p.processors {\n\t\tout, errc = proc.run(p.cancel, p.ID(), out, p.sampleRate, p.metric)\n\t\terrcList = append(errcList, errc)\n\t}\n\n\tsinkErrcList := p.broadcastToSinks(out)\n\terrcList = append(errcList, sinkErrcList...)\n\tp.errc = mergeErrors(errcList...)\n}", "func StartPipeline(incomingDataFolder string, successProcessedFolder string, failProcessedFolder string, handleIncomingData func(string, string, string) error) {\n\tpendingFile := make(chan string)\n\tsuccessFile := make(chan string)\n\tfailFile := make(chan string)\n\n\t// Watch for events at the three folders of the pipeline in parallel\n\tgo watchForNewFiles(incomingDataFolder, pendingFile)\n\tgo watchForNewFiles(successProcessedFolder, successFile)\n\tgo watchForNewFiles(failProcessedFolder, failFile)\n\n\tfor {\n\n\t\tselect {\n\t\t// case a new data file has arrived\n\t\tcase arrivedFilePath := <-pendingFile:\n\t\t\tlogrus.Debugf(\"New pending file change detected: %s\", arrivedFilePath)\n\t\t\t// invoke the specialized function that will handle this kind of function\n\t\t\tgo handleIncomingData(arrivedFilePath, successProcessedFolder, failProcessedFolder)\n\n\t\t// case a new data has been successly ingested\n\t\tcase successFilePath := <-successFile:\n\t\t\tlogrus.Debugf(\"New success file change detected. 
Data successly ingested!: %s\", successFilePath)\n\n\t\t// case a the received data couldn't be ingested\n\t\tcase failFilePath := <-failFile:\n\t\t\tlogrus.Debugf(\"There were an error on the data ingestion of the following file: %s\", failFilePath)\n\t\t}\n\t}\n}", "func (t *Traverser) Start(ctx context.Context) {\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn\n\tcase t.awaitRequest <- struct{}{}:\n\t}\n\tgo func() {\n\t\tvar chooser traversal.LinkTargetNodeStyleChooser = dagpb.AddDagPBSupportToChooser(func(ipld.Link, ipld.LinkContext) (ipld.NodeStyle, error) {\n\t\t\treturn basicnode.Style.Any, nil\n\t\t})\n\t\tloader := func(lnk ipld.Link, lnkCtx ipld.LinkContext) (io.Reader, error) {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn nil, errors.New(\"Context cancelled\")\n\t\t\tcase t.stateChan <- state{false, lnk, lnkCtx}:\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn nil, errors.New(\"Context cancelled\")\n\t\t\tcase response := <-t.responses:\n\t\t\t\treturn response.input, response.err\n\t\t\t}\n\t\t}\n\t\tstyle, err := chooser(t.root, ipld.LinkContext{})\n\t\tif err != nil {\n\t\t\tt.writeDone(ctx)\n\t\t\treturn\n\t\t}\n\t\tbuilder := style.NewBuilder()\n\t\terr = t.root.Load(ctx, ipld.LinkContext{}, builder, loader)\n\t\tif err != nil {\n\t\t\tt.writeDone(ctx)\n\t\t\treturn\n\t\t}\n\t\tnd := builder.Build()\n\t\tsel, err := selector.ParseSelector(t.selector)\n\t\tif err != nil {\n\t\t\tt.writeDone(ctx)\n\t\t\treturn\n\t\t}\n\t\t_ = traversal.Progress{\n\t\t\tCfg: &traversal.Config{\n\t\t\t\tCtx: ctx,\n\t\t\t\tLinkLoader: loader,\n\t\t\t\tLinkTargetNodeStyleChooser: chooser,\n\t\t\t},\n\t\t}.WalkAdv(nd, sel, func(traversal.Progress, ipld.Node, traversal.VisitReason) error { return nil })\n\t\tt.writeDone(ctx)\n\t}()\n\n}", "func Start(in io.Reader, out io.Writer) {\n\tscanner := bufio.NewScanner(in)\n\n\tconstants := make([]object.Object, 0)\n\tsymbolTable := compiler.NewSymbolTable()\n\tglobals := make([]object.Object, vm.GlobalsSize)\n\n\tfor {\n\t\tfmt.Printf(prompt)\n\t\tscanned := scanner.Scan()\n\t\tif !scanned {\n\t\t\treturn\n\t\t}\n\n\t\tline := scanner.Text()\n\t\tl := lexer.New(line)\n\t\tp := parser.New(l)\n\n\t\tprogram := p.ParseProgram()\n\t\tif len(p.Errors()) != 0 {\n\t\t\tprintParserErrors(out, p.Errors())\n\t\t\tcontinue\n\t\t}\n\n\t\tcomp := compiler.NewWithState(symbolTable, constants)\n\t\tif err := comp.Compile(program); err != nil {\n\t\t\tio.WriteString(out, fmt.Sprintf(\"error during compilation: %v\", err))\n\t\t}\n\n\t\tmachine := vm.NewWithGlobals(comp.ByteCode(), globals)\n\t\tif err := machine.Run(); err != nil {\n\t\t\tio.WriteString(out, fmt.Sprintf(\"error during execution: %v\", err))\n\t\t}\n\n\t\tresult := machine.LastPopped()\n\t\tio.WriteString(out, result.Inspect())\n\t\tio.WriteString(out, \"\\n\")\n\t}\n}", "func (m *manager) start() error {\n\tw := newWatcher(m)\n\tw.start()\n\n\tgo m.test(fsnotify.Event{Name: \":start:\"})\n\n\t// watch files\n\tgo func() {\n\t\tlogrus.Info(\"watching files...\")\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase event := <-w.Events:\n\t\t\t\tif event.Op != fsnotify.Chmod {\n\t\t\t\t\tgo m.test(event)\n\t\t\t\t}\n\t\t\t\tw.Remove(event.Name)\n\t\t\t\tw.Add(event.Name)\n\t\t\tcase <-m.context.Done():\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase err := <-w.Errors:\n\t\t\t\tlogrus.Error(err)\n\t\t\tcase <-m.context.Done():\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\tfor {\n\t\t_, err := os.Stat(\"test-coverage/index.html\")\n\t\tif err != nil 
{\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\texec.Command(\"live-server\", \"test-coverage\").Run()\n\treturn nil\n}", "func (i *FileInput) Start(out chan<- *event.Event) error {\n\ti.out = out\n\ti.t.Go(i.run)\n\treturn nil\n}", "func (s *TestSource) Start(ctx context.Context) error {\n\tgo s.closer(ctx)\n\n\treturn nil\n}", "func (t *tcParser) start() {\n\tt.logger.Info(\"start(): Starting the tc_reader.\")\n\tconfigTemplate := \"tc_reader configuration: tcCmdPath: %s parseInterval: %d tcQdiscStats: %s tcClassStats: %s ifaces: %s userNameClass: %v\"\n\tt.logIfDebug(fmt.Sprintf(configTemplate, t.options.tcCmdPath(), t.options.parseInterval(), t.options.tcQdiscStats(), t.options.tcClassStats(), t.options.ifaces(), t.options.userNameClass()))\n\t// One initial run of TC execution and parsing.\n\tt.parseTc()\n\n\tgo func() {\n\t\tfor range time.Tick(time.Duration(t.options.parseInterval()) * time.Second) {\n\t\t\tt.parseTc()\n\t\t}\n\t}()\n}", "func inputSourceRunner(ctx context.Context, src InputSource, outCh chan<- Data, errQueue *queue.Queue) {\n\tfor src.Next(ctx) {\n\t\tdata := src.Data()\n\n\t\tselect {\n\t\tcase outCh <- data:\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n\t// Check for errors\n\tif err := src.Error(); err != nil {\n\t\terrQueue.Append(fmt.Errorf(\"pipeline input source: %v\", err))\n\t}\n}", "func (s *SourceControl) Start(sourceName *string, reply *bool) error {\n\t*reply = false\n\tif s.isSourceActive {\n\t\treturn fmt.Errorf(\"already have active source, do not start\")\n\t}\n\tname := strings.ToUpper(*sourceName)\n\tswitch name {\n\tcase \"SIMPULSESOURCE\":\n\t\ts.ActiveSource = DataSource(s.simPulses)\n\t\ts.status.SourceName = \"SimPulses\"\n\n\tcase \"TRIANGLESOURCE\":\n\t\ts.ActiveSource = DataSource(s.triangle)\n\t\ts.status.SourceName = \"Triangles\"\n\n\tcase \"LANCEROSOURCE\":\n\t\ts.ActiveSource = DataSource(s.lancero)\n\t\ts.status.SourceName = \"Lancero\"\n\n\tcase \"ROACHSOURCE\":\n\t\ts.ActiveSource = DataSource(s.roach)\n\t\ts.status.SourceName = \"Roach\"\n\n\tcase \"ABACOSOURCE\":\n\t\ts.ActiveSource = DataSource(s.abaco)\n\t\ts.status.SourceName = \"Abaco\"\n\n\tcase \"ERRORINGSOURCE\":\n\t\ts.ActiveSource = DataSource(s.erroring)\n\t\ts.status.SourceName = \"Erroring\"\n\n\tdefault:\n\t\treturn fmt.Errorf(\"data Source \\\"%s\\\" is not recognized\", *sourceName)\n\t}\n\n\tlog.Printf(\"Starting data source named %s\\n\", *sourceName)\n\ts.status.Running = true\n\tif err := Start(s.ActiveSource, s.queuedRequests, s.status.Npresamp, s.status.Nsamples); err != nil {\n\t\ts.status.Running = false\n\t\ts.isSourceActive = false\n\t\treturn err\n\t}\n\ts.isSourceActive = true\n\ts.status.SamplePeriod = s.ActiveSource.SamplePeriod()\n\ts.status.Nchannels = s.ActiveSource.Nchan()\n\ts.status.ChanGroups = s.ActiveSource.ChanGroups()\n\ts.broadcastStatus()\n\ts.broadcastTriggerState()\n\ts.broadcastGroupTriggerState()\n\ts.broadcastChannelNames()\n\ts.storeChannelGroups()\n\t*reply = true\n\treturn nil\n}", "func (m *stack) Start() (err error) {\n\n\t// Check stack binary is at specified path and compiled up-to-date\n\n\t// Start the stack binary\n\n\t// Connect to it with a ClientConn, and register the StackClient to it.\n\n\t// Additional checks if needed\n\n\treturn\n}", "func Start(in io.Reader, out io.Writer) {\n\tscanner := bufio.NewScanner(in)\n\tenv := object.NewEnvironment()\n\tfor {\n\t\tfmt.Fprintf(out, PROMPT)\n\n\t\tif !scanner.Scan() {\n\t\t\treturn\n\t\t}\n\n\t\tline := scanner.Text()\n\t\tl := 
lexer.New(line)\n\t\tp := parser.New(l)\n\t\tprogram := p.ParseProgram()\n\n\t\tif len(p.Errors()) != 0 {\n\t\t\tprintParserErrors(out, p.Errors())\n\t\t\tcontinue\n\t\t}\n\n\t\tevaluated := evaluator.Eval(env, program)\n\t\tif evaluated != nil {\n\t\t\tio.WriteString(out, evaluated.Inspect())\n\t\t\tio.WriteString(out, \"\\n\")\n\t\t}\n\t}\n}", "func main() {\n\tsig := make(chan os.Signal, 1)\n\tsignal.Notify(sig, os.Interrupt, os.Kill)\n\n\te := events.NewStream(1000, 10)\n\tSource.Load(e)\n\n\tSource.Start()\n\n\tdefer Source.Stop()\n\n\t<-sig\n}", "func (c *BFTChain) Start() {\n\tif err := c.consensus.Start(); err != nil {\n\t\tc.Logger.Panicf(\"Failed to start chain, aborting: %+v\", err)\n\t}\n\tc.reportIsLeader() // report the leader\n}", "func (c *Collector) Start() {\n\tgo c.Source.Start()\n\tc.collect()\n}", "func (tr *tableReader) Start(ctx context.Context) {\n\tif tr.FlowCtx.Txn == nil {\n\t\tlog.Fatalf(ctx, \"tableReader outside of txn\")\n\t}\n\n\tctx = tr.StartInternal(ctx, tableReaderProcName)\n\n\tlimitBatches := !tr.parallelize\n\tlog.VEventf(ctx, 1, \"starting scan with limitBatches %t\", limitBatches)\n\tvar err error\n\tif tr.maxTimestampAge == 0 {\n\t\terr = tr.fetcher.StartScan(\n\t\t\tctx, tr.FlowCtx.Txn, tr.spans, limitBatches, tr.limitHint,\n\t\t\ttr.FlowCtx.TraceKV,\n\t\t\ttr.EvalCtx.TestingKnobs.ForceProductionBatchSizes,\n\t\t)\n\t} else {\n\t\tinitialTS := tr.FlowCtx.Txn.ReadTimestamp()\n\t\terr = tr.fetcher.StartInconsistentScan(\n\t\t\tctx, tr.FlowCtx.Cfg.DB, initialTS, tr.maxTimestampAge, tr.spans,\n\t\t\tlimitBatches, tr.limitHint, tr.FlowCtx.TraceKV,\n\t\t\ttr.EvalCtx.TestingKnobs.ForceProductionBatchSizes,\n\t\t)\n\t}\n\n\tif err != nil {\n\t\ttr.MoveToDraining(err)\n\t}\n}", "func (s *BaseredcodeListener) EnterFile_(ctx *File_Context) {}", "func (c *Crawler) Start() error {\n\tlogp.Info(\"Loading Inputs: %v\", len(c.inputConfigs))\n\n\tfor _, inputConfig := range c.inputConfigs {\n\t\terr := c.startInput(inputConfig)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tlogp.Info(\"Loading and starting Inputs completed. Enabled inputs: %v\", len(c.inputs))\n\n\treturn nil\n}", "func (p *literalProcessor) start() { go p.run() }", "func main() {\n\tgopath = strings.Replace(gopath, \"\\\\\", \"/\", -1)\n\tpwd = strings.Replace(pwd, \"\\\\\", \"/\", -1)\n\t// http://stackoverflow.com/questions/12363030/read-from-initial-stdin-in-go\n\tb, err := ioutil.ReadAll(in)\n\tif err != nil || readAllerr {\n\t\tPdbgf(\"gopanic: ioutil.ReadAll(os.Stdin) => err: %s\", errorString(err))\n\t\texitfct(-1)\n\t\treturn\n\t}\n\t// Pdbgf(\"ioutil.ReadAll(in) => len: %d\", len(b))\n\n\tlines := strings.Split(string(b), \"\\n\")\n\tlexer := &lexer{lines: lines, stacks: []*stack{}}\n\t// Pdbgf(\"len: %d, pos %d\", len(lexer.lines), lexer.pos)\n\tfor state := lookForReason; state != nil; {\n\t\tstate = state(lexer)\n\t}\n\tfor _, stack := range lexer.stacks {\n\t\tstack.max = lexer.max + 2\n\t\tfmt.Fprintln(writers.Out(), stack.String())\n\t}\n\t// Pdbgf(\"done\")\n}", "func Init(d int) {\n\tcountByDay = loadCountFile()\n\tlog.Info(fmt.Sprintf(\"Processing files until %d days ago\", d))\n\tprocessCDRfiles(d)\n\tlog.Info(\"Creating result file\")\n\ttoCountToFile()\n\n\tcreateResultFile()\n\tif len(filesProcessed) > 0 {\n\t\t//Backup Process\n\t\tif config.Conf.Process.Backup {\n\t\t\tcompressAllFiles()\n\t\t\tcopyFilesToBackup()\n\t\t\tremoveFiles()\n\t\t}\n\n\t\tnotifyResult()\n\t} else {\n\t\tnotifyTeam(\"<b> No files loaded! 
<b>\")\n\t}\n}", "func Start(r io.Reader, w io.Writer) {\n\tscanner := bufio.NewScanner(r)\n\n\tfmt.Fprint(w, prompt)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\n\t\tl := lexer.New(line)\n\t\tp := parser.New(l)\n\n\t\tprogram := p.ParseProgram()\n\n\t\tif len(p.Errors()) != 0 {\n\t\t\tprintParserErrors(w, p.Errors())\n\t\t\tcontinue\n\t\t}\n\n\t\tevaluated := evaluator.Eval(program)\n\n\t\tif evaluated != nil {\n\t\t\tio.WriteString(w, evaluated.Inspect())\n\t\t\tio.WriteString(w, \"\\n\")\n\t\t}\n\n\t\tfmt.Fprint(w, prompt)\n\t}\n}", "func (st *buildStatus) start() {\n\tsetStatus(st.builderRev, st)\n\tgo func() {\n\t\terr := st.build()\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(st, \"\\n\\nError: %v\\n\", err)\n\t\t\tlog.Println(st.builderRev, \"failed:\", err)\n\t\t}\n\t\tst.setDone(err == nil)\n\t\tst.buildRecord().put()\n\t\tmarkDone(st.builderRev)\n\t}()\n}", "func (i *Ingester) Start(ctx context.Context) error {\n\tconcurrentProc := make(chan bool, nConcurrentProcessors)\n\tresultChan, err := i.getInputChannel(ctx)\n\tif err != nil {\n\t\treturn sklog.FmtErrorf(\"Error retrieving input channel: %s\", err)\n\t}\n\n\t// Continuously catch events from all input sources and push the data to the processor.\n\tgo func(doneCh <-chan bool) {\n\t\tvar resultFile ResultFileLocation = nil\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase resultFile = <-resultChan:\n\t\t\tcase <-doneCh:\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// get a slot in line to call Process\n\t\t\tconcurrentProc <- true\n\t\t\tgo func(resultFile ResultFileLocation) {\n\t\t\t\tdefer func() { <-concurrentProc }()\n\t\t\t\ti.processResult(ctx, resultFile)\n\t\t\t}(resultFile)\n\t\t}\n\t}(i.doneCh)\n\treturn nil\n}", "func (o *Options) generateSources(log logr.Logger, fs vfs.FileSystem) ([]InternalSourceOptions, error) {\n\tif len(o.SourceObjectPaths) == 0 {\n\t\t// try to read from stdin if no resources are defined\n\t\tsourceOptions := make([]InternalSourceOptions, 0)\n\t\tstdinInfo, err := os.Stdin.Stat()\n\t\tif err != nil {\n\t\t\tlog.V(3).Info(\"unable to read from stdin\", \"error\", err.Error())\n\t\t\treturn nil, nil\n\t\t}\n\t\tif (stdinInfo.Mode()&os.ModeNamedPipe != 0) || stdinInfo.Size() != 0 {\n\t\t\tstdinResources, err := o.generateSourcesFromReader(os.Stdin)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"unable to read from stdin: %w\", err)\n\t\t\t}\n\t\t\tsourceOptions = append(sourceOptions, convertToInternalSourceOptions(stdinResources, \"\")...)\n\t\t}\n\t\treturn sourceOptions, nil\n\t}\n\n\tsourceOptions := make([]InternalSourceOptions, 0)\n\tfor _, resourcePath := range o.SourceObjectPaths {\n\t\tif resourcePath == \"-\" {\n\t\t\tstdinInfo, err := os.Stdin.Stat()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"unable to read from stdin: %w\", err)\n\t\t\t}\n\t\t\tif (stdinInfo.Mode()&os.ModeNamedPipe != 0) || stdinInfo.Size() != 0 {\n\t\t\t\tstdinResources, err := o.generateSourcesFromReader(os.Stdin)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"unable to read from stdin: %w\", err)\n\t\t\t\t}\n\t\t\t\tsourceOptions = append(sourceOptions, convertToInternalSourceOptions(stdinResources, \"\")...)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tresourceObjectReader, err := fs.Open(resourcePath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to read source object from %s: %w\", resourcePath, err)\n\t\t}\n\t\tnewResources, err := o.generateSourcesFromReader(resourceObjectReader)\n\t\tif err != nil {\n\t\t\tif err2 := resourceObjectReader.Close(); err2 != nil 
{\n\t\t\t\tlog.Error(err, \"unable to close file reader\", \"path\", resourcePath)\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(\"unable to read sources from %s: %w\", resourcePath, err)\n\t\t}\n\t\tif err := resourceObjectReader.Close(); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to read source from %q: %w\", resourcePath, err)\n\t\t}\n\t\tsourceOptions = append(sourceOptions, convertToInternalSourceOptions(newResources, resourcePath)...)\n\t}\n\treturn sourceOptions, nil\n}", "func (b *BBS) Start(ctx context.Context) {\n\tfor _, line := range b.lines {\n\t\tline.Init()\n\t\tgo b.listenForCalls(ctx, line)\n\t}\n\t<-ctx.Done()\n}", "func Start(w http.ResponseWriter, r *http.Request) {\n\t// The first Node will read two blocks from a Json string and add it to the BlockChain\n\tif os.Args[2] == \"yes\" {\n\t\tfmt.Println(\"Create BlockChain from json \")\n\t\tSBC.UpdateEntireBlockChain(JSON_BLOCKCHAIN)\n\t\thighestblockTransaction = SBC.GetLatestBlocks()[0].GetHeight()\n\t\thighestblockTransactionHash = SBC.GetLatestBlocks()[0].GetHash()\n\t} else {\n\t\tfmt.Println(\"Download BlockChain from first node..\")\n\t\tDownload()\n\t}\n\tifStarted = true\n\tgo StartHeartBeat()\n\tgo CreateBlockWithTransactions()\n\n\tfmt.Fprintf(w, \"Node started\")\n}", "func (cc *ChainConfig) Start(stopC chan bool) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tmisc.PrintStack(false)\n\t\t\tVLogger.Fatal(\"flush fatal error \", zap.Error(err.(error)))\n\t\t}\n\t}()\n\n\tcc.Chain.Init(stopC)\n\tgo cc.Chain.Start()\n}", "func (self *Goldsmith) Chain(plugin Plugin) *Goldsmith {\n\tcontext := &Context{\n\t\tgoldsmith: self,\n\t\tplugin: plugin,\n\t\tfiltersExt: append(filterStack(nil), self.filters...),\n\t\tindex: self.index,\n\t\tfilesOut: make(chan *File),\n\t}\n\n\tif len(self.contexts) > 0 {\n\t\tcontext.filesIn = self.contexts[len(self.contexts)-1].filesOut\n\t}\n\n\tself.contexts = append(self.contexts, context)\n\tself.index++\n\n\treturn self\n}", "func (s *BasecluListener) EnterIterator(ctx *IteratorContext) {}", "func setupFileIngestion(ctx context.Context) error {\n\tsklog.Info(\"Checking out skia\")\n\trepo, err := gitinfo.CloneOrUpdate(ctx, common.REPO_SKIA, filepath.Join(*gitDir, \"skia\"), false)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not clone skia repo: %s\", err)\n\t}\n\n\tclient, err := auth.NewDefaultJWTServiceAccountClient(auth.SCOPE_READ_ONLY)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Problem setting up client OAuth: %s\", err)\n\t}\n\n\tstorageClient, err = storage.NewClient(ctx, option.WithHTTPClient(client))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Problem authenticating: %s\", err)\n\t}\n\n\tgcsClient := gcs.NewGCSClient(storageClient, *bucket)\n\tboltDB, err := db.NewBoltDB(*cachePath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not set up bolt db cache: %s\", err)\n\t}\n\tcoverageIngester = coverageingest.New(*extractDir, gcsClient, boltDB)\n\n\tcycle := func(v vcsinfo.VCS, coverageIngester coverageingest.Ingester) {\n\t\tsklog.Info(\"Begin coverage ingest cycle\")\n\t\tif err := v.Update(ctx, true, false); err != nil {\n\t\t\tsklog.Warningf(\"Could not update git repo, but continuing anyway: %s\", err)\n\t\t}\n\t\tcommits := []*vcsinfo.LongCommit{}\n\t\tfor _, c := range v.LastNIndex(*nCommits) {\n\t\t\tlc, err := v.Details(ctx, c.Hash, false)\n\t\t\tif err != nil {\n\t\t\t\tsklog.Errorf(\"Could not get commit info for git revision %s: %s\", c.Hash, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Reverse the order so the most recent commit is 
first\n\t\t\tcommits = append([]*vcsinfo.LongCommit{lc}, commits...)\n\t\t}\n\t\tcoverageIngester.IngestCommits(ctx, commits)\n\t\tsklog.Info(\"End coverage ingest cycle\")\n\t}\n\n\tgo func(v vcsinfo.VCS, coverageIngester coverageingest.Ingester) {\n\t\tcycle(repo, coverageIngester)\n\t\tfor range time.Tick(*ingestPeriod) {\n\t\t\tcycle(repo, coverageIngester)\n\t\t}\n\t}(repo, coverageIngester)\n\treturn nil\n}", "func Start(w http.ResponseWriter, r *http.Request) {\n\tif os.Args[1] == \"6686\" {\n\t\tSBC.UpdateEntireBlockChain(BLOCKCHAIN_JSON)\n\t\tUpload(w, r)\n\t} else {\n\t\tDownload()\n\t}\n\tFirstHeatdBeat()\n\tgo StartHeartBeat()\n\tgo StartTryingNonces()\n}", "func (f *File) walkSources() error {\n\tif f.mode == OReadOnly {\n\t\tlogf(\"readonly in fileWalkSources\\n\")\n\t\treturn nil\n\t}\n\n\tif err := f.source.lock2(f.msource, OReadWrite); err != nil {\n\t\tlogf(\"sourceLock2 failed in fileWalkSources\\n\")\n\t\treturn err\n\t}\n\n\tf.source.unlock()\n\tf.msource.unlock()\n\treturn nil\n}", "func (logSeeker *LogSeeker) BeginReader() {\n\tlogSeeker.reader = bufio.NewReader(logSeeker.file)\n}", "func (fs *FileSystemWatch) Begin() error {\n\twc, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfs.done = make(chan struct{})\n\tfs.notifier = wc\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-fs.done:\n\t\t\t\treturn\n\t\t\tcase event := <-wc.Events:\n\t\t\t\tif fs.events != nil {\n\t\t\t\t\tfs.events(event)\n\t\t\t\t}\n\t\t\tcase err := <-wc.Errors:\n\t\t\t\tif fs.errors != nil {\n\t\t\t\t\tfs.errors(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\tfor _, file := range fs.files {\n\t\tif err := fs.notifier.Add(file); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (l *Launcher) Start(sourceProvider launchers.SourceProvider, pipelineProvider pipeline.Provider, registry auditor.Registry, tracker *tailers.TailerTracker) {\n\tl.sources = sourceProvider.GetAddedForType(config.JournaldType)\n\tl.pipelineProvider = pipelineProvider\n\tl.registry = registry\n\tgo l.run()\n}", "func (ra *resourceAnalyzer) Start() {\n\tra.fsResourceAnalyzer.Start()\n}", "func (f *filtererProcessor) Start(ctx context.Context) {\n\tctx = f.StartInternal(ctx, filtererProcName)\n\tf.input.Start(ctx)\n}", "func (m *Manager) startWatchingFlow(req *pb.StartWatchingRequest, channel chan *pb.Change) error {\n\tif m.isFileAlreadyWatched(req.FilePath) {\n\t\tm.logf(\"file %s already watched, skipping\\n\", req.FileAlias)\n\t\treturn nil\n\t}\n\tfWatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := m.repo.StartListening(Listener{\n\t\tStartWatchingRequest: req,\n\t\tChannel: channel,\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"failed to start listening on repo, %w\", err)\n\t}\n\tm.fileWatcherClosers[req] = fWatcher.Close\n\tstopChannel := make(chan struct{})\n\tm.done[req.FileAlias] = stopChannel\n\n\tm.logn(fmt.Sprintf(\"starting watching over %s file\", req.FileAlias))\n\tgo m.watch(req.FileAlias, fWatcher, channel, stopChannel)\n\treturn fWatcher.Add(req.FilePath)\n}", "func (s *BaseGraffleParserListener) EnterFile(ctx *FileContext) {}", "func (st *buildStatus) start() {\n\tsetStatus(st.BuilderRev, st)\n\tgo func() {\n\t\terr := st.build()\n\t\tif err == errSkipBuildDueToDeps {\n\t\t\tst.setDone(true)\n\t\t} else {\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(st, \"\\n\\nError: %v\\n\", err)\n\t\t\t\tlog.Println(st.BuilderRev, \"failed:\", err)\n\t\t\t}\n\t\t\tst.setDone(err == 
nil)\n\t\t\tpool.CoordinatorProcess().PutBuildRecord(st.buildRecord())\n\t\t}\n\t\tmarkDone(st.BuilderRev)\n\t}()\n}", "func (s *BaseCobol85PreprocessorListener) EnterFilename(ctx *FilenameContext) {}", "func (r *reducer) start() {\n\tfor _, m := range r.mappers {\n\t\tm.start()\n\t}\n\tgo r.run()\n}", "func (p *parser) init(info *token.File, src []byte) {\n\tp.file = info\n\tp.scanner.Init(p.file, src, func(pos token.Position, msg string) {\n\t\tp.err = scanner.Error{Position: pos, Message: msg}\n\t})\n\tp.next()\n}", "func reset_input(){\nloc= 0\nfile= file[:0]\n\n\n/*30:*/\n\n\n//line gocommon.w:371\n\nif wf,err:=os.Open(file_name[0]);err!=nil{\nfile_name[0]= alt_file_name\nif wf,err= os.Open(file_name[0]);err!=nil{\nfatal(\"! Cannot open input file \",file_name[0])\n\n}else{\nfile= append(file,bufio.NewReader(wf))\n}\n}else{\nfile= append(file,bufio.NewReader(wf))\n}\nif cf,err:=os.Open(change_file_name);err!=nil{\nfatal(\"! Cannot open change file \",change_file_name)\n\n}else{\nchange_file= bufio.NewReader(cf)\n}\n\n\n\n/*:30*/\n\n\n//line gocommon.w:356\n\ninclude_depth= 0\nline= line[:0]\nline= append(line,0)\nchange_line= 0\nchange_depth= include_depth\nchanging= true\nprime_the_change_buffer()\nchanging= !changing\nloc= 0\ninput_has_ended= false\n}", "func (n *network) Start() {\n\n\tfor _, l := range n.learners {\n\t\tgo l.Run()\n\t}\n\n\tfor _, a := range n.acceptors {\n\t\tgo a.Run()\n\t}\n\n\tfor _, p := range n.proposers {\n\t\tgo p.Run()\n\t}\n\t\n}", "func (r Rust) SourceDirectory() string { return \"src\" }", "func (elem *Ftp) StartCrawling() (err error) {\n\tpwd, err := elem.Conn.CurrentDir()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor !elem.Obsolete {\n\t\telem.crawlDirectoryRecursive(pwd)\n\t}\n\treturn\n}", "func (f *fuseNode) getSource() (plugin.Parent, []string) {\n\tcur, segments := f.parent, []string{plugin.CName(f.entry)}\n\tfor cur != nil {\n\t\tif plugin.IsPrefetched(cur.entry) {\n\t\t\tsegments = append([]string{plugin.CName(cur.entry)}, segments...)\n\t\t\tcur = cur.parent\n\t\t} else {\n\t\t\t// All dirs must contain a parent or they wouldn't have been able to create children.\n\t\t\treturn cur.entry.(plugin.Parent), segments\n\t\t}\n\t}\n\treturn nil, segments\n}", "func (l *lexer) run() {\n\tfor state := lexStart; state != nil; {\n\t\tstate = state(l)\n\t}\n}", "func playCurrentSamples(samplePaths []string) {\n for _, samplePath := range samplePaths {\n go playback.PlayFile(samplePath)\n }\n}", "func (graphMinion *graphMinion) start() {\n\tgo func() {\n\t\tdefer graphMinion.wg.Done()\n\t\tfor {\n\n\t\t\t// pull reads from queue until done\n\t\t\tmappingData, ok := <-graphMinion.inputChannel\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif mappingData == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// increment the nodes contained in the mapping window\n\t\t\tmisc.ErrorCheck(graphMinion.graph.IncrementSubPath(mappingData.ContainedNodes, mappingData.Freq))\n\t\t}\n\t}()\n}", "func (p *SingleLineParser) Start() {\n\tp.lineHandler.Start()\n\tgo p.run()\n}", "func (ps *projectSetProcessor) Start(ctx context.Context) {\n\tctx = ps.StartInternal(ctx, projectSetProcName)\n\tps.input.Start(ctx)\n\tps.cancelChecker = cancelchecker.NewCancelChecker(ctx)\n}", "func InitCopperChain(options Options) {\r\n\r\n\t// Validate the options, and resort to default when needed\r\n\tif options.DataRoot == \"\" {\r\n\t\tfmt.Printf(\"empty DataRoot passed in options for copperchain, defaulting to %s.\", defaultOptions.DataRoot)\r\n\t\toptions.DataRoot = 
defaultOptions.DataRoot\r\n\t}\r\n\r\n\t// Initiate GoFiledb so blockchain instances can be saved\r\n\tgofiledb.InitClient(options.DataRoot)\r\n\r\n\t// Read the saved blockchain using GoFileDb and put as the global chain\r\n\tsetupMyChain()\r\n\r\n}", "func (p *Progress) StartFile(filename string) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tp.currentFiles[filename] = struct{}{}\n}", "func (f *Ferry) Start() error {\n\t// Event listeners for the BinlogStreamer and DataIterator are called\n\t// in the order they are registered.\n\t// The builtin event listeners are to write the events to the target\n\t// database.\n\t// Registering the builtin event listeners in Start allows the consumer\n\t// of the library to register event listeners that gets called before\n\t// and after the data gets written to the target database.\n\tf.BinlogStreamer.AddEventListener(f.BinlogWriter.BufferBinlogEvents)\n\tf.DataIterator.AddBatchListener(f.BatchWriter.WriteRowBatch)\n\n\tif f.inlineVerifier != nil {\n\t\tf.BinlogStreamer.AddEventListener(f.inlineVerifier.binlogEventListener)\n\t}\n\n\t// The starting binlog coordinates must be determined first. If it is\n\t// determined after the DataIterator starts, the DataIterator might\n\t// miss some records that are inserted between the time the\n\t// DataIterator determines the range of IDs to copy and the time that\n\t// the starting binlog coordinates are determined.\n\tvar sourcePos siddontangmysql.Position\n\tvar targetPos siddontangmysql.Position\n\n\tvar err error\n\tif f.StateToResumeFrom != nil {\n\t\tsourcePos, err = f.BinlogStreamer.ConnectBinlogStreamerToMysqlFrom(f.StateToResumeFrom.MinSourceBinlogPosition())\n\t} else {\n\t\tsourcePos, err = f.BinlogStreamer.ConnectBinlogStreamerToMysql()\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !f.Config.SkipTargetVerification {\n\t\tif f.StateToResumeFrom != nil && f.StateToResumeFrom.LastStoredBinlogPositionForTargetVerifier != zeroPosition {\n\t\t\ttargetPos, err = f.TargetVerifier.BinlogStreamer.ConnectBinlogStreamerToMysqlFrom(f.StateToResumeFrom.LastStoredBinlogPositionForTargetVerifier)\n\t\t} else {\n\t\t\ttargetPos, err = f.TargetVerifier.BinlogStreamer.ConnectBinlogStreamerToMysql()\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// If we don't set this now, there is a race condition where Ghostferry\n\t// is terminated with some rows copied but no binlog events are written.\n\t// This guarentees that we are able to restart from a valid location.\n\tf.StateTracker.UpdateLastResumableSourceBinlogPosition(sourcePos)\n\tif f.inlineVerifier != nil {\n\t\tf.StateTracker.UpdateLastResumableSourceBinlogPositionForInlineVerifier(sourcePos)\n\t}\n\n\tif !f.Config.SkipTargetVerification {\n\t\tf.StateTracker.UpdateLastResumableBinlogPositionForTargetVerifier(targetPos)\n\t}\n\n\treturn nil\n}", "func Start(in io.Reader, out io.Writer) {\n\tscanner := bufio.NewScanner(in)\n\tenv := object.NewEnvironment()\n\n\tfor {\n\t\tfmt.Printf(PROMPT)\n\t\tscanned := scanner.Scan()\n\t\tif !scanned {\n\t\t\treturn\n\t\t}\n\n\t\tline := scanner.Text()\n\n\t\t// A REPL that tokenizes and parses Monkey source code and prints\n\t\t// the AST.\n\t\tl := lexer.New(line)\n\t\tp := parser.New(l)\n\n\t\tprogram := p.ParseProgram()\n\t\tif len(p.Errors()) != 0 {\n\t\t\tprintParseErrors(out, p.Errors())\n\t\t\tcontinue\n\t\t}\n\n\t\tevaluated := evaluator.Eval(program, env)\n\t\tif evaluated != nil {\n\t\t\t// Print string representation of the object to stdout.\n\t\t\tio.WriteString(out, 
evaluated.Inspect())\n\t\t\tio.WriteString(out, \"\\n\")\n\t\t}\n\t}\n}", "func (s *Basegff3Listener) EnterStart(ctx *StartContext) {}", "func (s *BaseCobol85PreprocessorListener) EnterStartRule(ctx *StartRuleContext) {}", "func (r *RockyRainbow) Start() (err error) {\n\tif r.outFile, err = os.Create(r.OutputFile); err != nil {\n\t\treturn\n\t}\n\tif r.inFile, err = os.Open(r.InputFile); err != nil {\n\t\treturn\n\t}\n\tdefer func() {\n\t\tr.inFile.Close()\n\t\tr.outFile.Close()\n\t}()\n\tr.displayConf()\n\n\tp(\"Loading jobs queue\")\n\tgo r.queuer()\n\n\tp(\"Loading workers\")\n\tfor i := 0; i < r.WorkersCount; i++ {\n\t\tgo r.worker()\n\t}\n\n\tp(\"Waiting for workers to complete, type ENTER for current status...\")\n\tgo statusLoop(&r.status)\n\tfor i := 0; i < r.WorkersCount; i++ {\n\t\t<-r.done\n\t}\n\n\ttime.Sleep(1 * time.Second)\n\treturn\n}", "func (nc *NodeCaller) Start() {\n\tnc.caller.Start()\n}", "func (c *Chain) Init() *Chain {\n\tc.mux.Lock()\n\tdefer c.mux.Unlock()\n\tres, e := parse(c.Path)\n\tif e != nil {\n\t\tlog.Printf(\"chain not found at %s. creating one ...\", c.Path)\n\t\tc.LengthElements = 0\n\t\tc.Size = unsafe.Sizeof(c)\n\t\tc.Chain = []Block{}\n\t\treturn c\n\t}\n\n\traw := loadFromStorage(res)\n\tc.Chain = *formLinkedChainFromRawBlock(raw)\n\tc.LengthElements = len(c.Chain)\n\tc.Size = unsafe.Sizeof(c)\n\treturn c\n}", "func (r *Repl) Start(in io.Reader, out io.Writer) {\n\tfmt.Printf(\"Glu %s\\n\", version)\n\tfmt.Println(\"Type 'exit' to exit.\")\n\n\tscanner := bufio.NewScanner(in)\n\tfor {\n\t\t// Read\n\t\tfmt.Printf(\"\\n%s\", Prompt)\n\t\tok := scanner.Scan()\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\t\tinput := scanner.Text()\n\t\tif input == exit {\n\t\t\treturn\n\t\t} else if input == debugOn {\n\t\t\tr.config.debug = fullDebug()\n\t\t\tcontinue\n\t\t} else if input == debugOff {\n\t\t\tr.config.debug = defaultDebug()\n\t\t\tcontinue\n\t\t} else if input == ansiOn {\n\t\t\tr.ansi = NewANSI(true)\n\t\t\tcontinue\n\t\t} else if input == ansiOff {\n\t\t\tr.ansi = NewANSI(false)\n\t\t\tcontinue\n\t\t} else if strings.HasPrefix(input, run) {\n\t\t\tfp := strings.Trim(strings.Replace(input, run, \"\", 1), \" \")\n\t\t\tif fp != \"\" {\n\t\t\t\tdata, err := ioutil.ReadFile(fp)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"Failed to open file: \", err)\n\t\t\t\t}\n\t\t\t\tinput = string(data)\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"'%s' requires a valid file.\\n\", run)\n\t\t\t}\n\t\t}\n\t\tr.Exec(input)\n\t}\n}", "func (f *Input) makeReaders(filesPaths []string) []*Reader {\n\t// Open the files first to minimize the time between listing and opening\n\tfiles := make([]*os.File, 0, len(filesPaths))\n\tfor _, path := range filesPaths {\n\t\tif _, ok := f.SeenPaths[path]; !ok {\n\t\t\tif f.readerFactory.fromBeginning {\n\t\t\t\tf.Infow(\"Started watching file\", \"path\", path)\n\t\t\t} else {\n\t\t\t\tf.Infow(\"Started watching file from end. 
To read preexisting logs, configure the argument 'start_at' to 'beginning'\", \"path\", path)\n\t\t\t}\n\t\t\tf.SeenPaths[path] = struct{}{}\n\t\t}\n\t\tfile, err := os.Open(path) // #nosec - operator must read in files defined by user\n\t\tif err != nil {\n\t\t\tf.Debugf(\"Failed to open file\", zap.Error(err))\n\t\t\tcontinue\n\t\t}\n\t\tfiles = append(files, file)\n\t}\n\n\t// Get fingerprints for each file\n\tfps := make([]*Fingerprint, 0, len(files))\n\tfor _, file := range files {\n\t\tfp, err := f.readerFactory.newFingerprint(file)\n\t\tif err != nil {\n\t\t\tf.Errorw(\"Failed creating fingerprint\", zap.Error(err))\n\t\t\tcontinue\n\t\t}\n\t\tfps = append(fps, fp)\n\t}\n\n\t// Exclude any empty fingerprints or duplicate fingerprints to avoid doubling up on copy-truncate files\nOUTER:\n\tfor i := 0; i < len(fps); i++ {\n\t\tfp := fps[i]\n\t\tif len(fp.FirstBytes) == 0 {\n\t\t\tif err := files[i].Close(); err != nil {\n\t\t\t\tf.Errorf(\"problem closing file\", \"file\", files[i].Name())\n\t\t\t}\n\t\t\t// Empty file, don't read it until we can compare its fingerprint\n\t\t\tfps = append(fps[:i], fps[i+1:]...)\n\t\t\tfiles = append(files[:i], files[i+1:]...)\n\t\t\ti--\n\t\t\tcontinue\n\t\t}\n\t\tfor j := i + 1; j < len(fps); j++ {\n\t\t\tfp2 := fps[j]\n\t\t\tif fp.StartsWith(fp2) || fp2.StartsWith(fp) {\n\t\t\t\t// Exclude\n\t\t\t\tif err := files[i].Close(); err != nil {\n\t\t\t\t\tf.Errorf(\"problem closing file\", \"file\", files[i].Name())\n\t\t\t\t}\n\t\t\t\tfps = append(fps[:i], fps[i+1:]...)\n\t\t\t\tfiles = append(files[:i], files[i+1:]...)\n\t\t\t\ti--\n\t\t\t\tcontinue OUTER\n\t\t\t}\n\t\t}\n\t}\n\n\treaders := make([]*Reader, 0, len(fps))\n\tfor i := 0; i < len(fps); i++ {\n\t\treader, err := f.newReader(files[i], fps[i])\n\t\tif err != nil {\n\t\t\tf.Errorw(\"Failed to create reader\", zap.Error(err))\n\t\t\tcontinue\n\t\t}\n\t\treaders = append(readers, reader)\n\t}\n\n\treturn readers\n}", "func Continue(inFileName, machine string) {\n\tif Debug {\n\t\tdefer debug.TimeMe(time.Now())\n\t}\n\n\tvar (\n\t\tnProcs int = 1\n\t\tinFileNameChan = make(chan string, 1)\n\t\tcssInfo = make(chan map[string]string, 1)\n\t\tpbsLaunchChannel = make(chan string, 100)\n\t\tdone = make(chan struct{})\n\t)\n\t\n\tfor idx:=0; idx<nProcs; idx++ {\n\t\tgo Out2ICs(inFileNameChan, cssInfo)\n\t\tgo CreateStartScripts(cssInfo, machine, pbsLaunchChannel, done)\n\t\t// Consumes pbs file names\n\t\tgo func (pbsLaunchChannel chan string) {\n\t\t\tfor _ = range pbsLaunchChannel {\n\t\t\t}\n\t\t} (pbsLaunchChannel)\n\t}\n\t\n\t// Check if we have to run on all the files in the folder \n\t// and not only on a selected one \n\tif inFileName == \"all\" || inFileName == \"*\" || \n\t\tinFileName == \"\" || strings.Contains(inFileName, \"*\") {\n\t\truns, runMap, mapErr := FindLastRound(\"*-comb*-NCM*-fPB*-W*-Z*-run*-rnd*.*\")\n\t\tlog.Println(\"Selected to continue round for all the runs in the folder\")\n\t\tlog.Println(\"Found: \")\n\t\tfor _, run := range runs {\n\t\t\tif mapErr != nil && (len(runMap[run][\"err\"]) == 0 || len(runMap[run][\"out\"]) == 0) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfmt.Printf(\"%v\\n\", runMap[run][\"out\"][len(runMap[run][\"out\"])-1])\n\t\t}\n\t\tfmt.Println()\n\t\t// Fill the channel with the last round of each run\n\t\tfor _, run := range runs {\n\t\t\tif mapErr != nil && (len(runMap[run][\"err\"]) == 0 || len(runMap[run][\"out\"]) == 0) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tinFileNameChan <- runMap[run][\"out\"][len(runMap[run][\"out\"])-1]\n\t\t}\n\t} else {\n\t\t// Only 
continue the selected file\n\t\tinFileNameChan <- inFileName\n\t}\n\t\n\t// Close the channel, if you forget it, goroutines \n\t// will wait forever\n\tclose(inFileNameChan)\n\t\n\t// Wait the CreateStartScripts goroutines to finish\n\tfor idx:=0; idx<nProcs; idx++ {\n\t\t<-done // wait the goroutine to finish\n\t}\n}", "func (b *BlockExplorerTestSetUp) Start() error {\n\tb.ctx = context.Background()\n\n\tpulseExtractor := extractor.NewPlatformPulseExtractor(b.PulseClient)\n\tb.extr = extractor.NewPlatformExtractor(100, 0, 100, 100, pulseExtractor, b.ExporterClient, func() {})\n\terr := b.extr.Start(b.ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb.trsf = transformer.NewMainNetTransformer(b.extr.GetJetDrops(b.ctx), 100)\n\terr = b.trsf.Start(b.ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb.strg = storage.NewStorage(b.DB)\n\tb.cont, err = controller.NewController(cfg, b.extr, b.strg, 2)\n\tif err != nil {\n\t\treturn err\n\t}\n\tb.proc = processor.NewProcessor(b.trsf, b.strg, b.cont, 1)\n\terr = b.proc.Start(b.ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (self *Build) exec(moduleLabel core.Label, fileType core.FileType) error {\n\tthread := createThread(self, moduleLabel, fileType)\n\n\tsourceData, err := self.sourceFileReader(moduleLabel)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to execute %v: read failed: %v\", moduleLabel, err)\n\t}\n\n\t_, err = starlark.ExecFile(thread, moduleLabel.String(), sourceData,\n\t\tbuiltins.InitialGlobals(fileType))\n\treturn err\n}", "func (d *Decoder) Start(ctx context.Context, t astiencoder.CreateTaskFunc) {\n\td.BaseNode.Start(ctx, t, func(t *astikit.Task) {\n\t\t// Make sure to wait for all dispatcher subprocesses to be done so that they are properly closed\n\t\tdefer d.d.wait()\n\n\t\t// Make sure to stop the chan properly\n\t\tdefer d.c.Stop()\n\n\t\t// Start chan\n\t\td.c.Start(d.Context())\n\t})\n}", "func (this *CertWatcher) start(stopCh <-chan struct{}) error {\n\tfiles := []string{this.certPath, this.keyPath, this.cacertPath, this.cakeyPath}\n\n\tfor _, f := range files {\n\t\tif f != \"\" {\n\t\t\tif err := this.watcher.Add(f); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tgo this.watch()\n\n\tgo func() {\n\t\t// Block until the stop channel is closed.\n\t\t<-stopCh\n\n\t\t_ = this.watcher.Close()\n\t}()\n\treturn nil\n}", "func (t *TransformCmd) Init(args []string) error {\n\n\tif err := t.fs.Parse(args); err != nil {\n\t\treturn err\n\t}\n\n\tif t.transFile == \"\" {\n\t\treturn errors.New(\"No transformation file provided\")\n\t}\n\n\tif t.fs.NArg() < 1 {\n\t\treturn errors.New(\"No input file(s) provided\")\n\t}\n\n\tt.inFiles = t.fs.Args()\n\n\treturn nil\n}", "func (ledger *Ledger) ChainTxBegin(txID string) {\n\tledger.chaincodeState.TxBegin(txID)\n}", "func (s *BaseCymbolListener) EnterFile(ctx *FileContext) {}", "func (mc *Chain) RequestStartChain(n *node.Node, start, started *int) error {\n\tif node.Self.Underlying().GetKey() == n.ID {\n\t\tif !mc.isStarted() {\n\t\t\t*start++\n\t\t} else {\n\t\t\t*started++\n\t\t}\n\t\treturn nil\n\t}\n\thandler := func(ctx context.Context, entity datastore.Entity) (interface{}, error) {\n\t\tstartChain, ok := entity.(*StartChain)\n\t\tif !ok {\n\t\t\terr := common.NewError(\"invalid object\", fmt.Sprintf(\"entity: %v\", entity))\n\t\t\tlogging.Logger.Error(\"failed to request start chain\", zap.Any(\"error\", err))\n\t\t\treturn nil, err\n\t\t}\n\t\tif startChain.Start {\n\t\t\t*start++\n\t\t} else {\n\t\t\t*started++\n\t\t}\n\t\treturn 
startChain, nil\n\t}\n\tparams := &url.Values{}\n\tparams.Add(\"round\", strconv.FormatInt(mc.GetCurrentRound(), 10))\n\tctx := common.GetRootContext()\n\tn.RequestEntityFromNode(ctx, ChainStartSender, params, handler)\n\treturn nil\n}", "func (p *plugin) analyzeSourceFiles() error {\n\tfor _, f := range p.request.GetProtoFile() {\n\t\tvar generate bool\n\t\tfor _, g := range p.request.GetFileToGenerate() {\n\t\t\tif g == f.GetName() {\n\t\t\t\tgenerate = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif generate {\n\t\t\tif err := p.analyzeFile(f); err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to analyze proto file '%s': %s\", f.GetName(), err.Error())\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (s *BasebrainfuckListener) EnterFile_(ctx *File_Context) {}", "func Start(objects []*inject.Object, log Logger) error {\n\tlevels, err := levels(objects)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i := len(levels) - 1; i >= 0; i-- {\n\t\tlevel := levels[i]\n\t\tfor _, o := range level {\n\t\t\tif openerO, ok := o.Value.(Opener); ok {\n\t\t\t\tif log != nil {\n\t\t\t\t\tlog.Debugf(\"opening %s\", o)\n\t\t\t\t}\n\t\t\t\tif err := openerO.Open(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tif starterO, ok := o.Value.(Starter); ok {\n\t\t\t\tif log != nil {\n\t\t\t\t\tlog.Debugf(\"starting %s\", o)\n\t\t\t\t}\n\t\t\t\tif err := starterO.Start(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (s *SourceProps) processPaths(ctx blueprint.BaseModuleContext) {\n\n\tprefix := projectModuleDir(ctx)\n\n\tfor _, src := range s.Srcs {\n\t\tif strings.HasPrefix(filepath.Clean(src), \"../\") {\n\t\t\tbackend.Get().GetLogger().Warn(warnings.RelativeUpLinkWarning, ctx.BlueprintsFile(), ctx.ModuleName())\n\t\t}\n\t}\n\n\tsrcs := utils.MixedListToFiles(s.Srcs)\n\ttargets := utils.MixedListToBobTargets(s.Srcs)\n\ts.Srcs = append(utils.PrefixDirs(srcs, prefix), utils.PrefixAll(targets, \":\")...)\n}", "func (proc *Proc) Start() error {\n\tinFile, err := common.GetFile(proc.Infile)\n\tif err != nil {\n\t\treturn err\n\t}\n\toutFile, err := common.GetFile(proc.Outfile)\n\tif err != nil {\n\t\treturn err\n\t}\n\terrFile, err := common.GetFile(proc.Errfile)\n\tif err != nil {\n\t\treturn err\n\t}\n\twd, _ := os.Getwd()\n\tprocAtr := &os.ProcAttr{\n\t\tDir: wd,\n\t\tEnv: os.Environ(),\n\t\tFiles: []*os.File{\n\t\t\t// os.Stdin,\n\t\t\tinFile,\n\t\t\toutFile,\n\t\t\terrFile,\n\t\t},\n\t\tSys: proc.Sys,\n\t}\n\targs := append([]string{proc.Name}, proc.Args...)\n\tprocess, err := os.StartProcess(proc.Cmd, args, procAtr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tproc.process = process\n\tproc.Pid = proc.process.Pid\n\terr = common.WriteFile(proc.Pidfile, []byte(strconv.Itoa(proc.process.Pid)))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tproc.Status.SetStatus(\"started\")\n\treturn nil\n}", "func (s *BasevhdlListener) EnterFile_declaration(ctx *File_declarationContext) {}", "func (e *binaryExprEvaluator) start() {\n\te.lhs.start()\n\te.rhs.start()\n\tgo e.run()\n}", "func (p *Bundle) Start() {\n\tp.RLock()\n\tdefer p.RUnlock()\n\n\tfor _, probe := range p.probes {\n\t\tprobe.Start()\n\t}\n}", "func (s *BasearithmeticListener) EnterFile_(ctx *File_Context) {}", "func run() {\n\n\terr := os.Chdir(project_dir)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tfiles, err := ioutil.ReadDir(\"./\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\t//loop through everybody's directory\n\tfor _, file := range files {\n\t\tif !isDir(file) 
{\n\t\t\tbreak\n\t\t}\n\t\tstudentName := file.Name()\n\t\tfmt.Println(\"Running project for: \" + studentName)\n\t\terr := os.Chdir(studentName + \"/Feedback Attachment(s)\")\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Chdir(\"../..\")\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, input := range inputs {\n\t\t\tfmt.Println(\"Testing: \" + input)\n\t\t\tcmd := exec.Command(\"./\"+binName, input)\n\t\t\tif err = runCmd(cmd); err != nil {\n\t\t\t\t//It's POSSIBLE that they are taking in the file name (without extension. Try that.\n\t\t\t\tfilename := input\n\t\t\t\textension := filepath.Ext(filename)\n\t\t\t\tname := filename[0 : len(filename)-len(extension)]\n\t\t\t\tcmd = exec.Command(\"./\"+binName, name)\n\t\t\t\tif err = runCmd(cmd); err != nil {\n\t\t\t\t\tfmt.Println(\"Test case \" + name + \" failed.\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tos.Chdir(\"../../\")\n\t}\n\tos.Chdir(\"../\")\n}", "func (s *BaseCobol85PreprocessorListener) EnterCopySource(ctx *CopySourceContext) {}", "func (p *Plan) Start(opts Options) (*PlanIterator, error) {\n\t// Ask the source for its iterator.\n\tsrc, err := p.source.Iterate(opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create an iterator that can be used to perform the planning process.\n\treturn &PlanIterator{\n\t\tp: p,\n\t\topts: opts,\n\t\tsrc: src,\n\t\turns: make(map[resource.URN]bool),\n\t\tcreates: make(map[resource.URN]bool),\n\t\tupdates: make(map[resource.URN]bool),\n\t\treplaces: make(map[resource.URN]bool),\n\t\tdeletes: make(map[resource.URN]bool),\n\t\tsames: make(map[resource.URN]bool),\n\t\tpendingNews: make(map[resource.URN]Step),\n\t\tdones: make(map[*resource.State]bool),\n\t}, nil\n}", "func (g *Generator) enter() {\n\tg.depth++\n}" ]
[ "0.6270386", "0.53349864", "0.52632797", "0.51940596", "0.5160789", "0.5139248", "0.51322466", "0.50925684", "0.50691605", "0.5064905", "0.50103533", "0.49982744", "0.49546602", "0.49542066", "0.49437958", "0.49421835", "0.4914773", "0.49129766", "0.49052376", "0.48682693", "0.48267496", "0.48240015", "0.47943082", "0.4769192", "0.47596532", "0.47523275", "0.47515005", "0.47271594", "0.47219944", "0.46994436", "0.46866184", "0.46856126", "0.46854424", "0.4684436", "0.46739784", "0.46633276", "0.46616614", "0.4658064", "0.46537727", "0.46455738", "0.46409792", "0.4639922", "0.46334586", "0.46301517", "0.46217182", "0.4620657", "0.46190614", "0.4611293", "0.46030122", "0.4597511", "0.45895097", "0.45867816", "0.45844936", "0.45730698", "0.45694593", "0.4568968", "0.45635462", "0.45503277", "0.45501226", "0.45287544", "0.4527091", "0.4524551", "0.45201206", "0.4519971", "0.45189336", "0.45177364", "0.4517064", "0.45139927", "0.4510207", "0.4500104", "0.449723", "0.44966546", "0.44935638", "0.44908926", "0.4488684", "0.4483748", "0.44715375", "0.44709274", "0.44635457", "0.44624075", "0.44544387", "0.44491443", "0.44420272", "0.4433048", "0.44244763", "0.44222182", "0.44197485", "0.44160506", "0.44134113", "0.44111764", "0.44104576", "0.44020784", "0.43956724", "0.43879306", "0.438579", "0.43789184", "0.43760362", "0.4375024", "0.43682992", "0.43669978" ]
0.65363026
0
Cache enables caching in cacheDir for the remainder of the chain.
func (goldsmith *Goldsmith) Cache(cacheDir string) *Goldsmith {
	goldsmith.fileCache = &cache{cacheDir}
	return goldsmith
}
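For context, a minimal usage sketch of the method above. Begin is an assumed entry point of the same library (its Chain method appears among the snippets in this dump); the import path and directory names are illustrative assumptions, not part of the original snippet:

package main

import "github.com/FooSoft/goldsmith" // assumed import path for the library shown above

func main() {
	// Enabling the cache early means every stage chained afterwards
	// can reuse results stored under ".gs-cache".
	gs := goldsmith.Begin("content") // Begin is an assumed entry point
	gs = gs.Cache(".gs-cache")       // method from the document field above
	_ = gs                           // further Chain(...) calls would follow here
}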
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (self *Goldsmith) Cache(cacheDir string) *Goldsmith {\n\tself.cache = &cache{cacheDir}\n\treturn self\n}", "func OptCacheDirectory(dir string) Opt {\n\treturn func(app *App) {\n\t\tapp.cacheDirectory = dir\n\t}\n}", "func (me *S3FileSystemImpl) addDirCache(key string, di *fscommon.DirItem) {\n\tif key[len(key)-1] == '/' {\n\t\tkey = key[:len(key)-1]\n\t}\n\tme.dirCache.Add(key, di, fscommon.CACHE_LIFE_SHORT)\n}", "func (c *Cache) cacheDir(name string) {\n\tname = clean(name)\n\tc.itemMu.Lock()\n\tdefer c.itemMu.Unlock()\n\tfor {\n\t\titem := c.item[name]\n\t\tif item != nil {\n\t\t\tbreak\n\t\t}\n\t\tc.item[name] = newCacheItem(false)\n\t\tif name == \"\" {\n\t\t\tbreak\n\t\t}\n\t\tname = vfscommon.FindParent(name)\n\t}\n}", "func (c *cache) Store(dir, src, key string) {\n\tif c.shouldStore(dir, src) {\n\t\tc.store(src, key)\n\t}\n}", "func (s *Server) EnableCache(cacheFolder string, cryptoKey string) {\n\tvar useCrypto = false\n\tif cryptoKey != \"\" {\n\t\tuseCrypto = true\n\t}\n\ts.DiskCache = cache.NewDisk(cacheFolder, cryptoKey, useCrypto)\n\treturn\n}", "func (g *Goproxy) cache(\n\tctx context.Context,\n\tname string,\n) (io.ReadCloser, error) {\n\tif g.Cacher == nil {\n\t\treturn nil, os.ErrNotExist\n\t}\n\n\treturn g.Cacher.Get(ctx, name)\n}", "func (c *CacheManager) BuildCached() {\n\t/* Iterate over cachedDir (1st level only) and retrieve filenames */\n\tdir, err := os.Open(c.cachedDir)\n\tif err != nil { return }\n\tdefer dir.Close()\n\tfileInfos, err := dir.Readdir(-1)\n\tif err != nil { return }\n\tfor _, fi := range fileInfos {\n\t\tfilename := utils.RemoveExtension(fi.Name())\n\t\tc.cached = append(c.cached, filename)\n\t\tfor i := 0; i < len(c.availables); i++ {\n\t\t\tif c.availables[i].Name == filename {\n\t\t\t\tc.availables[i].Generated = true\n\t\t\t}\n\t\t}\n\t}\n}", "func (f *Fs) DirCacheFlush() {\n\tctx := context.Background()\n\t_ = f.multithread(ctx, func(ctx context.Context, u *upstream) error {\n\t\tif do := u.f.Features().DirCacheFlush; do != nil {\n\t\t\tdo()\n\t\t}\n\t\treturn nil\n\t})\n}", "func (c *cacher) SetCache(ctx context.Context, item goproxy.Cache) (err error) {\n\tif c.readonly {\n\t\treturn nil\n\t}\n\tfilename := path.Join(c.root, item.Name())\n\tif err := os.MkdirAll(path.Dir(filename), 0755); err != nil {\n\t\treturn err\n\t}\n\tfile, err := os.Create(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\tdefer item.Close()\n\n\t_, err = io.Copy(file, item)\n\n\treturn err\n}", "func Cache(inp <-chan *fs.FsFile) <-chan *FsCache {\n\tcha := make(chan *FsCache)\n\tfc := New()\n\tgo func(inp <-chan *fs.FsFile) {\n\t\tdefer close(cha)\n\t\t<-fc.Done(inp)\n\t\tcha <- fc\n\t}(inp)\n\treturn cha\n}", "func (s *Scope) cacheDir() (string, error) {\n\tp, err := s.dataDir()\n\tif err != nil {\n\t\treturn p, err\n\t}\n\n\treturn filepath.Join(p, \"Cache\"), nil\n}", "func (e *Environment) Cache() error {\n\tif e.cached {\n\t\tpanic(\"Should not be attempting to re-cache information in the environment.\")\n\t}\n\n\tif err := e.createStore(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := e.createPackager(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := e.createMessageManager(); err != nil {\n\t\treturn err\n\t}\n\n\te.cached = true\n\treturn nil\n}", "func (c *cacher) Cache(ctx context.Context, name string) (item goproxy.Cache, err error) {\n\tfilename := path.Join(c.root, name)\n\tstat, err := os.Stat(filename)\n\tif err != nil {\n\t\treturn nil, goproxy.ErrCacheNotFound\n\t}\n\tfile, err := os.Open(filename)\n\tif err != 
nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\n\tdata, err := ioutil.ReadAll(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmd5sum := md5.Sum(data)\n\n\treturn &cache{\n\t\tr: bytes.NewReader(data),\n\t\tsize: int64(len(data)),\n\t\tname: name,\n\t\tmime: mimeType(filename),\n\t\tmodtime: stat.ModTime(),\n\t\tchecksum: md5sum[:],\n\t}, nil\n}", "func (m *ConfigManager) Cache() cache.Cache { return m.cache }", "func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) {\n\td.mu.Lock()\n\t_, old := d.dirs[dir]\n\td.dirs[dir] = &info\n\tvar listeners []cacheListener\n\tfor _, l := range d.listeners {\n\t\tlisteners = append(listeners, l)\n\t}\n\td.mu.Unlock()\n\n\tif !old {\n\t\tfor _, l := range listeners {\n\t\t\tl(info)\n\t\t}\n\t}\n}", "func TestChainedCache(t *testing.T) {\n\tcache := New()\n\tsimpleM := &SimpleMemoryCache{\n\t\tdata: make(map[string]int64),\n\t}\n\tsourceM := &MockSource{\n\t\tdata: make(map[string]int64),\n\t}\n\tvar sample int64 = 20190102\n\tsourceM.data[\"foo\"] = sample\n\tcache.Use(simpleM).Use(sourceM)\n\n\t// test normal data\n\tcached, err := cache.Get(\"foo\")\n\n\tif err != nil {\n\t\tt.Errorf(\"Failed to get data, err = %s\", err)\n\t}\n\n\tif cached != sample {\n\t\tt.Errorf(\"Failed to get, %d != %d\", cached, sample)\n\t}\n\n\t// test non-existent key\n\tcached, err = cache.Get(\"bar\")\n\n\tif err == nil {\n\t\tt.Error(\"Non-existent should return error from source database.\")\n\t}\n\n\t// test invalid key\n\tcached, err = cache.Get(\"\")\n\tif cached == nil {\n\t\tt.Error(\"Invalid key should failed.\")\n\t}\n\n\t// set and get\n\tsample = 19700101\n\tcache.Set(\"bar\", sample)\n\tcached, err = cache.Get(\"\")\n\tif cached != sample {\n\t\tt.Errorf(\"Failed to set and get, %d != %d\", sample, cached)\n\t}\n}", "func CacheSetup() {\n\tfor k, v := range DataStore {\n\t\treq := NewRequest(WRITE)\n\t\treq.Key = k\n\t\treq.Payload = v\n\t\tRequestChannel <- req\n\t}\n}", "func (se *shellExecutor) writeCache() {\n\tif !se.inCmd.HasCache() {\n\t\treturn\n\t}\n\n\tdefer util.RecoverPanic(func(e error) {\n\t\tutil.LogWarn(e.Error())\n\t})\n\n\tdir, err := ioutil.TempDir(\"\", \"_cache_output_\")\n\tif err != nil {\n\t\tutil.LogWarn(err.Error())\n\t\treturn\n\t}\n\n\tse.cacheOutputDir = dir\n\tcache := se.inCmd.Cache\n\n\tfor _, path := range cache.Paths {\n\t\tpath = filepath.Clean(path)\n\t\tfullPath := filepath.Join(se.jobDir, path)\n\n\t\tinfo, exist := util.IsFileExistsAndReturnFileInfo(fullPath)\n\t\tif !exist {\n\t\t\tcontinue\n\t\t}\n\n\t\tnewPath := filepath.Join(dir, path)\n\n\t\tif info.IsDir() {\n\t\t\terr := util.CopyDir(fullPath, newPath)\n\t\t\tutil.PanicIfErr(err)\n\n\t\t\tutil.LogDebug(\"dir %s write back to cache dir\", newPath)\n\t\t\tcontinue\n\t\t}\n\n\t\terr := util.CopyFile(fullPath, newPath)\n\t\tutil.PanicIfErr(err)\n\t\tutil.LogDebug(\"file %s write back to cache dir\", newPath)\n\t}\n}", "func InitCache() {\n\n\tlog.Print(\"Init cache\")\n\t_, err := os.Stat(\"_cache\")\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\terr := os.Mkdir(\"_cache\", os.FileMode(0755))\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(\"Can't create _cache folder\")\n\t\t\t} else {\n\t\t\t\tlog.Print(\"_cache folder created\")\n\t\t\t}\n\t\t}\n\t}\n\n}", "func (m *MinIO) Cache(ctx context.Context, name string) (goproxy.Cache, error) {\n\tif m.loadOnce.Do(m.load); m.loadError != nil {\n\t\treturn nil, m.loadError\n\t}\n\n\tobject, err := m.client.GetObjectWithContext(\n\t\tctx,\n\t\tm.BucketName,\n\t\tpath.Join(m.Root, 
name),\n\t\tminio.GetObjectOptions{},\n\t)\n\tif err != nil {\n\t\tif er, ok := err.(minio.ErrorResponse); ok &&\n\t\t\ter.Code == \"NoSuchKey\" {\n\t\t\treturn nil, goproxy.ErrCacheNotFound\n\t\t}\n\n\t\treturn nil, err\n\t}\n\n\tobjectInfo, err := object.Stat()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tchecksum, err := hex.DecodeString(strings.Trim(objectInfo.ETag, `\"`))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &minioCache{\n\t\tobject: object,\n\t\tname: name,\n\t\tsize: objectInfo.Size,\n\t\tmodTime: objectInfo.LastModified,\n\t\tchecksum: checksum,\n\t}, nil\n}", "func Cache(c cache.Cacher, opts CacheOptions) Middleware {\n\tif len(opts.AllowedMethods) == 0 {\n\t\topts.AllowedMethods = []string{\n\t\t\thttp.MethodGet,\n\t\t\thttp.MethodHead,\n\t\t}\n\t}\n\n\tif len(opts.AllowedStatuses) == 0 {\n\t\topts.AllowedStatuses = []int{\n\t\t\t0,\n\t\t\thttp.StatusOK,\n\t\t}\n\t}\n\n\tif opts.TTL == 0 {\n\t\topts.TTL = 15 * time.Minute\n\t}\n\n\tif len(opts.StaleStatuses) == 0 {\n\t\topts.StaleStatuses = []int{\n\t\t\thttp.StatusInternalServerError,\n\t\t}\n\t}\n\n\tif opts.StaleTTL == 0 {\n\t\topts.StaleTTL = 24 * time.Hour\n\t}\n\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tctx := ContextFromRequest(r)\n\t\t\tlog := hlog.FromRequest(r)\n\n\t\t\tmethodNotAllowed := true\n\t\t\tfor _, method := range opts.AllowedMethods {\n\t\t\t\tif r.Method == method {\n\t\t\t\t\tmethodNotAllowed = false\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !cache.UseCache(ctx) || methodNotAllowed {\n\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tvar resp CachedResponse\n\t\t\tcacheKey := CacheKey(opts.KeyPrefix, r)\n\n\t\t\t// try to write cached data\n\t\t\tdata, cacheErr := c.Get(ctx, cacheKey)\n\t\t\tif cacheErr == nil {\n\t\t\t\tcacheErr = json.Unmarshal(data, &resp)\n\t\t\t\tif cacheErr != nil {\n\t\t\t\t\tlog.Err(cacheErr).Msg(\"Failed to unmarshal cached response\")\n\t\t\t\t\twrite.Error(w, cacheErr.Error(), http.StatusInternalServerError)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif time.Now().Unix() < resp.Expiration {\n\t\t\t\t\t// copy cached response headers from downstream handlers\n\t\t\t\t\tfor key := range resp.Header {\n\t\t\t\t\t\tif w.Header().Get(key) == \"\" {\n\t\t\t\t\t\t\tw.Header().Set(key, resp.Header.Get(key))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tAddCacheHeader(w.Header())\n\n\t\t\t\t\tif resp.StatusCode != 0 {\n\t\t\t\t\t\tw.WriteHeader(resp.StatusCode)\n\t\t\t\t\t}\n\n\t\t\t\t\tw.Write(resp.Body)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// get fresh response from handler\n\t\t\tcw := NewCacheWriter(w, opts.UseStale, opts.StaleStatuses)\n\t\t\tnext.ServeHTTP(cw, r)\n\n\t\t\tstatusCodeNotAllowed := true\n\t\t\tfor _, status := range opts.AllowedStatuses {\n\t\t\t\tif cw.statusCode == status {\n\t\t\t\t\tstatusCodeNotAllowed = false\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif statusCodeNotAllowed {\n\t\t\t\tfmt.Println(\"status code:\", cw.statusCode)\n\n\t\t\t\tif opts.UseStale {\n\t\t\t\t\t// If stale data can be used, the response needs\n\t\t\t\t\t// to be written to the ResponseWriter; the\n\t\t\t\t\t// CacheWriter only wrote the response body to\n\t\t\t\t\t// its internal buffer.\n\t\t\t\t\tif includesStaleStatus(cw.statusCode, opts.StaleStatuses) {\n\t\t\t\t\t\tif cacheErr == nil {\n\t\t\t\t\t\t\tfor key := range resp.Header {\n\t\t\t\t\t\t\t\tw.Header().Set(key, 
resp.Header.Get(key))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tAddCacheHeader(w.Header())\n\t\t\t\t\t\t\tw.WriteHeader(resp.StatusCode)\n\t\t\t\t\t\t\tw.Write(resp.Body)\n\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tdata, _ := cw.ReadAll()\n\t\t\t\t\t\tw.WriteHeader(cw.statusCode)\n\t\t\t\t\t\tw.Write(data)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// covers cases where previous responses were cached\n\t\t\t\t_ = c.Del(ctx, cacheKey)\n\n\t\t\t\t// response has been written, end early\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tbody, err := cw.ReadAll()\n\t\t\tif err != nil {\n\t\t\t\tlog.Err(err).Msg(\"Failed to read cache buffer\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tcachedResp := CachedResponse{\n\t\t\t\tcw.Header().Clone(),\n\t\t\t\tbody,\n\t\t\t\tcw.statusCode,\n\t\t\t\ttime.Now().Unix() + int64(opts.TTL),\n\t\t\t}\n\n\t\t\tdata, err = json.Marshal(&cachedResp)\n\t\t\tif err != nil {\n\t\t\t\tlog.Err(err).Msg(\"Failed to marshal cached response\")\n\t\t\t\twrite.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// store response in cache\n\t\t\terr = c.Set(ctx, cacheKey, data, opts.StaleTTL)\n\t\t\tif err != nil {\n\t\t\t\tlog.Err(err).Msg(\"Failed to set data in cache\")\n\t\t\t}\n\t\t})\n\t}\n}", "func (a *AuthConfig) Cache(fileName string) error {\n\tf, err := os.Create(fileName)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\tb, err := json.Marshal(a)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\t_, err = f.Write(b)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\treturn f.Close()\n}", "func (f *Fs) DirCacheFlush() {\n\tf.dirCache.ResetRoot()\n}", "func (c *Cache) withCache(transact func(*credCache)) {\n\t// Grab the file lock so we have exclusive access to read the file.\n\tif err := c.trylockFunc(); err != nil {\n\t\tc.errReporter(fmt.Errorf(\"could not lock cache file: %w\", err))\n\t\treturn\n\t}\n\n\t// Unlock the file at the end of this call, bubbling up the error if things were otherwise successful.\n\tdefer func() {\n\t\tif err := c.unlockFunc(); err != nil {\n\t\t\tc.errReporter(fmt.Errorf(\"could not unlock cache file: %w\", err))\n\t\t}\n\t}()\n\n\t// Try to read the existing cache.\n\tcache, err := readCache(c.path)\n\tif err != nil {\n\t\t// If that fails, fall back to resetting to a blank slate.\n\t\tc.errReporter(fmt.Errorf(\"failed to read cache, resetting: %w\", err))\n\t\tcache = emptyCache()\n\t}\n\n\t// Normalize the cache before modifying it, to remove any entries that have already expired.\n\tcache = cache.normalized()\n\n\t// Process/mutate the cache using the provided function.\n\ttransact(cache)\n\n\t// Normalize again to put everything into a known order.\n\tcache = cache.normalized()\n\n\t// Marshal the cache back to YAML and save it to the file.\n\tif err := cache.writeTo(c.path); err != nil {\n\t\tc.errReporter(fmt.Errorf(\"could not write cache: %w\", err))\n\t}\n}", "func (f *fragment) cachePath() string { return f.path + cacheExt }", "func (m *resmgr) setupCache() error {\n\tvar err error\n\n\toptions := cache.Options{CacheDir: opt.RelayDir}\n\tif m.cache, err = cache.NewCache(options); err != nil {\n\t\treturn resmgrError(\"failed to create cache: %v\", err)\n\t}\n\n\treturn nil\n\n}", "func (ro *ReaderOptions) CachePath() string {\n\tif ro.CacheDir != \"\" {\n\t\treturn ro.CacheDir\n\t}\n\n\treturn filepath.Join(ro.WorkDir, defaultCacheSubDir)\n}", "func Cache(typ reflect.Type, ttl time.Duration) decorators.Decorator {\n\treturn CacheWithKey(typ, ttl, func(r *http.Request, ps 
httprouter.Params, username string) (*string, *ServerError) {\n\t\trequestUri := r.RequestURI\n\t\tparsedUrl, err := url.Parse(requestUri)\n\t\tif err != nil {\n\t\t\treturn nil, NewServerError(fmt.Sprintf(\"Failed to parse '%s': %v\", r.RequestURI, err.Error()), username, MissingErrorCode, err)\n\t\t} else {\n\t\t\tkey := parsedUrl.Path\n\t\t\treturn &key, nil\n\t\t}\n\t})\n}", "func (p *Proxy) initCache() {\n\tif !p.CacheEnabled {\n\t\tlog.Info(\"dnsproxy: cache: disabled\")\n\n\t\treturn\n\t}\n\n\tsize := p.CacheSizeBytes\n\tlog.Info(\"dnsproxy: cache: enabled, size %d b\", size)\n\n\tp.cache = newCache(size, p.EnableEDNSClientSubnet, p.CacheOptimistic)\n\tp.shortFlighter = newOptimisticResolver(p)\n}", "func New(dir string, maxSizeBytes int64) cache.Cache {\n\t// Create the directory structure.\n\thexLetters := []byte(\"0123456789abcdef\")\n\tfor _, c1 := range hexLetters {\n\t\tfor _, c2 := range hexLetters {\n\t\t\tsubDir := string(c1) + string(c2)\n\t\t\terr := os.MkdirAll(filepath.Join(dir, cache.CAS.String(), subDir), os.FileMode(0744))\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\terr = os.MkdirAll(filepath.Join(dir, cache.AC.String(), subDir), os.FileMode(0744))\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n\n\t// The eviction callback deletes the file from disk.\n\tonEvict := func(key Key, value SizedItem) {\n\t\t// Only remove committed items (as temporary files have a different filename)\n\t\tif value.(*lruItem).committed {\n\t\t\terr := os.Remove(filepath.Join(dir, key.(string)))\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t}\n\t}\n\n\tcache := &diskCache{\n\t\tdir: filepath.Clean(dir),\n\t\tmux: &sync.RWMutex{},\n\t\tlru: NewSizedLRU(maxSizeBytes, onEvict),\n\t}\n\n\terr := cache.migrateDirectories()\n\tif err != nil {\n\t\tlog.Fatalf(\"Attempting to migrate the old directory structure to the new structure failed \"+\n\t\t\t\"with error: %v\", err)\n\t}\n\terr = cache.loadExistingFiles()\n\tif err != nil {\n\t\tlog.Fatalf(\"Loading of existing cache entries failed due to error: %v\", err)\n\t}\n\n\treturn cache\n}", "func UserCacheDir() (string, error)", "func (m *Mutator) cache(ctx context.Context) error {\n\t// We need the manifest\n\tif m.manifest == nil {\n\t\tblob, err := m.engine.FromDescriptor(ctx, m.source.Descriptor())\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"cache source manifest\")\n\t\t}\n\t\tdefer blob.Close()\n\n\t\tmanifest, ok := blob.Data.(ispec.Manifest)\n\t\tif !ok {\n\t\t\t// Should _never_ be reached.\n\t\t\treturn errors.Errorf(\"[internal error] unknown manifest blob type: %s\", blob.Descriptor.MediaType)\n\t\t}\n\n\t\t// Make a copy of the manifest.\n\t\tm.manifest = manifestPtr(manifest)\n\t}\n\n\tif m.config == nil {\n\t\tblob, err := m.engine.FromDescriptor(ctx, m.manifest.Config)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"cache source config\")\n\t\t}\n\t\tdefer blob.Close()\n\n\t\tconfig, ok := blob.Data.(ispec.Image)\n\t\tif !ok {\n\t\t\t// Should _never_ be reached.\n\t\t\treturn errors.Errorf(\"[internal error] unknown config blob type: %s\", blob.Descriptor.MediaType)\n\t\t}\n\n\t\t// Make a copy of the config and configDescriptor.\n\t\tm.config = configPtr(config)\n\t}\n\n\treturn nil\n}", "func CacheSymlinks() MountOption {\n\treturn func(conf *mountConfig) error {\n\t\tconf.initFlags |= InitCacheSymlinks\n\t\treturn nil\n\t}\n}", "func (conf blah) Cache() string {\n\treturn conf.Val(CACHEPATH)\n}", "func (e *CachedEnforcer) EnableCache(enableCache bool) 
{\n\te.enableCache = enableCache\n}", "func Init(path_to_cache string, connection *rpc.Client) {\n\tcache_directory = path_to_cache\n\tconn = connection\n}", "func (o *OSS) Cache(ctx context.Context, name string) (goproxy.Cache, error) {\n\tif o.loadOnce.Do(o.load); o.loadError != nil {\n\t\treturn nil, o.loadError\n\t}\n\n\tobjectName := path.Join(o.Root, name)\n\tif e, err := o.bucket.IsObjectExist(objectName); err != nil {\n\t\treturn nil, err\n\t} else if !e {\n\t\treturn nil, goproxy.ErrCacheNotFound\n\t}\n\n\th, err := o.bucket.GetObjectMeta(objectName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcontentLength, err := strconv.ParseInt(h.Get(\"Last-Modified\"), 10, 64)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlastModified, err := http.ParseTime(h.Get(\"Last-Modified\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tchecksum, err := hex.DecodeString(strings.Trim(h.Get(\"ETag\"), `\"`))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &ossCache{\n\t\tbucket: o.bucket,\n\t\tobjectName: objectName,\n\t\tname: name,\n\t\tsize: contentLength,\n\t\tmodTime: lastModified,\n\t\tchecksum: checksum,\n\t}, nil\n}", "func serveCache(next echo.HandlerFunc) echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\t\tif cache.Serve(c.Response(), c.Request()) {\n\t\t\treturn nil\n\t\t}\n\t\treturn next(c)\n\t}\n}", "func (t *Three) Cache() *Cache {\n\tp := t.ctx.Get(\"Cache\")\n\treturn CacheFromJSObject(p)\n}", "func Cache(log logr.Logger, cfg *Config, delegate http.RoundTripper) (http.RoundTripper, error) {\n\tif cfg == nil && config == nil {\n\t\treturn nil, errors.New(\"no configuration is provided for the github cache\")\n\t}\n\tif cfg == nil {\n\t\tcfg = config\n\t}\n\n\tgithubCache, err := getCache(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcachedTransport := httpcache.NewTransport(githubCache)\n\tcachedTransport.Transport = &cache{\n\t\tdelegate: delegate,\n\t\tmaxAgeSeconds: cfg.MaxAgeSeconds,\n\t}\n\n\treturn &rateLimitLogger{\n\t\tlog: log,\n\t\tdelegate: cachedTransport,\n\t}, nil\n\n}", "func CacheContext(parent context.Context, log logging.Logger) (context.Context, error) {\n\ttopo, cleanup, err := NewProvider(log).initTopology()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttopoCtx := context.WithValue(parent, topoKey, topo)\n\treturn context.WithValue(topoCtx, cleanupKey, cleanup), nil\n}", "func (tnc *TreeNodeCache) Cache(tree *Tree, treeNode *TreeNode) {\n\ttnc.Lock()\n\tdefer tnc.Unlock()\n\tmm, ok := tnc.Entries[tree.ID]\n\tif !ok {\n\t\tmm = make(map[TreeNodeID]*TreeNode)\n\t}\n\t// add treenode\n\tmm[treeNode.ID] = treeNode\n\t// add parent if not root\n\tif treeNode.Parent != nil {\n\t\tmm[treeNode.Parent.ID] = treeNode.Parent\n\t}\n\t// add children\n\tfor _, c := range treeNode.Children {\n\t\tmm[c.ID] = c\n\t}\n\t// add cache\n\ttnc.Entries[tree.ID] = mm\n}", "func (c *asyncCache) run() {\n\tfor r := range c.requests {\n\t\tc.realCache.Store(r.target, r.key, r.files)\n\t}\n\tc.wg.Done()\n}", "func (b *taskBuilder) usesCCache() {\n\tb.cache(CACHES_CCACHE...)\n}", "func (c *CheckpointAdvancer) enableCache() {\n\tc.cache = NewCheckpoints()\n\tc.state = &fullScan{}\n}", "func CachePath() string {\n\treturn defaultConfig.CachePath()\n}", "func (mr *MountResolver) SyncCache(proc *process.Process) error {\n\tmr.lock.Lock()\n\tdefer mr.lock.Unlock()\n\n\tmnts, err := kernel.ParseMountInfoFile(proc.Pid)\n\tif err != nil {\n\t\tpErr, ok := err.(*os.PathError)\n\t\tif !ok {\n\t\t\treturn err\n\t\t}\n\t\treturn pErr\n\t}\n\n\tfor _, mnt := 
range mnts {\n\t\te, err := newMountEventFromMountInfo(mnt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif _, exists := mr.mounts[e.MountID]; exists {\n\t\t\tcontinue\n\t\t}\n\t\tmr.insert(*e)\n\n\t\t// init discarder revisions\n\t\tmr.probe.inodeDiscarders.initRevision(e)\n\t}\n\n\treturn nil\n}", "func TestDirCacheFlush(t *testing.T) {\n\trun.skipIfNoFUSE(t)\n\n\trun.checkDir(t, \"\")\n\n\trun.mkdir(t, \"dir\")\n\trun.mkdir(t, \"otherdir\")\n\trun.createFile(t, \"dir/file\", \"1\")\n\trun.createFile(t, \"otherdir/file\", \"1\")\n\n\tdm := newDirMap(\"otherdir/|otherdir/file 1|dir/|dir/file 1\")\n\tlocalDm := make(dirMap)\n\trun.readLocal(t, localDm, \"\")\n\tassert.Equal(t, dm, localDm, \"expected vs fuse mount\")\n\n\terr := run.fremote.Mkdir(context.Background(), \"dir/subdir\")\n\trequire.NoError(t, err)\n\n\t// expect newly created \"subdir\" on remote to not show up\n\trun.forget(\"otherdir\")\n\trun.readLocal(t, localDm, \"\")\n\tassert.Equal(t, dm, localDm, \"expected vs fuse mount\")\n\n\trun.forget(\"dir\")\n\tdm = newDirMap(\"otherdir/|otherdir/file 1|dir/|dir/file 1|dir/subdir/\")\n\trun.readLocal(t, localDm, \"\")\n\tassert.Equal(t, dm, localDm, \"expected vs fuse mount\")\n\n\trun.rm(t, \"otherdir/file\")\n\trun.rmdir(t, \"otherdir\")\n\trun.rm(t, \"dir/file\")\n\trun.rmdir(t, \"dir/subdir\")\n\trun.rmdir(t, \"dir\")\n\trun.checkDir(t, \"\")\n}", "func CacheDir() (string, error) {\n\tc, err := os.UserCacheDir()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn filepath.Join(c, appName), nil\n}", "func (cass *WriterBase) CacheRender(ctx context.Context, path string, from int64, to int64, tags repr.SortingTags) ([]*metrics.RawRenderItem, error) {\n\treturn nil, ErrNotYetimplemented\n}", "func NewCache () *Cache {\n path := Config.CacheFile\n if path != \"\" && filepath.Base(path) == path {\n path = filepath.Join(Config.Source, path)\n }\n return &Cache{\n FilePrints: make(map[string]*FilePrint),\n path: path,\n }\n}", "func Cache(c cache.Service) Option {\n\treturn func(o *Options) {\n\t\to.Cache = c\n\t}\n}", "func (c *rPathCacheContainer) store(cPath string, dirIV []byte, pPath string) {\n\tc.Lock()\n\tdefer c.Unlock()\n\tc.cPath = cPath\n\tc.dirIV = dirIV\n\tc.pPath = pPath\n}", "func Cache(directives string) Adapter {\n\treturn func(h http.Handler) http.Handler {\n\t\tnh := func(w http.ResponseWriter, r *http.Request) {\n\t\t\tif r.Method == http.MethodGet {\n\t\t\t\tw.Header().Set(\"Cache-Control\", directives)\n\t\t\t}\n\n\t\t\th.ServeHTTP(w, r)\n\t\t}\n\n\t\treturn http.HandlerFunc(nh)\n\t}\n}", "func (ls *libstoreServer) CacheHandler() {\n\tfor {\n\t\tselect {\n\t\tcase key := <-ls.checkCache:\n\t\t\tCacheItem := ls.CheckCache(key)\n\t\t\tls.checkCacheReply <- CacheItem\n\t\tcase key := <-ls.revokeCache:\n\t\t\tdelete(ls.Cache, key)\n\t\t\tls.CacheReply <- true\n\t\tcase pack := <-ls.addCache:\n\t\t\tls.Cache[pack.Key] = pack.Item\n\t\t\tls.CacheReply <- true\n\t\t}\n\t}\n}", "func Cache(next echo.HandlerFunc) echo.HandlerFunc {\n\treturn func(c echo.Context) (err error) {\n\t\tif redis.Client == nil {\n\t\t\treturn next(c)\n\t\t}\n\n\t\tctx := c.Request().Context()\n\n\t\turl := c.Request().URL.String()\n\n\t\t// Request\n\t\trequest := []byte{}\n\t\tif c.Request().Body != nil {\n\t\t\trequest, err = ioutil.ReadAll(c.Request().Body)\n\n\t\t\tif err != nil {\n\t\t\t\treturn c.JSON(http.StatusInternalServerError, nil)\n\t\t\t}\n\t\t}\n\n\t\t// Reset\n\t\tc.Request().Body = ioutil.NopCloser(bytes.NewBuffer(request))\n\n\t\tvar requestIndent 
string\n\n\t\tif len(request) > 0 {\n\t\t\tvar prettyJSON bytes.Buffer\n\n\t\t\terr = json.Indent(&prettyJSON, request, \"\", \" \")\n\n\t\t\tif err != nil {\n\t\t\t\treturn c.JSON(http.StatusInternalServerError, nil)\n\t\t\t}\n\n\t\t\trequestIndent = prettyJSON.String()\n\t\t}\n\n\t\tkey := fmt.Sprintf(\"%s-%s-%s\", cachePrefix, url, requestIndent)\n\n\t\tcached, err := redis.Client.Get(ctx, key).Result()\n\n\t\tif err == redis.Nil {\n\t\t\terr = nil\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn c.JSON(http.StatusInternalServerError, nil)\n\t\t}\n\n\t\t// Stop processing if client went away\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil\n\t\tdefault:\n\t\t}\n\n\t\tif cached != \"\" {\n\t\t\tresponse := cachedResponse{}\n\n\t\t\terr = json.Unmarshal([]byte(cached), &response)\n\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\treturn c.JSONBlob(response.StatusCode, response.Body)\n\t\t}\n\n\t\t// Reponse\n\t\tresponse := new(bytes.Buffer)\n\t\tmw := io.MultiWriter(c.Response().Writer, response)\n\t\twriter := &bodyDumpResponseWriter{Writer: mw, ResponseWriter: c.Response().Writer}\n\t\tc.Response().Writer = writer\n\n\t\terr = next(c)\n\n\t\tif err != nil {\n\t\t\treturn c.JSON(http.StatusInternalServerError, nil)\n\t\t}\n\n\t\tcachedResp := cachedResponse{\n\t\t\tStatusCode: c.Response().Status,\n\t\t\tBody: response.Bytes(),\n\t\t}\n\n\t\tcachedRespJSON, err := json.Marshal(cachedResp)\n\n\t\tif err != nil {\n\t\t\treturn c.JSON(http.StatusInternalServerError, nil)\n\t\t}\n\n\t\terr = redis.Client.Set(ctx, key, string(cachedRespJSON), cacheDuration).Err()\n\n\t\tif err != nil {\n\t\t\treturn c.JSON(http.StatusInternalServerError, nil)\n\t\t}\n\n\t\treturn\n\t}\n}", "func (s *Module) Cache() (pkg.Volume, error) {\n\treturn s.VolumeLookup(cacheLabel)\n}", "func cacheNpm(workdir string) error {\n\tnpmCache := cache.New()\n\n\tlocalPackageDir := filepath.Join(workdir, \"node_modules\")\n\tlocalPackageLockFile := filepath.Join(workdir, \"package-lock.json\")\n\n\texist, err := pathutil.IsDirExists(localPackageDir)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to check directory existence, error: %s\", err)\n\t}\n\tif !exist {\n\t\treturn fmt.Errorf(\"local node_modules directory does not exist: %s\", localPackageDir)\n\t}\n\n\tnpmCache.IncludePath(localPackageDir + \" -> \" + localPackageLockFile)\n\n\tif err := npmCache.Commit(); err != nil {\n\t\treturn fmt.Errorf(\"failed to mark node_modules directory to be cached, error: %s\", err)\n\t}\n\treturn nil\n}", "func (b *taskBuilder) cache(caches ...*specs.Cache) {\n\tfor _, c := range caches {\n\t\talreadyHave := false\n\t\tfor _, exist := range b.Spec.Caches {\n\t\t\tif c.Name == exist.Name {\n\t\t\t\tif !reflect.DeepEqual(c, exist) {\n\t\t\t\t\tlog.Fatalf(\"Already have cache %s with a different definition!\", c.Name)\n\t\t\t\t}\n\t\t\t\talreadyHave = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !alreadyHave {\n\t\t\tb.Spec.Caches = append(b.Spec.Caches, c)\n\t\t}\n\t}\n}", "func (o *OSS) SetCache(ctx context.Context, c goproxy.Cache) error {\n\tif o.loadOnce.Do(o.load); o.loadError != nil {\n\t\treturn o.loadError\n\t}\n\n\treturn o.bucket.PutObject(\n\t\tpath.Join(o.Root, c.Name()),\n\t\tc,\n\t\toss.ContentType(mimeTypeByExtension(path.Ext(c.Name()))),\n\t)\n}", "func UserCacheDir(tc Context) (string, error) {\n\treturn os.UserCacheDir()\n}", "func Wrap(transport http.RoundTripper) http.RoundTripper {\n\tcacheDir := getCacheDir()\n\tos.MkdirAll(cacheDir, 0700)\n\treturn roundTripper{transport, cacheDir}\n}", "func (s 
*Module) ensureCache(ctx context.Context) error {\n\tlog.Info().Msgf(\"Setting up cache\")\n\n\tlog.Debug().Msgf(\"Checking pools for existing cache\")\n\n\tvar cacheFs filesystem.Volume\n\n\t// check if cache volume available\n\tfor _, pool := range s.ssds {\n\t\tlog.Debug().Str(\"pool\", pool.Name()).Msg(\"checking pool for cache volume\")\n\t\tif _, err := pool.Mounted(); err != nil {\n\t\t\tlog.Debug().Str(\"pool\", pool.Name()).Msg(\"pool is not mounted\")\n\t\t\tcontinue\n\t\t}\n\n\t\tfilesystems, err := pool.Volumes()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, fs := range filesystems {\n\t\t\tif fs.Name() == cacheLabel {\n\t\t\t\tlog.Debug().Msgf(\"Found existing cache at %v\", fs.Path())\n\t\t\t\tcacheFs = fs\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif cacheFs != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif cacheFs == nil {\n\t\tlog.Debug().Msgf(\"No cache found, try to create new cache\")\n\n\t\tlog.Debug().Msgf(\"Trying to create new cache on SSD\")\n\t\tfs, err := s.createSubvolWithQuota(cacheSize, cacheLabel)\n\n\t\tif err != nil {\n\t\t\tlog.Warn().Err(err).Msg(\"failed to create new cache on SSD\")\n\t\t} else {\n\t\t\tcacheFs = fs\n\t\t}\n\t}\n\n\tif cacheFs == nil {\n\t\tlog.Warn().Msg(\"failed to create persisted cache disk. Running on limited cache\")\n\n\t\t// set limited cache flag\n\t\tif err := app.SetFlag(app.LimitedCache); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// when everything failed, mount the Tmpfs\n\t\treturn syscall.Mount(\"\", \"/var/cache\", \"tmpfs\", 0, \"size=500M\")\n\t}\n\n\t_ = app.DeleteFlag(app.LimitedCache)\n\n\tgo s.watchCache(ctx, cacheFs)\n\n\tif !filesystem.IsMountPoint(CacheTarget) {\n\t\tlog.Debug().Msgf(\"Mounting cache partition in %s\", CacheTarget)\n\t\treturn filesystem.BindMount(cacheFs, CacheTarget)\n\t}\n\n\tlog.Debug().Msgf(\"Cache partition already mounted in %s\", CacheTarget)\n\treturn nil\n}", "func WithLoaderCache(ctx context.Context) context.Context {\n\treturn context.WithValue(ctx, contextCacheKey{}, &sync.Map{})\n}", "func (g *GCS) Cache(ctx context.Context, name string) (goproxy.Cache, error) {\n\tif g.loadOnce.Do(g.load); g.loadError != nil {\n\t\treturn nil, g.loadError\n\t}\n\n\toh := g.bucket.Object(path.Join(g.Root, name))\n\tattrs, err := oh.Attrs(ctx)\n\tif err != nil {\n\t\tif err == storage.ErrObjectNotExist {\n\t\t\treturn nil, goproxy.ErrCacheNotFound\n\t\t}\n\n\t\treturn nil, err\n\t}\n\n\treturn &gcsCache{\n\t\tctx: ctx,\n\t\toh: oh,\n\t\tname: name,\n\t\tsize: attrs.Size,\n\t\tmodTime: attrs.Updated,\n\t\tchecksum: attrs.MD5,\n\t}, nil\n}", "func (c *ChainCache[T]) Set(ctx context.Context, key any, object T, options ...store.Option) error {\n\terrs := []error{}\n\tfor _, cache := range c.caches {\n\t\terr := cache.Set(ctx, key, object, options...)\n\t\tif err != nil {\n\t\t\tstoreType := cache.GetCodec().GetStore().GetType()\n\t\t\terrs = append(errs, fmt.Errorf(\"Unable to set item into cache with store '%s': %v\", storeType, err))\n\t\t}\n\t}\n\tif len(errs) > 0 {\n\t\terrStr := \"\"\n\t\tfor k, v := range errs {\n\t\t\terrStr += fmt.Sprintf(\"error %d of %d: %v\", k+1, len(errs), v.Error())\n\t\t}\n\t\treturn errors.New(errStr)\n\t}\n\n\treturn nil\n}", "func (w *Watcher) setCache(key string, val string) {\n\tw.Lock()\n\tdefer w.Unlock()\n\tw.cache[key] = val\n}", "func (wc *WriterBase) InCache(path string, tags repr.SortingTags) (bool, error) {\n\treturn false, nil\n}", "func CachePath(paths ...string) (path string, err error) {\n\tdefer func() {\n\t\t// create the dir based on return path if 
it doesn't exist\n\t\tos.MkdirAll(filepath.Dir(path), os.ModePerm)\n\t}()\n\tcacheDir := DefaultCacheDir\n\tif cd := os.Getenv(\"PACKER_CACHE_DIR\"); cd != \"\" {\n\t\tcacheDir = cd\n\t}\n\n\tpaths = append([]string{cacheDir}, paths...)\n\treturn filepath.Abs(filepath.Join(paths...))\n}", "func cacheResponse(next echo.HandlerFunc) echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\t\tc.Response().Writer = cache.NewWriter(c.Response().Writer, c.Request())\n\t\treturn next(c)\n\t}\n}", "func (v *VirtualEnvironment) OnUseCache(f func()) { v.onUseCache = f }", "func (h *Handler) SetCache(enabled bool) {\n\tif enabled {\n\t\th.cache = make(map[string]cacheValue)\n\t} else {\n\t\th.cache = nil\n\t}\n}", "func (f *Fs) DirCacheFlush() {\n\tdo := f.Fs.Features().DirCacheFlush\n\tif do != nil {\n\t\tdo()\n\t}\n}", "func (s *server) loadThroughCache(name upspin.PathName) ([]byte, error) {\n\tde, err := s.Lookup(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trv, err := clientutil.ReadAll(s.cachedCfg, de)\n\treturn rv, err\n}", "func (wc *WriterBase) Cache() Cacher {\n\treturn wc.cacher\n}", "func (c *ChainCache[T]) setter() {\n\tfor item := range c.setChannel {\n\t\tfor _, cache := range c.caches {\n\t\t\tif item.storeType != nil && *item.storeType == cache.GetCodec().GetStore().GetType() {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tcache.Set(context.Background(), item.key, item.value, store.WithExpiration(item.ttl))\n\t\t}\n\t}\n}", "func (self autoInjectModule) Cached() autoInjectModule {\n\tself.cached = true\n\treturn self\n}", "func CacheManifest(cacheGroup CacheGrouper, rootCacheDir string, selectedInstallableIds []string,\n\tdryRun bool) error {\n\n\t// create a directory to cache all kapps in this cacheGroup in\n\tgroupCacheDir := filepath.Join(rootCacheDir, cacheGroup.Id())\n\n\terr := createDirectoryIfMissing(groupCacheDir)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\t// acquire each kapp and cache it\n\tfor _, installableObj := range cacheGroup.Installables() {\n\t\tif !utils.InStringArray(selectedInstallableIds, installableObj.FullyQualifiedId()) {\n\t\t\tlog.Logger.Debugf(\"Won't cache unselected installable '%s'\", installableObj.FullyQualifiedId())\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Logger.Infof(\"Caching kapp '%s'\", installableObj.FullyQualifiedId())\n\t\tlog.Logger.Debugf(\"Kapp to cache: %#v\", installableObj)\n\n\t\t_, err = printer.Fprintf(\"Downloading kapp '[white]%s'...\\n\", installableObj.FullyQualifiedId())\n\t\tif err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\n\t\terr := installableObj.SetWorkspaceDir(rootCacheDir)\n\t\tif err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\n\t\tacquirers, err := installableObj.Acquirers()\n\t\tif err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\n\t\terr = acquireSources(cacheGroup.Id(), acquirers, installableObj.GetCacheDir(),\n\t\t\tdryRun)\n\t\tif err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (cfg *CachingBucketConfig) CacheIter(configName string, cache cache.Cache, matcher func(string) bool, ttl time.Duration, codec IterCodec) {\n\tcfg.iter[configName] = &iterConfig{\n\t\toperationConfig: newOperationConfig(cache, matcher),\n\t\tttl: ttl,\n\t\tcodec: codec,\n\t}\n}", "func (s *Settings) makeCachePath() {\n\tvar subPath string\n\tvar cacheImageName string\n\n\tpathParts := strings.Split(s.Context.Path, \"/\")\n\tlastIndex := len(pathParts) - 1\n\timageData := strings.Split(pathParts[lastIndex], \".\")\n\timageName, imageFormat := imageData[0], 
strings.ToLower(imageData[1])\n\n\tif s.Options.Webp {\n\t\tcacheImageName = fmt.Sprintf(\n\t\t\t\"%s_%dx%d_webp_.%s\", imageName, s.Options.Width, s.Options.Height, imageFormat)\n\t} else {\n\t\tcacheImageName = fmt.Sprintf(\n\t\t\t\"%s_%dx%d.%s\", imageName, s.Options.Width, s.Options.Height, imageFormat)\n\t}\n\n\tswitch s.Context.Storage {\n\tcase \"loc\":\n\t\tsubPath = strings.Join(pathParts[:lastIndex], \"/\")\n\tcase \"rem\":\n\t\tsubPath = strings.Join(pathParts[1:lastIndex], \"/\")\n\t}\n\ts.Context.Format = imageFormat\n\ts.Context.CachePath = fmt.Sprintf(\n\t\t\"%s/%s/%s\", s.CacheDir, subPath, cacheImageName)\n}", "func (se *shellExecutor) copyCache() {\n\tif !util.HasString(se.cacheInputDir) {\n\t\treturn\n\t}\n\n\tfiles, err := ioutil.ReadDir(se.cacheInputDir)\n\tutil.PanicIfErr(err)\n\n\tfor _, f := range files {\n\t\toldPath := filepath.Join(se.cacheInputDir, f.Name())\n\t\tnewPath := filepath.Join(se.jobDir, f.Name())\n\n\t\tif util.IsFileExists(newPath) {\n\t\t\t_ = os.Remove(newPath)\n\t\t}\n\n\t\t// move cache from src dir to job dir\n\t\terr = os.Rename(oldPath, newPath)\n\n\t\tif err == nil {\n\t\t\tse.writeSingleLog(fmt.Sprintf(\"cache %s has been applied\", f.Name()))\n\t\t} else {\n\t\t\tse.writeSingleLog(fmt.Sprintf(\"cache %s not applied: %s\", f.Name(), err.Error()))\n\t\t}\n\n\t\t// remove cache from cache dir anyway\n\t\t_ = os.RemoveAll(oldPath)\n\t}\n}", "func (cache *Cache) Write () {\n err := Config.Check()\n if err != nil { return }\n \n f, err := os.Create(cache.path)\n if err == nil {\n defer f.Close()\n\n if Opt.Verbose { log.Println(\"Writing new cache file\") }\n\n enc := gob.NewEncoder(f)\n err = enc.Encode(cache)\n }\n \n if err != nil {\n qMain.showError(\"Write cache\", err)\n }\n}", "func Disk(dir string) CacheFunc {\n\treturn func(in chan *Request) chan *Request {\n\n\t\tout := make(chan *Request)\n\n\t\t// This function writes the data to the disk after it is\n\t\t// created, and is sent along with the request if the data is\n\t\t// not in the cache.\n\t\twriteFunc := func(req *Request) {\n\t\t\tif len(req.errs) > 0 {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfname := filepath.Join(dir, req.key()+FileExtension)\n\t\t\tw, err := os.Create(fname)\n\t\t\tif err != nil {\n\t\t\t\treq.errs = append(req.errs, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer w.Close()\n\t\t\tb, err := req.resultPayload.MarshalBinary()\n\t\t\tif err != nil {\n\t\t\t\treq.errs = append(req.errs, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif _, err = w.Write(b); err != nil {\n\t\t\t\treq.errs = append(req.errs, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tgo func() {\n\t\t\tfor req := range in {\n\t\t\t\tfname := filepath.Join(dir, req.key()+FileExtension)\n\n\t\t\t\tf, err := os.Open(fname)\n\t\t\t\tif err != nil {\n\t\t\t\t\t// If we can't open the file, assume that it doesn't exist and Pass\n\t\t\t\t\t// the request on.\n\t\t\t\t\treq.funcs = append(req.funcs, writeFunc)\n\t\t\t\t\tout <- req\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tb, err := ioutil.ReadAll(f)\n\t\t\t\tif err != nil {\n\t\t\t\t\t// We can't read the file.\n\t\t\t\t\treq.errs = append(req.errs, err)\n\t\t\t\t\treq.returnChan <- req\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif err := req.resultPayload.UnmarshalBinary(b); err != nil {\n\t\t\t\t\t// There is some problem with the file.\n\t\t\t\t\treq.errs = append(req.errs, err)\n\t\t\t\t\treq.returnChan <- req\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif err := f.Close(); err != nil {\n\t\t\t\t\treq.errs = append(req.errs, err)\n\t\t\t\t\treq.returnChan <- 
req\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// Successfully retrieved the result. Now return it to the requester.\n\t\t\t\treq.returnChan <- req\n\t\t\t}\n\t\t}()\n\t\treturn out\n\t}\n}", "func (h *Server) UseCache() *Server {\n\t//Initialize the cache.\n\tcache.OnInit()\n\treturn h\n}", "func WithCache(c filemetadata.Cache) UploaderOption {\n\treturn func(up *Uploader) {\n\t\tup.fmCache = c\n\t}\n}", "func InitCache(serviceName string,\n\tlc logger.LoggingClient,\n\tdp interfaces.DeviceProfileClient,\n\tdc interfaces.DeviceClient,\n\tpwc interfaces.ProvisionWatcherClient) {\n\tinitOnce.Do(func() {\n\t\tctx := context.WithValue(context.Background(), common.CorrelationHeader, uuid.New().String())\n\t\tmdr, err := dc.DevicesByServiceName(ctx, serviceName, 0, -1)\n\t\tif err != nil {\n\t\t\tlc.Error(\"get device list error\", err)\n\t\t}\n\t\tvar dcs []models.Device\n\t\tfor i := range mdr.Devices {\n\t\t\tdcs = append(dcs, dtos.ToDeviceModel(mdr.Devices[i]))\n\t\t}\n\t\tnewDeviceCache(dcs)\n\n\t\tvar (\n\t\t\tdps []models.DeviceProfile\n\t\t\tdpMap = make(map[string]struct{})\n\t\t)\n\t\tfor i := range dcs {\n\t\t\tif _, ok := dpMap[dcs[i].ProfileName]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdpr, err := dp.DeviceProfileByName(ctx, dcs[i].ProfileName)\n\t\t\tif err != nil {\n\t\t\t\tlc.Error(fmt.Sprintf(\"get device profile(%s) error: %+v\", dcs[i].ProfileName, err))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdpMap[dcs[i].ProfileName] = struct{}{}\n\t\t\tdps = append(dps, dtos.ToDeviceProfileModel(dpr.Profile))\n\t\t}\n\t\tnewProfileCache(dps)\n\n\t\tpwr, err := pwc.ProvisionWatchersByServiceName(ctx, serviceName, 0, -1)\n\t\tif err != nil {\n\t\t\tlc.Error(fmt.Sprintf(\"get device profile(%s) error: %+v\", serviceName, err))\n\t\t}\n\t\tvar pws []models.ProvisionWatcher\n\t\tfor i := range pwr.ProvisionWatchers {\n\t\t\tpws = append(pws, dtos.ToProvisionWatcherModel(pwr.ProvisionWatchers[i]))\n\t\t}\n\t\tnewProvisionWatcherCache(pws)\n\t})\n}", "func (m *Mux) cacheRoute(key string, r Route) {\n\tif MaxCacheEntries == 0 {\n\t\treturn // MaxCacheEntries is 0 so cache is off\n\t}\n\tm.cacheMu.Lock()\n\t// If cache is too big, evict\n\tif len(m.cache) > MaxCacheEntries {\n\t\tm.cache = make(map[string]Route, MaxCacheEntries)\n\t}\n\t// Fill the cache for this key -> route pair\n\tm.cache[key] = r\n\tm.cacheMu.Unlock()\n}", "func (l *PersistableLedger) flushCache() {\n\tif l.ledger == nil {\n\t\tlog.Println(\"WARN: flushCache called for a nil ledger\")\n\t\treturn\n\t}\n\n\t// Truncate(0) clears the file contents\n\tl.ledger.Truncate(0)\n\n\tfor username, ts := range l.cache {\n\t\tl.ledger.Write([]byte(l.line(username, ts)))\n\t}\n}", "func (c *CacheManager) Initialise(videoDir string, cachedDir string) {\n\tc.videoDir = videoDir\n\tc.BuildAvailables()\n\tc.cachedDir = cachedDir\n\tc.converting = make(map[string]bool)\n\tif (utils.FileExist(cachedDir)) {\n\t\tc.BuildCached()\n\t} else {\n\t\tos.MkdirAll(cachedDir, os.ModeDir|os.ModePerm)\n\t}\n\tc.converter.Initialise(videoDir, cachedDir)\n}", "func (dc *diskCache) init() error {\n\tfiles, err := ioutil.ReadDir(dc.dir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnow := time.Now().Unix()\n\tfor _, file := range files {\n\t\tfileName := dc.fileName(file.Name())\n\t\tif file.IsDir() {\n\t\t\tcontinue\n\t\t}\n\n\t\tfi, err := GetFileTime(fileName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tnode := &lru.Node{\n\t\t\tKey: file.Name(),\n\t\t\tLength: file.Size(),\n\t\t\tAccessTime: fi.AccessTime / 1e9,\n\t\t}\n\t\tif dc.lru.TTL > 0 {\n\t\t\tif 
fi.AccessTime/1e9+dc.lru.TTL <= time.Now().Unix() {\n\t\t\t\tos.Remove(dc.dir + file.Name())\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tnode.SetExpire(now + dc.lru.TTL)\n\t\t\t}\n\t\t}\n\n\t\tdc.lru.Add(node)\n\t\tdc.m[node.Key] = node\n\t}\n\treturn nil\n}", "func (s *Scope) cacheDir() (string, error) {\n\tswitch s.Type {\n\tcase System:\n\t\treturn defaultCacheDir, nil\n\n\tcase User:\n\t\tpath := os.Getenv(\"XDG_CACHE_HOME\")\n\t\tif path == \"\" {\n\t\t\treturn expandUser(\"~/.cache\"), nil\n\t\t}\n\t\treturn path, nil\n\n\tcase CustomHome:\n\t\treturn filepath.Join(s.CustomHome, \".cache\"), nil\n\t}\n\n\treturn \"\", ErrInvalidScope\n}", "func CachePathOption(path string) Option {\n\treturn func(opts *options) {\n\t\topts.CachePath = path\n\t}\n}", "func (f *Finder) cachedFinder() cachedFinder {\n\tvar cFind cachedFinder\n\tfor _, srch := range f.Searchers {\n\t\tcFind.Searchers = append(cFind.Searchers, srch.cachedSearcher())\n\t}\n\treturn cFind\n}", "func (c *Component) SetCache(key string, data interface{}) {\n\tc.cache[key] = data\n}", "func (c *TCache) Cache(object interface{}, key string, ttl time.Duration, tags []string, data func() error) error {\n\terr := c.Get(key, object)\n\tif err == nil {\n\t\treturn nil\n\t}\n\terr = data()\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = c.Set(&Item{\n\t\tKey: key,\n\t\tObject: object,\n\t\tExpiration: ttl,\n\t})\n\tc.SetTags(key, tags)\n\treturn nil\n}", "func Cache(w http.ResponseWriter, modTime time.Time, duration time.Duration) {\n\theader := w.Header()\n\tif !modTime.IsZero() {\n\t\theader.Set(\"Last-Modified\", modTime.UTC().Format(http.TimeFormat))\n\t}\n\theader.Set(\"Expires\", time.Now().Add(duration).UTC().Format(http.TimeFormat))\n\theader.Set(\"Vary\", \"Accept-Encoding\")\n}", "func WritebackCache() MountOption {\n\treturn func(conf *mountConfig) error {\n\t\tconf.initFlags |= InitWritebackCache\n\t\treturn nil\n\t}\n}", "func (dr *Resolver) cacheInode(key model.PathKey, path *PathEntry) error {\n\tentries, exists := dr.cache[key.MountID]\n\tif !exists {\n\t\tvar err error\n\n\t\tentries, err = lru.New[model.PathKey, *PathEntry](dr.config.DentryCacheSize)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdr.cache[key.MountID] = entries\n\t}\n\n\tentries.Add(key, path)\n\n\treturn nil\n}", "func (g *Goproxy) serveCache(\n\trw http.ResponseWriter,\n\treq *http.Request,\n\tname string,\n\tcontentType string,\n\tcacheControlMaxAge int,\n\tonNotFound func(),\n) {\n\tcontent, err := g.cache(req.Context(), name)\n\tif err != nil {\n\t\tif errors.Is(err, os.ErrNotExist) {\n\t\t\tonNotFound()\n\t\t\treturn\n\t\t}\n\n\t\tg.logErrorf(\n\t\t\t\"failed to get cached module file: %s: %v\",\n\t\t\tname,\n\t\t\terr,\n\t\t)\n\t\tresponseInternalServerError(rw, req)\n\n\t\treturn\n\t}\n\tdefer content.Close()\n\n\tresponseSuccess(rw, req, content, contentType, cacheControlMaxAge)\n}" ]
[ "0.6759434", "0.63113296", "0.6255592", "0.62474954", "0.61981636", "0.5992968", "0.5986825", "0.594426", "0.59287596", "0.58807254", "0.58790565", "0.5852711", "0.5812298", "0.5808316", "0.5749725", "0.5698867", "0.56541485", "0.56460243", "0.5638599", "0.5629775", "0.5625304", "0.5621091", "0.5605719", "0.55910176", "0.5590671", "0.5577645", "0.55722016", "0.5571712", "0.5562889", "0.5547215", "0.55363494", "0.55289567", "0.5526984", "0.55202436", "0.55125016", "0.5499917", "0.54999083", "0.54973817", "0.54925054", "0.5487768", "0.5481577", "0.5465303", "0.5460139", "0.54297423", "0.54105026", "0.54086", "0.540792", "0.53997076", "0.5397832", "0.5395005", "0.5391356", "0.53850526", "0.5384245", "0.5380967", "0.5377091", "0.5362601", "0.5360775", "0.5332154", "0.53138673", "0.52918315", "0.5290951", "0.5286332", "0.5282745", "0.52720743", "0.52711153", "0.52697784", "0.52682626", "0.5266307", "0.5255994", "0.5255954", "0.5253667", "0.52507865", "0.5245032", "0.523944", "0.52289146", "0.5224962", "0.52157885", "0.52125454", "0.52122444", "0.5206114", "0.5192413", "0.5184141", "0.5182165", "0.5181377", "0.5172274", "0.5165898", "0.5165068", "0.5164777", "0.51608634", "0.5154301", "0.5150621", "0.5149986", "0.5146811", "0.5146274", "0.5143404", "0.5134979", "0.5133153", "0.5132603", "0.5128934", "0.5125158" ]
0.6750149
1
Clean enables or disables removal of leftover files in the target directory.
func (goldsmith *Goldsmith) Clean(clean bool) *Goldsmith {
	goldsmith.clean = clean
	return goldsmith
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (p *ArchiveExtract) Clean() {\n\tif p.workingDir != \"\" {\n\t\tos.RemoveAll(p.workingDir)\n\t}\n}", "func Clean() error {\n\treturn sh.Rm(\"dist\")\n}", "func (e Engine) Clean() {\n\tos.RemoveAll(e.outputFolder)\n\treturn\n}", "func Clean() error {\n\treturn os.RemoveAll(OUTPUT_DIR)\n}", "func (fs *OneFile) Clean() error {\n\treturn nil\n}", "func (i *interactor) Clean() error {\n\treturn os.RemoveAll(i.dir)\n}", "func Clean() error {\n\tfixtureDir := filepath.Join(\"integration\", \"testdata\", \"fixtures\")\n\tpaths := []string{\n\t\tfilepath.Join(fixtureDir, \"images\"),\n\t\tfilepath.Join(fixtureDir, \"vm-images\"),\n\t}\n\tfor _, p := range paths {\n\t\tif err := sh.Rm(p); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func Clean() {\n\tfmt.Println(\"Cleaning...\")\n\tos.RemoveAll(app)\n\tfor _, fpath := range cleanFiles {\n\t\tos.RemoveAll(fpath)\n\t}\n}", "func Clean() error {\n\tfmt.Println(\"cleaning up\")\n\tif _, err := os.Stat(\"coverage.out\"); err == nil {\n\t\terr = os.Remove(\"coverage.out\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\terr := cleanDocker()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn os.RemoveAll(\"bin/\")\n}", "func Clean() error {\n\tbuildDir := filepath.Join(srcDir(), \"build\")\n\tfmt.Println(\"\\n=====> Cleaning: \", buildDir)\n\treturn trace.ConvertSystemError(os.RemoveAll(buildDir))\n}", "func Clean() error {\n\tif err := createDir(binPath); err != nil {\n\t\treturn err\n\t}\n\tif err := cleanDir(binPath); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func Clean() error {\n\tif err := createDir(binPath); err != nil {\n\t\treturn err\n\t}\n\tif err := cleanDir(binPath); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (Bpf) Clean() error {\n\n\tfmt.Println(\"Removing directory\", bpfBuildPath, \"..\")\n\tif err := os.RemoveAll(bpfBuildPath); err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Println(\"Removing kernel configurations ..\")\n\tfor _, k := range kernel.Builds {\n\t\tp := path.Join(k.Directory(), \".config\")\n\t\tif mg.Verbose() {\n\t\t\tfmt.Println(\"Removing\", p, \"..\")\n\t\t}\n\t\tif err := os.RemoveAll(p); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (eng *Engine) Clean() {\n\tfor _, f := range eng.Junk {\n\t\tif err := os.Remove(f); err != nil {\n\t\t\teng.logf(\"clean: %v\\n\", err)\n\t\t}\n\t}\n}", "func (cp *ArchConveyorPacker) CleanUp() {\n\tos.RemoveAll(cp.b.Path)\n}", "func Clean() error {\n\tmg.Deps(getEnvironment)\n\tfmt.Println(\"Cleaning...\")\n\tfmt.Println(\"# ########################################################################################\")\n\n\terrClean := gobuildhelpers.RemovePaths([]string{\n\t\tgpsaBuildContext.BinDir,\n\t\tgpsaBuildContext.PackageDir,\n\t\tgpsaBuildContext.LogDir,\n\t\tgpsaBuildContext.BinZipPath,\n\t\tgpsaBuildContext.BuildZipPath,\n\t})\n\tif errClean != nil {\n\t\treturn errClean\n\t}\n\n\tfmt.Println(\"# ########################################################################################\")\n\treturn nil\n}", "func clean() (err error) {\n\terr = os.RemoveAll(util.DIR)\n\tif err != nil {\n\t\tprintln(err.Error())\n\t}\n\treturn\n}", "func (i *installer) clean(taskID string) {\n\tlog.Println(\"installer: Removing files for task:\", taskID)\n\twd, _ := os.Getwd()\n\twd = fmt.Sprintf(\"%s/tasks\", wd)\n\n\t_, err := os.Stat(wd)\n\tif err != nil && os.IsNotExist(err) {\n\t\t// nothing to remove\n\t\treturn\n\t}\n\tfiles, err := ioutil.ReadDir(wd)\n\tif err != nil {\n\t\tlog.Printf(\"installer: Error reading work 
dir: %s\", err)\n\t\treturn\n\t}\n\tfor i := 0; i < len(files); i++ {\n\t\tif files[i].Name() != taskID {\n\t\t\tlog.Println(files[i].Name(), taskID)\n\t\t\tfilename := fmt.Sprintf(\"%s/%s\", wd, files[i].Name())\n\t\t\tlog.Printf(\"installer: Removing: %s\", filename)\n\t\t\terr = os.RemoveAll(filename)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"installer: Error removing: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n}", "func Clean() {\n\tfmt.Println(\"Cleaning...\")\n\tos.RemoveAll(\"MyApp\")\n}", "func (cp *OCIConveyorPacker) CleanUp() {\n\tos.RemoveAll(cp.b.Path)\n}", "func (c *common) clean() error {\n\tvar err error\n\n\tif len(c.flags.clean) == 0 {\n\t\treturn nil\n\t}\n\n\targs := append(c.flags.global, c.flags.clean...)\n\n\terr = shared.RunCommand(c.ctx, nil, nil, c.commands.clean, args...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif c.hooks.clean != nil {\n\t\terr = c.hooks.clean()\n\t}\n\n\treturn err\n}", "func (g *GitLocal) CleanForce(dir, fileName string) error {\n\treturn g.CleanForce(dir, fileName)\n}", "func cleanBinDirectory(options *Options) error {\n\n\tbuildDirectory := options.BinDirectory\n\n\t// Clear out old builds\n\tif fs.DirExists(buildDirectory) {\n\t\terr := os.RemoveAll(buildDirectory)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Create clean directory\n\terr := os.MkdirAll(buildDirectory, 0700)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (b *Bzr) Clean(d *Dependency) {\n\treturn\n}", "func (repo *Repo) Clean() error {\n\tfmt.Fprintln(os.Stderr, \"Cleaning repo \", repo.Path)\n\n\ttoRmStr, err := execCmdCombinedOutput(repo.Path, \"git\", \"clean\", \"-ndx\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Build a map of the externs\n\textMap := make(map[string]bool, len(repo.Externals))\n\tfor _, ext := range repo.Externals {\n\t\textRelPath := strings.Trim(strings.Replace(ext.Path, repo.Path, \"\", 1), \"/\")\n\t\textMap[extRelPath] = true\n\t}\n\n\ttoRm := strings.Split(string(toRmStr), \"\\n\")\n\tfor i := range toRm {\n\t\tr := strings.Replace(toRm[i], \"Would remove \", \"\", 1)\n\t\tr = strings.Trim(r, \"/\")\n\n\t\tif r == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tqualifiedR := path.Join(repo.Path, r)\n\n\t\tif !extMap[r] {\n\t\t\tif !dryRun {\n\t\t\t\terr = os.RemoveAll(qualifiedR)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Fprintln(os.Stdout, err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"Would remove %q\\n\", qualifiedR)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, ext := range repo.Externals {\n\t\terr = ext.Clean()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func Clean() error { return sh.Run(\"mage\", \"-clean\") }", "func (u *baseUploader) CleanFiles() {\n\n\tif u.ST.IsReserve {\n\t\treturn\n\t}\n\tfor _, fpath := range u.successedFiles {\n\t\tif err := os.Remove(fpath); err != nil {\n\t\t\tlog.Logger.Error(\"remove file got error\", zap.Error(err))\n\t\t}\n\t\tlog.Logger.Info(\"remove file\", zap.String(\"fpath\", fpath))\n\t}\n}", "func Clean() error {\n\treturn devtools.Clean()\n}", "func CleanTechsupportDirectory(config *tsconfig.TechSupportConfig) error {\n\tlog.Infof(\"Cleaning techsupport directory\")\n\tdir, err := ioutil.ReadDir(config.FileSystemRoot)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, d := range dir {\n\t\tos.RemoveAll(path.Join([]string{config.FileSystemRoot, d.Name()}...))\n\t}\n\n\treturn nil\n}", "func (rmc *RMakeConf) Clean() {\n\tfor _, v := range rmc.Files {\n\t\tv.LastTime = time.Now().AddDate(-20, 0, 0)\n\t}\n\trmc.Session = \"\"\n}", "func clean(t *testing.T, 
name string) {\n\terr := os.RemoveAll(name)\n\tif err != nil {\n\t\tt.Error(fmt.Errorf(\"Error during cleanup: %e\", err))\n\t}\n}", "func (b *Builder) Cleanup() {\n\tb.mu.Lock()\n\tfor r := range b.running {\n\t\tr.Cleanup()\n\t}\n\tb.mu.Unlock()\n\tif !b.Preserve {\n\t\tdefer os.RemoveAll(b.Dir)\n\t}\n}", "func cleanup() {\n\tos.Remove(dummyPath)\n}", "func cleanBootstrapSetup(workingDirectoryPath string) error {\n\n\t// Stop Yorc server\n\tif yorcServerShutdownChan != nil {\n\t\tclose(yorcServerShutdownChan)\n\t\tyorcServerOutputFile.Close()\n\t} else {\n\t\tcmd := exec.Command(\"pkill\", \"-f\", \"yorc server\")\n\t\tcmd.Run()\n\t}\n\n\t// stop Consul\n\tif cmdConsul != nil {\n\t\tif err := cmdConsul.Process.Kill(); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tcmd := exec.Command(\"pkill\", \"consul\")\n\t\tcmd.Run()\n\n\t}\n\n\t// Clean working directories\n\tos.RemoveAll(filepath.Join(workingDirectoryPath, \"bootstrapResources\"))\n\tos.RemoveAll(filepath.Join(workingDirectoryPath, \"deployments\"))\n\tos.RemoveAll(filepath.Join(workingDirectoryPath, \"consul-data\"))\n\tos.Remove(filepath.Join(workingDirectoryPath, \"config.yorc.yaml\"))\n\tos.Remove(filepath.Join(workingDirectoryPath, \"locations.yorc.yaml\"))\n\tos.Remove(filepath.Join(workingDirectoryPath, \"yorc.log\"))\n\n\tfmt.Println(\"Local setup cleaned up\")\n\treturn nil\n\n}", "func (c *noMirrorClientFactory) Clean() error {\n\treturn os.RemoveAll(c.cacheDir)\n}", "func (mr *Master) CleanupFiles() {\n\tfor i := range mr.files {\n\t\tfor j := 0; j < mr.nReduce; j++ {\n\t\t\tremoveFile(reduceName(mr.jobName, i, j))\n\t\t}\n\t}\n\tfor i := 0; i < mr.nReduce; i++ {\n\t\tremoveFile(mergeName(mr.jobName, i))\n\t}\n\tremoveFile(\"mrtmp.\" + mr.jobName)\n}", "func (me *TCmd) Cleanup() {\n\tos.Chdir(me.origin)\n\tos.RemoveAll(me.dir)\n}", "func (c *DefaultCleaner) Cleanup(config *api.Config) {\n\tif config.PreserveWorkingDir {\n\t\tglog.Infof(\"Temporary directory '%s' will be saved, not deleted\", config.WorkingDir)\n\t} else {\n\t\tglog.V(2).Infof(\"Removing temporary directory %s\", config.WorkingDir)\n\t\tc.fs.RemoveDirectory(config.WorkingDir)\n\t}\n\tif config.LayeredBuild {\n\t\tglog.V(2).Infof(\"Removing temporary image %s\", config.BuilderImage)\n\t\tc.docker.RemoveImage(config.BuilderImage)\n\t}\n}", "func (a *Awaitility) Clean(t *testing.T) {\n\tcleanup.ExecuteAllCleanTasks(t)\n}", "func (md *MassDns) Clean() error {\n\t// remove only temp resolvers file\n\tif md.tempResolversPath != \"\" {\n\t\tos.Remove(md.tempResolversPath)\n\t\tmd.tempResolversPath = \"\"\n\t}\n\treturn nil\n}", "func (this *Tidy) Clean(val bool) (bool, error) {\n\treturn this.optSetBool(C.TidyMakeClean, cBool(val))\n}", "func CleanTask() {\n\tvar wg sync.WaitGroup\n\tremove := []string{\n\t\t\"pkg/res/data.go\",\n\t\t\"res/generated/bundle.js\",\n\t\t\"res/generated/style.css\",\n\t\t\"browser/chrome-ext/src/src.zip\",\n\t}\n\tremoveAll := []string{\n\t\t\"dist/\",\n\t\t\"dist-archives/\",\n\t\t\"site/\",\n\t\t\"build/\",\n\t\t\"res/generated/\",\n\t\t\"res/messages/_ref\",\n\t\t\"browser/chrome-ext/src/javascripts\",\n\t\t\"AlkasirChromeExtension/\",\n\t}\n\twg.Add(len(remove))\n\twg.Add(len(removeAll))\n\tfor _, v := range remove {\n\t\tgo func(f string) {\n\t\t\tdefer wg.Done()\n\t\t\tos.Remove(f)\n\t\t}(v)\n\t}\n\tfor _, v := range removeAll {\n\t\tgo func(f string) {\n\t\t\tdefer wg.Done()\n\t\t\tos.RemoveAll(f)\n\t\t}(v)\n\t}\n\twg.Wait()\n}", "func clean() {\n\tbashBin, err := exec.LookPath(\"bash\")\n\tif err != nil 
{\n\t\tlogger.Error(\"Need bash to clean.\\n\")\n\t\tos.Exit(127)\n\t}\n\n\targv := []string{bashBin, \"-c\", \"commandhere\"}\n\n\tif *flagVerboseMode {\n\t\targv[2] = \"rm -rfv *.[568]\"\n\t} else {\n\t\targv[2] = \"rm -rf *.[568]\"\n\t}\n\n\tlogger.Info(\"Running: %v\\n\", argv[2:])\n\n\tcmd, err := exec.Run(bashBin, argv, os.Environ(), rootPath,\n\t\texec.DevNull, exec.PassThrough, exec.PassThrough)\n\tif err != nil {\n\t\tlogger.Error(\"%s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\twaitmsg, err := cmd.Wait(0)\n\tif err != nil {\n\t\tlogger.Error(\"Couldn't delete files: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tif waitmsg.ExitStatus() != 0 {\n\t\tlogger.Error(\"rm returned with errors.\\n\")\n\t\tos.Exit(waitmsg.ExitStatus())\n\t}\n}", "func destructor(cmd *cobra.Command, args []string) {\n\tlog.Debug().Msgf(\"Running Destructor.\\n\")\n\tif debug {\n\t\t// Keep intermediary files, when on debug\n\t\tlog.Debug().Msgf(\"Skipping file clearance on Debug Mode.\\n\")\n\t\treturn\n\t}\n\tintermediaryFiles := []string{\"generate_pylist.py\", \"pylist.json\", \"dependencies.txt\", \"golist.json\", \"npmlist.json\"}\n\tfor _, file := range intermediaryFiles {\n\t\tfile = filepath.Join(os.TempDir(), file)\n\t\tif _, err := os.Stat(file); err != nil {\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\t// If file doesn't exists, continue\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\te := os.Remove(file)\n\t\tif e != nil {\n\t\t\tlog.Fatal().Msgf(\"Error clearing files %s\", file)\n\t\t}\n\t}\n}", "func (te *TestEnvironment) Clean() {\n\tuserIDs, err := te.UserDAO.ListAllUserIDs()\n\tif err != nil {\n\t\tte.Logger.Error(fmt.Sprintf(\"ListAllUserIDs returns err: %v\", err))\n\t}\n\n\tfor _, uid := range userIDs {\n\t\tuser, err := te.UserDAO.GetUserByID(uid)\n\t\tif err != nil {\n\t\t\tte.Logger.Error(fmt.Sprintf(\"GetUserByID(%s) returns err: %v\", uid, err))\n\t\t}\n\n\t\tgDrive, err := te.GDriveFactory.New(user)\n\t\tif err != nil {\n\t\t\tte.Logger.Error(fmt.Sprintf(\"Error initializing gdrive client for user %q\", user.Id))\n\t\t\treturn\n\t\t}\n\t\tfileIDs, err := gDrive.ListFileIDs(googledrive.AllMP4s)\n\t\tif err != nil {\n\t\t\tte.Logger.Error(fmt.Sprintf(\"Error listing all file IDs for user %q\", user.Id))\n\t\t}\n\t\tfor _, fid := range fileIDs {\n\t\t\tfor _, prefix := range googledrive.FilePrefixes {\n\t\t\t\tif err := gDrive.MarkFileByID(fid, prefix, true); err != nil {\n\t\t\t\t\tte.Logger.Error(fmt.Sprintf(\"gDrive.MarkFileByID(%s, %s, true) for user %q returns err: %v\", fid, prefix, user.Id, err))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif err := data.DeleteAllUserData(uid, false, te.sqlService, te.SimpleStorage, te.Logger); err != nil {\n\t\t\tte.Logger.Error(fmt.Sprintf(\"DeleteAllUserDataInDB(%s, %t, _) returns err: %v\", uid, false, err))\n\t\t}\n\t}\n}", "func bazelClean(ctx context.Context, checkoutDir string) error {\n\treturn td.Do(ctx, td.Props(\"Cleaning cache with --expunge\"), func(ctx context.Context) error {\n\t\trunCmd := &sk_exec.Command{\n\t\t\tName: \"bazelisk\",\n\t\t\tArgs: append([]string{\"clean\", \"--expunge\"}),\n\t\t\tInheritEnv: true, // Makes sure bazelisk is on PATH\n\t\t\tDir: checkoutDir,\n\t\t\tLogStdout: true,\n\t\t\tLogStderr: true,\n\t\t}\n\t\t_, err := sk_exec.RunCommand(ctx, runCmd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}", "func (s *Syncer) Clear(opt *DirOption) error {\n\treturn filepath.Walk(s.dir, func(p string, info os.FileInfo, err error) error {\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\trelFile, _ := filepath.Rel(s.dir, 
p)\n\t\tif opt != nil && len(opt.Ignore) > 0 {\n\t\t\tfor _, ignore := range opt.Ignore {\n\t\t\t\tif strings.HasPrefix(relFile, ignore) {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tp = filepath.ToSlash(p)\n\t\tif s.syncedFiles[p] {\n\t\t\treturn nil\n\t\t}\n\t\tlog15.Debug(\"Sync|Del|%s\", p)\n\t\treturn os.Remove(p)\n\t})\n}", "func (d *Dirs) Clean() {\n\tfor a, p := range *d {\n\t\t_, err := os.Stat(p)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Deleting entry %s: %s\", a, p)\n\t\t\td.DeleteOne(a)\n\t\t}\n\t}\n}", "func Clean() {\n\tmg.Deps(DeleteLighthouseReports, WebApp.Prune)\n}", "func removeTestFiles(defDir string) {\n\t// svcout dir\n\tos.RemoveAll(filepath.Join(defDir, \"metaverse\"))\n\t// service dir\n\tos.RemoveAll(filepath.Join(defDir, \"test-service\"))\n\t// where the binaries are compiled to\n\tos.RemoveAll(filepath.Join(defDir, \"bin\"))\n\t// Remove all the .pb.go files which may remain\n\tdirs, _ := ioutil.ReadDir(defDir)\n\tfor _, d := range dirs {\n\t\tif strings.HasSuffix(d.Name(), \".pb.go\") {\n\t\t\tos.RemoveAll(filepath.Join(defDir, d.Name()))\n\t\t}\n\t}\n}", "func withCleanup(argPath string, build procedure) procedure {\n\treturn func() error {\n\t\t// Record files names before build\n\t\tf, err := os.Open(argPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfiles, err := f.Readdirnames(-1)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err = f.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\toldFiles := make(map[string]bool, len(files))\n\n\t\tfor _, f := range files {\n\t\t\toldFiles[f] = true\n\t\t}\n\n\t\t// Execute build function\n\t\terr = build()\n\n\t\tif err != nil {\n\t\t\t// Remove new files\n\t\t\tf, err2 := os.Open(argPath)\n\t\t\tif err2 != nil {\n\t\t\t\treturn err2\n\t\t\t}\n\n\t\t\tnewFiles, err2 := f.Readdirnames(-1)\n\t\t\tif err2 != nil {\n\t\t\t\treturn err2\n\t\t\t}\n\n\t\t\tif err2 = f.Close(); err2 != nil {\n\t\t\t\treturn err2\n\t\t\t}\n\n\t\t\tfor _, f := range newFiles {\n\t\t\t\tif !oldFiles[f] {\n\t\t\t\t\t// This file has been added by the run, remove it\n\t\t\t\t\t_ = os.RemoveAll(filepath.Join(argPath, f))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn err\n\t}\n}", "func (self *WorkingTreeCommands) ResetAndClean() error {\n\tsubmoduleConfigs, err := self.submodule.GetConfigs()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(submoduleConfigs) > 0 {\n\t\tif err := self.submodule.ResetSubmodules(submoduleConfigs); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := self.ResetHard(\"HEAD\"); err != nil {\n\t\treturn err\n\t}\n\n\treturn self.RemoveUntrackedFiles()\n}", "func (inst *Installer) cleanDirs(ctx context.Context) error {\n\tvar (\n\t\terr error\n\t\tdone = make(chan struct{})\n\t)\n\n\tgo func() {\n\t\tdefer close(done)\n\t\tinstalldir := filepath.Join(inst.opts.InstallBaseDir, inst.NightlyID())\n\n\t\ttoDelete := []string{\".yumcache\"}\n\t\tif inst.opts.Branch == \"master\" || inst.opts.Branch == \"master-GAUDI\" {\n\t\t\ttoDelete = append(toDelete, \"tdaq\", \"tdaq-common\", \"dqm-common\")\n\t\t}\n\n\t\tfor i, d := range toDelete {\n\t\t\ttoDelete[i] = filepath.Join(installdir, d)\n\t\t}\n\n\t\terr = fs.Dirs(toDelete...).Remove()\n\t}()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\terr = ctx.Err()\n\tcase <-done:\n\t}\n\n\treturn err\n}", "func (s *Satellite) CleanupTempFiles() {\n\tif !util.IsInDirectory(env.GetPublicPath(), s.ExtractedFilePath) {\n\t\tutil.Delete(s.ExtractedFilePath)\n\t}\n}", "func (d *FileWriter) CleanUp() {\n\tconst hoursPerDay = 24\n\n\tlogger := 
log.PrefixedLog(loggerPrefixFileWriter)\n\n\tlogger.Trace(\"starting clean up\")\n\n\tfiles, err := os.ReadDir(d.target)\n\n\tutil.LogOnErrorWithEntry(logger.WithField(\"target\", d.target), \"can't list log directory: \", err)\n\n\t// search for log files, which names starts with date\n\tfor _, f := range files {\n\t\tif strings.HasSuffix(f.Name(), \".log\") && len(f.Name()) > 10 {\n\t\t\tt, err := time.Parse(\"2006-01-02\", f.Name()[:10])\n\t\t\tif err == nil {\n\t\t\t\tdifferenceDays := uint64(time.Since(t).Hours() / hoursPerDay)\n\t\t\t\tif d.logRetentionDays > 0 && differenceDays > d.logRetentionDays {\n\t\t\t\t\tlogger.WithFields(logrus.Fields{\n\t\t\t\t\t\t\"file\": f.Name(),\n\t\t\t\t\t\t\"ageInDays\": differenceDays,\n\t\t\t\t\t\t\"logRetentionDays\": d.logRetentionDays,\n\t\t\t\t\t}).Info(\"existing log file is older than retention time and will be deleted\")\n\n\t\t\t\t\terr := os.Remove(filepath.Join(d.target, f.Name()))\n\t\t\t\t\tutil.LogOnErrorWithEntry(logger.WithField(\"file\", f.Name()), \"can't remove file: \", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (f *fixture) cleanUp(ctx context.Context, s *testing.FixtState) {\n\tif err := ash.CloseAllWindows(ctx, f.tconn); err != nil {\n\t\ts.Error(\"Failed trying to close all windows: \", err)\n\t}\n\n\tf.tconn = nil\n\n\tif len(f.drivefsOptions) > 0 && f.driveFs != nil {\n\t\tif err := f.driveFs.ClearCommandLineFlags(); err != nil {\n\t\t\ts.Fatal(\"Failed to remove command line args file: \", err)\n\t\t}\n\t}\n\tf.driveFs = nil\n\tf.mountPath = \"\"\n\n\t// Clean up files in this account that are older than 1 hour, files past this\n\t// date are assumed no longer required and were not successfully cleaned up.\n\t// Note this removal can take a while ~1s per file and may end up exceeding\n\t// the timeout, this is not a failure as the next run will try to remove the\n\t// files that weren't deleted in time.\n\tfileList, err := f.APIClient.ListAllFilesOlderThan(ctx, time.Hour)\n\tif err != nil {\n\t\ts.Error(\"Failed to list all my drive files: \", err)\n\t} else {\n\t\ts.Logf(\"Attempting to remove %d files older than 1 hour\", len(fileList.Files))\n\t\tfor _, i := range fileList.Files {\n\t\t\tif err := f.APIClient.RemoveFileByID(ctx, i.Id); err != nil {\n\t\t\t\ts.Logf(\"Failed to remove file %q (%s): %v\", i.Name, i.Id, err)\n\t\t\t} else {\n\t\t\t\ts.Logf(\"Successfully removed file %q (%s, %s)\", i.Name, i.Id, i.ModifiedTime)\n\t\t\t}\n\t\t}\n\t}\n\tf.APIClient = nil\n\tif f.cr != nil {\n\t\tif err := f.cr.Close(ctx); err != nil {\n\t\t\ts.Log(\"Failed closing chrome: \", err)\n\t\t}\n\t\tf.cr = nil\n\t}\n}", "func (self *WorkingTreeCommands) RemoveUntrackedFiles() error {\n\tcmdArgs := NewGitCmd(\"clean\").Arg(\"-fd\").ToArgv()\n\n\treturn self.cmd.New(cmdArgs).Run()\n}", "func (u *Update) CleanUp() error {\n\n\tif fileExists(*u.dagFolderPath + \"/mollywallet.zip\") {\n\t\terr := os.Remove(*u.dagFolderPath + \"/mollywallet.zip\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif fileExists(*u.dagFolderPath + \"/backup\") {\n\t\terr := os.RemoveAll(*u.dagFolderPath + \"/backup\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif fileExists(*u.dagFolderPath + \"/new_build\") {\n\t\terr := os.RemoveAll(*u.dagFolderPath + \"/new_build\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func clear() {\n\tos.RemoveAll(TestDir)\n}", "func (rp *CFGoReadPackage) Clean() {\n\trp.first = true\n}", "func (r *Runner) Cleanup() {\n\tif !r.Preserve || r.Error() == nil {\n\t\tdefer 
os.RemoveAll(r.Dir)\n\t}\n}", "func WriteLogClean() {\n\n\tRemoveFile(FILELOG)\n}", "func (fr *Runner) Cleanup() {\n\tif fr.LocalDataDir != \"\" {\n\t\tos.RemoveAll(fr.LocalDataDir) //nolint:errcheck\n\t}\n}", "func cleanTestDir(dirPath string) error {\n\tif _, err := os.Stat(dirPath); !os.IsNotExist(err) {\n\t\tif err := os.RemoveAll(dirPath); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func Clean(c Config) {\n\n\tSetup(&c)\n\tContainers, _ := model.DockerContainerList()\n\n\tfor _, Container := range Containers {\n\t\ttarget := false\n\t\tif l := Container.Labels[\"pygmy.enable\"]; l == \"true\" || l == \"1\" {\n\t\t\ttarget = true\n\t\t}\n\t\tif l := Container.Labels[\"pygmy\"]; l == \"pygmy\" {\n\t\t\ttarget = true\n\t\t}\n\n\t\tif target {\n\t\t\terr := model.DockerKill(Container.ID)\n\t\t\tif err == nil {\n\t\t\t\tfmt.Printf(\"Successfully killed %v.\\n\", Container.Names[0])\n\t\t\t}\n\n\t\t\terr = model.DockerRemove(Container.ID)\n\t\t\tif err == nil {\n\t\t\t\tfmt.Printf(\"Successfully removed %v.\\n\", Container.Names[0])\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, network := range c.Networks {\n\t\tmodel.DockerNetworkRemove(&network)\n\t\tif s, _ := model.DockerNetworkStatus(&network); s {\n\t\t\tfmt.Printf(\"Successfully removed network %v\\n\", network.Name)\n\t\t} else {\n\t\t\tfmt.Printf(\"Network %v was not removed\\n\", network.Name)\n\t\t}\n\t}\n\n\tfor _, resolver := range c.Resolvers {\n\t\tresolver.Clean()\n\t}\n}", "func RemoveAllFile() {\n\tos.RemoveAll(config.SDFS_DIR)\n\tos.MkdirAll(config.SDFS_DIR, config.PERM_MODE)\n}", "func Clean() {\n\tfmt.Println(\"cleaning ...\")\n\tmg.Deps(cleanBuild, cleanMage, cleanDeps)\n}", "func cleanup(dir string) {\n\tos.RemoveAll(dir)\n}", "func (s *stager) wipe() error {\n\t// Reset the prefix creation tracker.\n\ts.prefixCreated = make(map[string]bool, numberOfByteValues)\n\n\t// Reset root creation tracking.\n\ts.rootCreated = false\n\n\t// Remove the staging root.\n\tif err := os.RemoveAll(s.root); err != nil {\n\t\terrors.Wrap(err, \"unable to remove staging directory\")\n\t}\n\n\t// Success.\n\treturn nil\n}", "func CleanDirs() {\n\tcontents, err := ioutil.ReadDir(\"/var/tmp\")\n\tif err != nil {\n\t\tExitErr(fmt.Errorf(\"failed to access /var/tmp: %v\", err))\n\t}\n\tfor _, file := range contents {\n\t\tif !strings.Contains(file.Name(), \"invertpdf--\") {\n\t\t\tcontinue\n\t\t}\n\t\t_ = os.RemoveAll(fmt.Sprintf(\"/var/tmp/%s\", file.Name()))\n\t}\n}", "func setup(t *testing.T) {\n\terr := os.RemoveAll(storagePath)\n\trequire.NoError(t, err)\n}", "func (c *cache) clean() {\n\t// Cache may be empty so end\n\t_, err := os.Stat(c.root)\n\tif os.IsNotExist(err) {\n\t\treturn\n\t}\n\n\tfs.Debugf(nil, \"Cleaning the cache\")\n\n\t// first walk the FS to update the atimes\n\terr = c.updateAtimes()\n\tif err != nil {\n\t\tfs.Errorf(nil, \"Error traversing cache %q: %v\", c.root, err)\n\t}\n\n\t// Now remove any files that are over age\n\tc.purgeOld(c.opt.CacheMaxAge)\n\n\t// Now tidy up any empty directories\n\terr = fs.Rmdirs(c.f, \"\")\n\tif err != nil {\n\t\tfs.Errorf(c.f, \"Failed to remove empty directories from cache: %v\", err)\n\t}\n}", "func RemoveAll(path string) error", "func (b *Builder) Cleanup() {\n\tos.RemoveAll(b.root)\n}", "func (rign *CFGoReadIgnore) Clean() {\n}", "func Clean(category string) error {\n\tdir, err := GetTemplateDir(category)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn os.RemoveAll(dir)\n}", "func cleanup() {\n\tlog.Verbose(\"Cleaning up sensitive and temp files\")\n\tif _, err := 
os.Stat(\"ca.crt\"); err == nil {\n\t\tdeleteFile(\"ca.crt\")\n\t}\n\n\tif _, err := os.Stat(\"ca.key\"); err == nil {\n\t\tdeleteFile(\"ca.key\")\n\t}\n\n\tif _, err := os.Stat(\"client.crt\"); err == nil {\n\t\tdeleteFile(\"client.crt\")\n\t}\n\n\tif _, err := os.Stat(\"bearer.token\"); err == nil {\n\t\tdeleteFile(\"bearer.token\")\n\t}\n\n\tfor _, app := range s.Apps {\n\t\tif _, err := os.Stat(app.SecretsFile + \".dec\"); err == nil {\n\t\t\tdeleteFile(app.SecretsFile + \".dec\")\n\t\t}\n\t\tfor _, secret := range app.SecretsFiles {\n\t\t\tif _, err := os.Stat(secret + \".dec\"); err == nil {\n\t\t\t\tdeleteFile(secret + \".dec\")\n\t\t\t}\n\t\t}\n\t}\n\n}", "func (bm *OCRBatchImageManager) CleanUp() error {\n\tTRACE.Println(\"OCRBatchImageManager.CleanUp\", bm.tempfolder)\n\terr := os.RemoveAll(bm.tempfolder)\n\tif err != nil {\n\t\tERROR.Println(\"OCRBatchImageManager.CleanUp\", err)\n\t}\n\treturn err\n}", "func Clean(root_dir string, date string) []error {\n\terrors := []error{}\n\n\tif date == \"yesterday\" {\n\t\t// Get 'yymmdd' for yesterday.\n\t\tdate = time.Now().UTC().AddDate(0, 0, -1).Format(\"20060102\")\n\t}\n\n\tdata_dir := root_dir + \"/data\"\n\td, err := os.Open(data_dir)\n\tif err != nil {\n\t\terrors = append(errors, err)\n\t}\n\tdefer d.Close()\n\tsymbols, _ := d.Readdirnames(-1)\n\tif len(symbols) == 0 {\n\t\terrors = append(errors, fmt.Errorf(\"Nothing to clean!\"))\n\t}\n\tfor _, symbol := range symbols {\n\t\toptions_dir := data_dir + \"/\" + symbol + \"/o\"\n\t\te, err := os.Open(options_dir)\n\t\tif err != nil {\n\t\t\t//fmt.Printf(\"No 'o' subdir found for: %s!\\n\", data_dir)\n\t\t\terrors = append(errors, err)\n\t\t\tcontinue\n\t\t}\n\t\tdefer e.Close()\n\t\texpirations, _ := e.Readdirnames(-1)\n\t\tfor _, expiration := range expirations {\n\t\t\tif len(expiration) != len(\"20140101\") {\n\t\t\t\tfmt.Printf(\"Bad Length for: %s!\\n\", expiration)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif expiration < date {\n\t\t\t\tfmt.Printf(\"No need to check exp: %s, date: %s\\n\", expiration, date)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\texp_dir := options_dir + \"/\" + expiration\n\t\t\tfor _, _type := range []string{\"c\", \"p\"} {\n\t\t\t\tcleanup_file := exp_dir + \"/\" + _type + \"/\" + date\n\t\t\t\tcontents, err := ioutil.ReadFile(cleanup_file)\n\t\t\t\tif err != nil {\n\t\t\t\t\t//fmt.Printf(\"Could not find cleanup_file\\n\\t%s\\n\\t%s\\n\", cleanup_file, err)\n\t\t\t\t\terrors = append(errors, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tcleanFile(cleanup_file, contents, \"option\")\n\t\t\t}\n\t\t}\n\t\tstock_file := data_dir + \"/\" + symbol + \"/s/\" + date\n\t\tcontents, err := ioutil.ReadFile(stock_file)\n\t\tif err != nil {\n\t\t\t//fmt.Printf(\"Could not read stock_file! 
err: %s\", err)\n\t\t\terrors = append(errors, err)\n\t\t\tcontinue\n\t\t}\n\t\tcleanFile(stock_file, contents, \"stock\")\n\t}\n\tif len(errors) == 0 {\n\t\terrors = nil\n\t}\n\treturn errors\n}", "func cleanTests(servicesDir string) {\n\t// Remove the 0-basic used for non building tests\n\tos.RemoveAll(filepath.Join(servicesDir, \"0-basic\"))\n\t// Clean up the service directories in each test\n\tdirs, _ := ioutil.ReadDir(servicesDir)\n\tfor _, d := range dirs {\n\t\t// If this item is not a directory skip it\n\t\tif !d.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\tremoveTestFiles(filepath.Join(servicesDir, d.Name()))\n\t}\n}", "func (am *Manager) Clean() {\n\tfor name, m := range am.Materials {\n\t\tLogger.Printf(\"Manager: deleting Material '%s'\\n\", name)\n\t\tm.Clean()\n\t\tdelete(am.Materials, name)\n\t}\n\tfor name, m := range am.Meshes {\n\t\tLogger.Printf(\"Manager: deleting Mesh '%s'\\n\", name)\n\t\tm.Clean()\n\t\tdelete(am.Meshes, name)\n\t}\n\tfor set, prog := range am.Programs {\n\t\tLogger.Printf(\"Manager: deleting Program '%v'\\n\", set)\n\t\tgl.DeleteProgram(prog)\n\t\tdelete(am.Programs, set)\n\t}\n\tfor name, shader := range am.Shaders {\n\t\tLogger.Printf(\"Manager: deleting Shader '%s'\\n\", name)\n\t\tgl.DeleteShader(shader)\n\t\tdelete(am.Shaders, name)\n\t}\n\tfor name, tex := range am.Textures {\n\t\tLogger.Printf(\"Manager: deleting Texture '%s'\\n\", name)\n\t\ttex.Clean()\n\t\tdelete(am.Textures, name)\n\t}\n}", "func (f *HelmConfiguration) Cleanup() {\n\tif len(f.Folder) > 0 {\n\t\tos.RemoveAll(f.Folder)\n\t}\n}", "func (t *Track) CleanDirectory(path string) {\n\tvar name string\n\tif t.isAudio {\n\t\tname = \"audio\"\n\t} else {\n\t\tname = \"video\"\n\t}\n\tfiles, _ := ioutil.ReadDir(path)\n\tfor _, fi := range files {\n\t\tif strings.Contains(fi.Name(), \"chunk_\" + name + strconv.Itoa(t.index)) {\n\t\t\ti := 0\n\t\t\tfor ; i < len(t.chunksName); i++ {\n\t\t\t\tif fi.Name() == t.chunksName[i] {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif i == len(t.chunksName) {\n\t\t\t\tos.Remove(filepath.Join(path, fi.Name()))\n\t\t\t}\n\t\t}\n\t}\n}", "func (s *Server) cleanTmpFiles(dir string) {\n\tnow := time.Now()\n\tpackdir := filepath.Join(dir, \".git\", \"objects\", \"pack\")\n\terr := filepath.Walk(packdir, func(path string, info os.FileInfo, err error) error {\n\t\tif path != packdir && info.IsDir() {\n\t\t\treturn filepath.SkipDir\n\t\t}\n\t\tfile := filepath.Base(path)\n\t\tif strings.HasPrefix(file, \"tmp_pack_\") {\n\t\t\tif now.Sub(info.ModTime()) > longGitCommandTimeout {\n\t\t\t\terr := os.Remove(path)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tlog15.Error(\"error removing tmp_pack_* files\", \"error\", err)\n\t}\n}", "func cleanDir(name string) error {\n\tif err := sh.Rm(name); err != nil {\n\t\treturn err\n\t}\n\treturn os.MkdirAll(name, 0755)\n}", "func (b *TestBackend) IgnoreCleanable() {}", "func (ridt *CFGoReadLexUnit) Clean() {\n}", "func cleanup() {\n\tif len(tmpDir) > 0 {\n\t\tlogDebug.Printf(\"cleaning up %s\", tmpDir)\n\t\tos.RemoveAll(tmpDir)\n\t}\n}", "func CleanupFiles(configuration *types.JobConfiguration) {\n\tjobName := configuration.JobName\n\tmapperInputFileNames := configuration.MapperInputFileNames\n\tnumReducers := configuration.NumReducers\n\n\t// Clean up mapper output files.\n\tfor mapTaskIdx := range mapperInputFileNames {\n\t\tfor reduceTaskIdx := 0; reduceTaskIdx < numReducers; reduceTaskIdx++ {\n\t\t\tfileName := IntermediateFileName(\n\t\t\t\tjobName, 
mapTaskIdx, reduceTaskIdx,\n\t\t\t)\n\t\t\tos.Remove(fileName)\n\t\t}\n\t}\n\n\t// Clean up reducer output files.\n\tfor reduceTaskIdx := 0; reduceTaskIdx < numReducers; reduceTaskIdx++ {\n\t\tfileName := ReducerOutputFileName(jobName, reduceTaskIdx)\n\t\tos.Remove(fileName)\n\t}\n}", "func (tf *Terraform) CleanUp() error {\n\ttf.t.Helper()\n\n\tif tf.logging {\n\t\tlog.Println(\"cleaning up...\")\n\t}\n\n\terr := os.RemoveAll(tf.tempDir)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to clean up temp dir '%s': %v\", tf.tempDir, err)\n\t}\n\n\tif tf.logging {\n\t\tlog.Printf(\"temp dir was successfully removed ('%s')\", tf.tempDir)\n\t}\n\n\treturn nil\n}", "func removeData(){\n\tos.RemoveAll(\"Logs\")\n\tos.RemoveAll(\"files\")\n\t//os.Mkdir(\"files\",0777)\n}", "func (c *cache) cleanUp() error {\n\treturn os.RemoveAll(c.root)\n}", "func ClearDir(dirPath string, keepNames ...string) (err error) {\n if IsDir(dirPath) {\n var fileInfos []os.FileInfo\n if fileInfos, err = Dir(dirPath); err == nil {\n for _, fi := range fileInfos {\n if fn := fi.Name(); !ustr.In(fn, keepNames...) {\n if err = os.RemoveAll(filepath.Join(dirPath, fn)); err != nil {\n return\n }\n }\n }\n }\n }\n return\n}", "func cleanup(ctx context.Context, fs fs, logger log.FieldLogger, props processorProps) {\n\tspan, _ := opentracing.StartSpanFromContext(ctx, \"cleanup\")\n\tdefer span.Finish()\n\n\terr := fs.DeleteDir(props.WorkDir)\n\tif err != nil {\n\t\tlogger.Errorf(\"%+v\\n\", err)\n\t}\n}", "func (f *Fs) CleanUp() error {\n\tdo := f.Fs.Features().CleanUp\n\tif do == nil {\n\t\treturn errors.New(\"can't CleanUp\")\n\t}\n\treturn do()\n}", "func cleanupProviderFiles(path string, payloads []*v1alpha1.File) error {\n\tfor i := range payloads {\n\t\t// AtomicWriter only symlinks the top file or directory\n\t\tfirstComponent := strings.Split(payloads[i].GetPath(), string(os.PathSeparator))[0]\n\n\t\tp := filepath.Join(path, firstComponent)\n\t\tinfo, err := os.Lstat(p)\n\t\tif os.IsNotExist(err) {\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// skip symlinks\n\t\tif info.Mode()&os.ModeSymlink != 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif err := os.RemoveAll(p); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (m *Master) removeIntermediateFiles() {\n\tfor _, fileNames := range m.reduceJobs {\n\t\tfor _, fileName := range fileNames {\n\t\t\tos.Remove(fileName)\n\t\t}\n\t}\n}", "func (le *LazyExe) Cleanup() error {\n\tle.mu.Lock()\n\tdefer le.mu.Unlock()\n\tif le.tmpFile != nil {\n\t\tif err := os.Remove(le.tmpFile.Name()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tle.tmpFile = nil\n\t}\n\treturn nil\n}", "func resetConfigDir(configDirPath string) {\n\tdirsToClean := []string{\n\t\tfilepath.Join(configDirPath, \"manifests\"),\n\t\tfilepath.Join(configDirPath, \"pki\"),\n\t}\n\tfmt.Printf(\"Deleting contents of config directories: %v\\n\", dirsToClean)\n\tfor _, dir := range dirsToClean {\n\t\tcleanDir(dir)\n\t}\n\n\tfilesToClean := []string{\n\t\tfilepath.Join(configDirPath, \"admin.conf\"),\n\t\tfilepath.Join(configDirPath, \"kubelet.conf\"),\n\t}\n\tfmt.Printf(\"Deleting files: %v\\n\", filesToClean)\n\tfor _, path := range filesToClean {\n\t\terr := os.RemoveAll(path)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"failed to remove file: [%v]\\n\", err)\n\t\t}\n\t}\n}", "func cleanTestDir(policyPath string) error {\n\tif _, err := os.Stat(policyPath); !os.IsNotExist(err) {\n\t\tif err := os.RemoveAll(policyPath); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := 
os.MkdirAll(policyPath, 0764); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func cleanDir(path string) {\n\t// If the directory doesn't even exist there's nothing to do, and we do\n\t// not consider this an error:\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\treturn\n\t}\n\n\td, err := os.Open(path)\n\tif err != nil {\n\t\tfmt.Printf(\"failed to remove directory: [%v]\\n\", err)\n\t}\n\tdefer d.Close()\n\tnames, err := d.Readdirnames(-1)\n\tif err != nil {\n\t\tfmt.Printf(\"failed to remove directory: [%v]\\n\", err)\n\t}\n\tfor _, name := range names {\n\t\terr = os.RemoveAll(filepath.Join(path, name))\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"failed to remove directory: [%v]\\n\", err)\n\t\t}\n\t}\n}" ]
[ "0.68612343", "0.6808517", "0.66689456", "0.65776414", "0.64366883", "0.63556844", "0.63354766", "0.63250905", "0.63042855", "0.62775576", "0.6232528", "0.6232528", "0.6222042", "0.62022847", "0.6167941", "0.61160666", "0.60688245", "0.6043584", "0.6016467", "0.60079503", "0.59952724", "0.59929276", "0.59806097", "0.59707165", "0.5966261", "0.59655225", "0.58960456", "0.58753216", "0.5856466", "0.5842573", "0.5797635", "0.5795918", "0.5768147", "0.57474744", "0.57471204", "0.5745192", "0.5738947", "0.5720099", "0.57027256", "0.5670119", "0.56692153", "0.56621796", "0.5658869", "0.5632639", "0.56159014", "0.5613426", "0.55963016", "0.55856174", "0.558376", "0.5572444", "0.5558601", "0.55426943", "0.55416703", "0.5530349", "0.5520816", "0.5507312", "0.5500699", "0.5499002", "0.5496558", "0.54838663", "0.5481356", "0.54792064", "0.54755723", "0.5465169", "0.54646516", "0.5417844", "0.5416093", "0.5393387", "0.53889877", "0.53702456", "0.53416055", "0.5341295", "0.53288555", "0.53116846", "0.5307059", "0.52998775", "0.52980286", "0.528275", "0.5279953", "0.52514344", "0.52513707", "0.52497417", "0.5246857", "0.52467084", "0.52388674", "0.522482", "0.52055746", "0.5203128", "0.5200512", "0.51909864", "0.5187682", "0.51796496", "0.51705617", "0.5165848", "0.5158155", "0.51519734", "0.5143594", "0.51377046", "0.5122101", "0.5119515", "0.51017916" ]
0.0
-1
Chain links a plugin instance into the chain.
func (goldsmith *Goldsmith) Chain(plugin Plugin) *Goldsmith {
	goldsmith.contextHasher.Write([]byte(plugin.Name()))

	context := &Context{
		goldsmith:   goldsmith,
		plugin:      plugin,
		hash:        goldsmith.contextHasher.Sum32(),
		outputFiles: make(chan *File),
	}
	context.fileFilters = append(context.fileFilters, goldsmith.fileFilters...)

	if len(goldsmith.contexts) > 0 {
		context.inputFiles = goldsmith.contexts[len(goldsmith.contexts)-1].outputFiles
	}

	goldsmith.contexts = append(goldsmith.contexts, context)
	return goldsmith
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (self *Goldsmith) Chain(plugin Plugin) *Goldsmith {\n\tcontext := &Context{\n\t\tgoldsmith: self,\n\t\tplugin: plugin,\n\t\tfiltersExt: append(filterStack(nil), self.filters...),\n\t\tindex: self.index,\n\t\tfilesOut: make(chan *File),\n\t}\n\n\tif len(self.contexts) > 0 {\n\t\tcontext.filesIn = self.contexts[len(self.contexts)-1].filesOut\n\t}\n\n\tself.contexts = append(self.contexts, context)\n\tself.index++\n\n\treturn self\n}", "func (r *AssetResolver) Chain(chain ...AssetHandler) {\n\tr.chain = chain\n}", "func (self *Tween) ChainI(args ...interface{}) *Tween{\n return &Tween{self.Object.Call(\"chain\", args)}\n}", "func (pl *Pipeline) Chain(cmds ...*exec.Cmd) {\n pl.tasks = append(pl.tasks, cmds...)\n}", "func (self *Tween) Chain(tweens *Tween) *Tween{\n return &Tween{self.Object.Call(\"chain\", tweens)}\n}", "func (n *Node) Chain(next *Node) *Node {\n\tif n.IsChained(next) {\n\t\treturn next\n\t}\n\tc := n.output.Client()\n\tnext.input.Watch(c)\n\tn.chained[next.ID()] = struct{}{}\n\treturn next\n}", "func (w *Wechaty) Use(plugin *Plugin) *Wechaty {\n\tplugin.registerPluginEvent(w)\n\treturn w\n}", "func cmdAdd(args *skel.CmdArgs) error {\n\tconf, err := parseConfig(args.StdinData)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// A plugin can be either an \"originating\" plugin or a \"chained\" plugin.\n\t// Originating plugins perform initial sandbox setup and do not require\n\t// any result from a previous plugin in the chain. A chained plugin\n\t// modifies sandbox configuration that was previously set up by an\n\t// originating plugin and may optionally require a PrevResult from\n\t// earlier plugins in the chain.\n\n\t// START chained plugin code\n\tif conf.PrevResult == nil {\n\t\treturn fmt.Errorf(\"must be called as chained plugin\")\n\t}\n\n\t// Convert the PrevResult to a concrete Result type that can be modified.\n\tprevResult, err := current.GetResult(conf.PrevResult)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to convert prevResult: %v\", err)\n\t}\n\n\tif len(prevResult.IPs) == 0 {\n\t\treturn fmt.Errorf(\"got no container IPs\")\n\t}\n\n\t// Pass the prevResult through this plugin to the next one\n\tresult := prevResult\n\n\t// END chained plugin code\n\n\t// START originating plugin code\n\t// if conf.PrevResult != nil {\n\t//\treturn fmt.Errorf(\"must be called as the first plugin\")\n\t// }\n\n\t// Generate some fake container IPs and add to the result\n\t// result := &current.Result{CNIVersion: current.ImplementedSpecVersion}\n\t// result.Interfaces = []*current.Interface{\n\t// \t{\n\t// \t\tName: \"intf0\",\n\t// \t\tSandbox: args.Netns,\n\t// \t\tMac: \"00:11:22:33:44:55\",\n\t// \t},\n\t// }\n\t// result.IPs = []*current.IPConfig{\n\t// \t{\n\t// \t\tAddress: \"1.2.3.4/24\",\n\t// \t\tGateway: \"1.2.3.1\",\n\t// \t\t// Interface is an index into the Interfaces array\n\t// \t\t// of the Interface element this IP applies to\n\t// \t\tInterface: current.Int(0),\n\t// \t}\n\t// }\n\t// END originating plugin code\n\n\t// Implement your plugin here\n\n\t// Pass through the result for the next plugin\n\treturn types.PrintResult(result, conf.CNIVersion)\n}", "func (m *ServeMux) Plugin(plugin interface{}) {\n\tp, ok := plugin.(*pluginContainer)\n\tif !ok {\n\t\tp = &pluginContainer{base: plugin}\n\t}\n\tp.check()\n\tm.plugins = append(m.plugins, p)\n}", "func (w *BaseWorker) Chain(nextWorker Worker) Worker {\n\tif w.output == nil {\n\t\tw.output = NewFileStream()\n\t}\n\t// override the input in the worker\n\tnextWorker.SetInput(w.output)\n\n\tif o := 
nextWorker.Output(); o == nil {\n\t\tnextWorker.SetOutput(NewFileStream())\n\t}\n\tw.nextWorkers = append(w.nextWorkers, nextWorker)\n\tnextWorker.Start()\n\treturn nextWorker\n}", "func (self *Tween) SetChainedTweenA(member *Tween) {\n self.Object.Set(\"chainedTween\", member)\n}", "func Plugin(transport plugin.Transport, plugin server.VersionedInterface, more ...server.VersionedInterface) {\n\t_, running := BackgroundPlugin(transport, nil, plugin, more...)\n\t<-running\n}", "func (b *bot) AddPlugin(h Plugin) {\n\tname := reflect.TypeOf(h).String()\n\tb.plugins[name] = h\n\tb.pluginOrdering = append(b.pluginOrdering, name)\n}", "func NewChain(hooks ...ent.Hook) Chain {\n\treturn Chain{append([]ent.Hook(nil), hooks...)}\n}", "func NewChain(hooks ...ent.Hook) Chain {\n\treturn Chain{append([]ent.Hook(nil), hooks...)}\n}", "func (s LifecyclerRPC) WithInstance(state *isclib.Instance) error {\n\tvar resp struct{}\n\terr := s.client.Call(\"Plugin.WithInstance\", state, &resp)\n\treturn err\n}", "func (chain *Chain) append(cm Middleware) (newChain *Chain) {\n\tnewChain = NewChain(cm)\n\tnewChain.parent = chain\n\treturn newChain\n}", "func NewMiddlewareChain(mw ...Middleware) (mc MiddlewareChain) {\n\tif len(mw) > 0 {\n\t\tmc = mc.Extend(mw...)\n\t}\n\treturn\n}", "func (b *Bot) AddPlugin(p Plugin) {\n\tb.plugins = append(b.plugins, p)\n}", "func NewAdmissionChain(handlers ...AdmissionHandler) AdmissionChain {\n\treturn AdmissionChain{handlers: handlers}\n}", "func Plugin(plugin interface{}) {\n\tDefaultMux.Plugin(plugin)\n}", "func (m *Manager) RegisterGroupHookToChain(hook, chain string) error {\n\teph, ok := m.groupHooks[hook]\n\tif !ok {\n\t\tm.log.Warn(\"Missing hook during chain initializtion\", \"chain\", chain, \"hook\", hook)\n\t\treturn ErrUnknownHook\n\t}\n\tm.groupProcesses[chain] = append(m.groupProcesses[chain], eph)\n\tsort.Slice(m.groupProcesses[chain], func(i, j int) bool {\n\t\treturn m.groupProcesses[chain][i].Priority() < m.groupProcesses[chain][j].Priority()\n\t})\n\tm.log.Trace(\"Registered hook to chain\", \"chain\", chain, \"hook\", hook)\n\treturn nil\n}", "func (c *Chain) Add(chain ...interface{}) {\n\t// Convert whatever to chainFunc\n\tfor _, v := range chain {\n\t\tswitch vt := v.(type) {\n\t\tcase func(handler http.Handler) http.Handler:\n\t\t\tc.chain = append(c.chain, vt)\n\t\tcase Func:\n\t\t\tc.chain = append(c.chain, vt)\n\t\tdefault:\n\t\t\tpanic(\"Unsupported now: \" + reflect.TypeOf(v).String())\n\t\t}\n\n\t}\n\n\t//c.chain = append(c.chain, chain...)\n}", "func NewChain(host p2p.Host) *Chain {\n\tc := &Chain{\n\t\thost: host,\n\t}\n\n\tif err := host.SetStreamHandler(protocolChain, c.handleNewStream); err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn c\n}", "func (this *Router) Use(handler HandlerFunc) *Router {\n\tthis.handlerChain = append(this.handlerChain, handler)\n\tfor _, router := range this.children {\n\t\trouter.handlerChain = append(router.handlerChain, handler)\n\t}\n\treturn this\n}", "func Provider() *Chain {\n\tc := &Chain{}\n\tc.Version = \"1.0\"\n\tc.CreationDate = time.Now()\n\treturn c\n}", "func (app *builder) WithChain(chain chains.Chain) Builder {\n\tapp.chain = chain\n\treturn app\n}", "func AddPluginRedirect(redirect RequestPlugin) {\n\tif clientPlugin.RequestPlugin != nil {\n\t\tlog.Fatal(\"redirect plugin has been occupied by another plugin.\")\n\t\treturn\n\t}\n\tclientPlugin.RequestPlugin = redirect\n}", "func (app *contentBuilder) WithChain(chain chains.Chain) ContentBuilder {\n\tapp.chain = chain\n\treturn app\n}", "func (s 
*Server) AddChainRoute(handler *common.HTTPHandler, ctx *snow.Context, base, endpoint string, log logging.Logger) error {\n\turl := fmt.Sprintf(\"%s/%s\", baseURL, base)\n\ts.log.Info(\"adding route %s%s\", url, endpoint)\n\t// Apply logging middleware\n\th := handlers.CombinedLoggingHandler(log, handler.Handler)\n\t// Apply middleware to grab/release chain's lock before/after calling API method\n\th, err := lockMiddleware(h, handler.LockOptions, &ctx.Lock)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Apply middleware to reject calls to the handler before the chain finishes bootstrapping\n\th = rejectMiddleware(h, ctx)\n\treturn s.router.AddRouter(url, endpoint, h)\n}", "func Chain(hs ...juggler.Handler) juggler.Handler {\n\treturn juggler.HandlerFunc(func(ctx context.Context, c *juggler.Conn, m message.Msg) {\n\t\tfor _, h := range hs {\n\t\t\th.Handle(ctx, c, m)\n\t\t}\n\t})\n}", "func (p *pluginContainer) Add(plugin Plugin) {\n\tp.plugins = append(p.plugins, plugin)\n}", "func (r *Router) AddPlugin(p ...RouterPlugin) {\n\tr.logger.Debug(\"Adding plugins\", watermill.LogFields{\"count\": fmt.Sprintf(\"%d\", len(p))})\n\n\tr.plugins = append(r.plugins, p...)\n}", "func (_AnchorChain *AnchorChainTransactor) ConnectChain(opts *bind.TransactOpts, _path string, _method string, _args []string, _callbackPath string, _callbackMethod string, _crossChainType string) (*types.Transaction, error) {\n\treturn _AnchorChain.contract.Transact(opts, \"connectChain\", _path, _method, _args, _callbackPath, _callbackMethod, _crossChainType)\n}", "func NewChain(hooktype string) *Chain {\n\tresult := Chain{[]detector.Detector{}, hooktype}\n\treturn &result\n}", "func (self *Tween) ChainedTween() *Tween{\n return &Tween{self.Object.Get(\"chainedTween\")}\n}", "func (app *application) Chain() error {\n\t// authenticate:\n\tidentityApp, err := app.appli.Current().Authenticate(app.name, app.seed, app.password)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// mine the next block:\n\terr = identityApp.Sub().Chain().Block(app.additionalBucketsPerBlock)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// mine the next link:\n\terr = identityApp.Sub().Chain().Link(app.additionalBucketsPerBlock)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// retrieve the local chain:\n\tlocalChain, err := app.appli.Sub().Chain().Retrieve()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// retrieve the peers:\n\tlocalPeers, err := app.appli.Sub().Peers().Retrieve()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbiggestDiff := 0\n\tvar biggestChain chains.Chain\n\tvar biggestChainApp application_chain.Application\n\tallPeers := localPeers.All()\n\tfor _, onePeer := range allPeers {\n\t\tremoteApp, err := app.clientBuilder.Create().WithPeer(onePeer).Now()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tremoteChainApp := remoteApp.Sub().Chain()\n\t\tremoteChain, err := remoteChainApp.Retrieve()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdiffTrx := int(remoteChain.Total() - localChain.Total())\n\t\tif biggestDiff < diffTrx {\n\t\t\tbiggestDiff = diffTrx\n\t\t\tbiggestChain = remoteChain\n\t\t\tbiggestChainApp = remoteChainApp\n\t\t}\n\t}\n\n\t// if there is no chain in the network more advanced, return:\n\tif biggestChain == nil {\n\t\treturn nil\n\t}\n\n\t// update the chain:\n\tlocalIndex := int(localChain.Height())\n\tdiffHeight := int(biggestChain.Height()) - localIndex\n\tfor i := 0; i < diffHeight; i++ {\n\t\tchainIndex := localIndex + i\n\t\tremoteChainAtIndex, err := biggestChainApp.RetrieveAtIndex(uint(chainIndex))\n\t\tif 
err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tremoteHead := remoteChainAtIndex.Head()\n\t\tgen := localChain.Genesis()\n\t\troot := localChain.Root()\n\t\ttotal := localChain.Total() + 1\n\t\tupdatedChain, err := app.chainBuilder.Create().WithGenesis(gen).WithRoot(root).WithHead(remoteHead).WithTotal(total).Now()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = app.chainService.Update(localChain, updatedChain)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (i *instanceManager) dispense() (plugin drivers.DriverPlugin, err error) {\n\ti.pluginLock.Lock()\n\tdefer i.pluginLock.Unlock()\n\n\t// See if we already have a running instance\n\tif i.plugin != nil && !i.plugin.Exited() {\n\t\treturn i.driver, nil\n\t}\n\n\tvar pluginInstance loader.PluginInstance\n\tdispenseFn := func() (loader.PluginInstance, error) {\n\t\treturn i.loader.Dispense(i.id.Name, i.id.PluginType, i.pluginConfig, i.logger)\n\t}\n\n\tif reattach, ok := i.fetchReattach(); ok {\n\t\t// Reattach to existing plugin\n\t\tpluginInstance, err = i.loader.Reattach(i.id.Name, i.id.PluginType, reattach)\n\n\t\t// If reattachment fails, get a new plugin instance\n\t\tif err != nil {\n\t\t\ti.logger.Warn(\"failed to reattach to plugin, starting new instance\", \"err\", err)\n\t\t\tpluginInstance, err = dispenseFn()\n\t\t}\n\t} else {\n\t\t// Get an instance of the plugin\n\t\tpluginInstance, err = dispenseFn()\n\t}\n\n\tif err != nil {\n\t\t// Retry as the error just indicates the singleton has exited\n\t\tif err == singleton.SingletonPluginExited {\n\t\t\tpluginInstance, err = dispenseFn()\n\t\t}\n\n\t\t// If we still have an error there is a real problem\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to start plugin: %v\", err)\n\t\t}\n\t}\n\n\t// Convert to a driver plugin\n\tdriver, ok := pluginInstance.Plugin().(drivers.DriverPlugin)\n\tif !ok {\n\t\tpluginInstance.Kill()\n\t\treturn nil, fmt.Errorf(\"plugin loaded does not implement the driver interface\")\n\t}\n\n\t// Store the plugin and driver\n\ti.plugin = pluginInstance\n\ti.driver = driver\n\n\t// Store the reattach config\n\tif c, ok := pluginInstance.ReattachConfig(); ok {\n\t\tif err := i.storeReattach(c); err != nil {\n\t\t\ti.logger.Error(\"error storing driver plugin reattach config\", \"error\", err)\n\t\t}\n\t}\n\n\treturn driver, nil\n}", "func Chain(h HandlerC, middlewares ...Middleware) HandlerC {\n\tfor _, middleware := range middlewares {\n\t\th = middleware(h)\n\t}\n\treturn h\n}", "func (b *Builder) BuildInstancePlugin() (instance.Plugin, error) {\n\tmc := monorail.New(b.options.Endpoint)\n\treturn NewInstancePlugin(mc, b.options.Username, b.options.Password), nil\n}", "func (o ApplicationStatusHistorySourceOutput) Plugin() ApplicationStatusHistorySourcePluginPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusHistorySource) *ApplicationStatusHistorySourcePlugin { return v.Plugin }).(ApplicationStatusHistorySourcePluginPtrOutput)\n}", "func ToryPluginRun(ctx *cli.Context) {\n\tfuncs := []func(){\n\t\t//your new func in here\n\t\tNewHTTPProxyPlugin(ctx).TroyRun,\n\t}\n\n\tfor _, fc := range funcs {\n\t\tgo fc()\n\t}\n}", "func registerPlugin(name string, init constructor) {\n\tif Pipes == nil {\n\t\tPipes = make(map[string]constructor)\n\t}\n\tPipes[name] = init\n}", "func NewChain(cms ...Middleware) (chain *Chain) {\n\tchain = new(Chain)\n\tif len(cms) > 0 {\n\t\tchain.middleware = cms[0]\n\t\tif len(cms) > 1 {\n\t\t\tchain = chain.Append(cms[1:]...)\n\t\t}\n\t}\n\treturn\n}", "func (o 
ApplicationOperationSyncSourceOutput) Plugin() ApplicationOperationSyncSourcePluginPtrOutput {\n\treturn o.ApplyT(func(v ApplicationOperationSyncSource) *ApplicationOperationSyncSourcePlugin { return v.Plugin }).(ApplicationOperationSyncSourcePluginPtrOutput)\n}", "func Chain(c Client, mw ...MiddlewareFunc) Client {\n\tresult := c\n\tfor _, middleware := range mw {\n\t\tresult = middleware(result)\n\t}\n\treturn result\n}", "func (s *Serverus) ChainInterceptors(inter interface{}) {}", "func (route *Route) chainMiddleware(i int) Handler {\n\tif i == len(route.middleware) {\n\t\treturn route.handler\n\t}\n\tf := route.middleware[i]\n\treturn f(route.chainMiddleware(i + 1))\n}", "func (a *Admin) AliasChain(_ *http.Request, args *AliasChainArgs, _ *api.EmptyReply) error {\n\ta.Log.Debug(\"API called\",\n\t\tzap.String(\"service\", \"admin\"),\n\t\tzap.String(\"method\", \"aliasChain\"),\n\t\tlogging.UserString(\"chain\", args.Chain),\n\t\tlogging.UserString(\"alias\", args.Alias),\n\t)\n\n\tif len(args.Alias) > maxAliasLength {\n\t\treturn errAliasTooLong\n\t}\n\tchainID, err := a.ChainManager.Lookup(args.Chain)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := a.ChainManager.Alias(chainID, args.Alias); err != nil {\n\t\treturn err\n\t}\n\n\tendpoint := path.Join(constants.ChainAliasPrefix, chainID.String())\n\talias := path.Join(constants.ChainAliasPrefix, args.Alias)\n\treturn a.HTTPServer.AddAliasesWithReadLock(endpoint, alias)\n}", "func NewOverridingChain(l ...Loader) Loader {\n\treturn &overridingChain{l}\n}", "func (s *Server) RegisterChain(ctx *snow.Context, vmIntf interface{}) {\n\tvm, ok := vmIntf.(common.VM)\n\tif !ok {\n\t\treturn\n\t}\n\n\t// all subroutes to a chain begin with \"bc/<the chain's ID>\"\n\tchainID := ctx.ChainID.String()\n\tdefaultEndpoint := \"bc/\" + chainID\n\thttpLogger, err := s.factory.MakeChain(chainID, \"http\")\n\tif err != nil {\n\t\ts.log.Error(\"Failed to create new http logger: %s\", err)\n\t\treturn\n\t}\n\ts.log.Verbo(\"About to add API endpoints for chain with ID %s\", ctx.ChainID)\n\n\t// Register each endpoint\n\tfor extension, service := range vm.CreateHandlers() {\n\t\t// Validate that the route being added is valid\n\t\t// e.g. 
\"/foo\" and \"\" are ok but \"\\n\" is not\n\t\t_, err := url.ParseRequestURI(extension)\n\t\tif extension != \"\" && err != nil {\n\t\t\ts.log.Error(\"could not add route to chain's API handler because route is malformed: %s\", err)\n\t\t\tcontinue\n\t\t}\n\t\tif err := s.AddChainRoute(service, ctx, defaultEndpoint, extension, httpLogger); err != nil {\n\t\t\ts.log.Error(\"error adding route: %s\", err)\n\t\t}\n\t}\n}", "func Chain(ms ...Middleware) Middleware {\n\treturn func(handler Handler) Handler {\n\t\tfor i := len(ms) - 1; i >= 0; i-- {\n\t\t\thandler = ms[i](handler)\n\t\t}\n\n\t\treturn handler\n\t}\n}", "func (bee *Beego) Use(router interface{}, plugin []plugins.Plugin) error {\n\tvar (\n\t\teng *beego.App\n\t\tok bool\n\t)\n\tif eng, ok = router.(*beego.App); !ok {\n\t\treturn errors.New(\"wrong parameter\")\n\t}\n\n\tfor _, plug := range plugin {\n\t\tvar plugCopy = plug\n\t\tfor _, req := range plug.GetRequest() {\n\t\t\teng.Handlers.AddMethod(req.Method, req.URL, func(c *context.Context) {\n\t\t\t\tfor key, value := range c.Input.Params() {\n\t\t\t\t\tif c.Request.URL.RawQuery == \"\" {\n\t\t\t\t\t\tc.Request.URL.RawQuery += strings.Replace(key, \":\", \"\", -1) + \"=\" + value\n\t\t\t\t\t} else {\n\t\t\t\t\t\tc.Request.URL.RawQuery += \"&\" + strings.Replace(key, \":\", \"\", -1) + \"=\" + value\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tctx := gctx.NewContext(c.Request)\n\t\t\t\tctx.SetHandlers(plugCopy.GetHandler(c.Request.URL.Path, strings.ToLower(c.Request.Method))).Next()\n\t\t\t\tfor key, head := range ctx.Response.Header {\n\t\t\t\t\tc.ResponseWriter.Header().Add(key, head[0])\n\t\t\t\t}\n\t\t\t\tc.ResponseWriter.WriteHeader(ctx.Response.StatusCode)\n\t\t\t\tif ctx.Response.Body != nil {\n\t\t\t\t\tbuf := new(bytes.Buffer)\n\t\t\t\t\t_, _ = buf.ReadFrom(ctx.Response.Body)\n\t\t\t\t\tc.WriteString(buf.String())\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n\n\treturn nil\n}", "func Chain(mwares ...Middleware) Middleware {\n\treturn func(inner http.Handler) http.Handler {\n\t\tfor i := len(mwares) - 1; i >= 0; i-- {\n\t\t\tinner = mwares[i](inner)\n\t\t}\n\t\treturn inner\n\t}\n}", "func (obj *miner) ToLink() blocks.Blocks {\n\treturn obj.toLink\n}", "func (o ApplicationSpecSourceOutput) Plugin() ApplicationSpecSourcePluginPtrOutput {\n\treturn o.ApplyT(func(v ApplicationSpecSource) *ApplicationSpecSourcePlugin { return v.Plugin }).(ApplicationSpecSourcePluginPtrOutput)\n}", "func (s *LifecyclerRPCServer) WithInstance(state *isclib.Instance, resp *struct{}) (err error) {\n\treturn s.Plugin.WithInstance(state)\n}", "func WrapConnector(dc driver.Connector, t *zipkin.Tracer, options ...TraceOption) driver.Connector {\n\topts := TraceOptions{}\n\tfor _, o := range options {\n\t\to(&opts)\n\t}\n\n\treturn &zDriver{\n\t\tparent: dc.Driver(),\n\t\tconnector: dc,\n\t\ttracer: t,\n\t\toptions: opts,\n\t}\n}", "func PChain() *PChainHelper {\n\n\treturn &PChainHelper{}\n}", "func (m *MiddlewareChain) Add(middleware Middleware) {\n\tm.chain = middleware.NewMiddleware(m.chain)\n}", "func NewChain(mw ...Middleware) *Chain {\n\treturn &Chain{\n\t\tmw: append(([]Middleware)(nil), mw...),\n\t}\n}", "func (c Chain) Append(hooks ...ent.Hook) Chain {\n\tnewHooks := make([]ent.Hook, 0, len(c.hooks)+len(hooks))\n\tnewHooks = append(newHooks, c.hooks...)\n\tnewHooks = append(newHooks, hooks...)\n\treturn Chain{newHooks}\n}", "func (c Chain) Append(hooks ...ent.Hook) Chain {\n\tnewHooks := make([]ent.Hook, 0, len(c.hooks)+len(hooks))\n\tnewHooks = append(newHooks, c.hooks...)\n\tnewHooks = append(newHooks, 
hooks...)\n\treturn Chain{newHooks}\n}", "func buildPlugins(options *Options, metricWriter MetricWriter) PluginManagerContainer {\n\tplugins := make(PluginManagerContainer)\n\n\tif !options.UseLocalRouter {\n\t\tplugin := NewPluginManager(options.RouterPlugin, options.RouterPluginArgs, RouterPlugin, metricWriter)\n\t\tplugins[RouterPlugin] = []PluginManager{plugin}\n\t}\n\n\tif !options.UseLocalLoadBalancer {\n\t\tplugin := NewPluginManager(options.LoadBalancerPlugin, options.LoadBalancerPluginArgs, LoadBalancerPlugin, metricWriter)\n\t\tplugins[LoadBalancerPlugin] = []PluginManager{plugin}\n\t}\n\n\tfor _, plugin := range options.UpstreamPlugins {\n\t\tplugins[UpstreamPlugin] = append(plugins[UpstreamPlugin], NewPluginManager(plugin, options.UpstreamPluginArgs, UpstreamPlugin, metricWriter))\n\t}\n\n\tfor _, plugin := range options.ModifierPlugins {\n\t\tplugins[ModifierPlugin] = append(plugins[ModifierPlugin], NewPluginManager(plugin, options.ModifierPluginArgs, ModifierPlugin, metricWriter))\n\t}\n\n\tfor _, plugin := range options.MetricPlugins {\n\t\tplugins[MetricPlugin] = append(plugins[MetricPlugin], NewPluginManager(plugin, options.MetricPluginArgs, MetricPlugin, metricWriter))\n\t}\n\treturn plugins\n}", "func Plugin() *node.Plugin {\n\tonce.Do(func() {\n\t\tplugin = node.NewPlugin(\"WebAPI WeightProvider Endpoint\", node.Enabled, configure)\n\t})\n\treturn plugin\n}", "func (c *Client) AddPlugin(p Plugin) {\n\tc.Plugins = append(c.Plugins, p)\n}", "func Register(plugin Plugin) {\n\tplugins = append(plugins, plugin)\n}", "func ChainHandler(h http.Handler, c ...alice.Constructor) http.Handler {\n\treturn alice.New(c...).Then(h)\n}", "func ChainHandler(h http.Handler, c ...alice.Constructor) http.Handler {\n\treturn alice.New(c...).Then(h)\n}", "func chainStarted(service moleculer.Service, mixin *moleculer.Mixin) moleculer.Service {\n\tif mixin.Started != nil {\n\t\tsvcHook := service.Started\n\t\tservice.Started = func(ctx moleculer.BrokerContext, svc moleculer.Service) {\n\t\t\tif svcHook != nil {\n\t\t\t\tsvcHook(ctx, svc)\n\t\t\t}\n\t\t\tmixin.Started(ctx, svc)\n\t\t}\n\t}\n\treturn service\n}", "func Plugin(name string, f http.HandlerFunc) error {\n\tif _, ok := plugins[name]; ok {\n\t\treturn errors.New(\"duplicate plugin: \" + name)\n\t}\n\tplugins[name] = f\n\treturn nil\n}", "func (c *Chainable) Chain(funcs ...Function) *Chainable {\n\tc.chainFuncs(funcs, true)\n\treturn c\n}", "func Chain(middlewares ...Middleware) Middleware {\n\treturn func(next http.Handler) http.Handler {\n\t\tfor i := len(middlewares) - 1; i >= 0; i-- {\n\t\t\tmiddleware := middlewares[i]\n\t\t\tnext = middleware(next)\n\t\t}\n\t\treturn next\n\t}\n}", "func (p *Pilot) installPlugin(pilot *v1alpha1.Pilot, plugin string) error {\n\tcmd := exec.Command(p.Options.ElasticsearchOptions.PluginBinary, \"install\", plugin)\n\tcmd.Env = p.env().Strings()\n\tcmd.Stdout = p.Options.StdOut\n\tcmd.Stderr = p.Options.StdErr\n\tif err := cmd.Run(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func NewChain(r *Repeater, client *Client) *Chain {\n\treturn &Chain{\n\t\tRepeater: r,\n\t\tCurrentClient: client,\n\t}\n}", "func (c Chain) Extend(chain Chain) Chain {\n\treturn c.Append(chain.hooks...)\n}", "func (c Chain) Extend(chain Chain) Chain {\n\treturn c.Append(chain.hooks...)\n}", "func Plugin(ctx context.Context) *plugin.Plugin {\n\tp := &plugin.Plugin{\n\t\tName: \"steampipe-plugin-github\",\n\t\tConnectionConfigSchema: &plugin.ConnectionConfigSchema{\n\t\t\tNewInstance: ConfigInstance,\n\t\t\tSchema: 
ConfigSchema,\n\t\t},\n\t\tDefaultTransform: transform.FromGo(),\n\t\tTableMap: map[string]*plugin.Table{\n\t\t\t\"github_branch\": tableGitHubBranch(ctx),\n\t\t\t\"github_branch_protection\": tableGitHubBranchProtection(ctx),\n\t\t\t\"github_commit\": tableGitHubCommit(ctx),\n\t\t\t\"github_community_profile\": tableGitHubCommunityProfile(ctx),\n\t\t\t\"github_gist\": tableGitHubGist(),\n\t\t\t\"github_gitignore\": tableGitHubGitignore(),\n\t\t\t\"github_issue\": tableGitHubIssue(),\n\t\t\t\"github_license\": tableGitHubLicense(),\n\t\t\t\"github_my_gist\": tableGitHubMyGist(),\n\t\t\t\"github_my_issue\": tableGitHubMyIssue(),\n\t\t\t\"github_my_organization\": tableGitHubMyOrganization(),\n\t\t\t\"github_my_repository\": tableGitHubMyRepository(),\n\t\t\t\"github_my_team\": tableGitHubMyTeam(),\n\t\t\t\"github_organization\": tableGitHubOrganization(),\n\t\t\t\"github_pull_request\": tableGitHubPullRequest(),\n\t\t\t\"github_rate_limit\": tableGitHubRateLimit(ctx),\n\t\t\t\"github_release\": tableGitHubRelease(ctx),\n\t\t\t\"github_repository\": tableGitHubRepository(),\n\t\t\t\"github_stargazer\": tableGitHubStargazer(ctx),\n\t\t\t\"github_tag\": tableGitHubTag(ctx),\n\t\t\t\"github_traffic_view_daily\": tableGitHubTrafficViewDaily(ctx),\n\t\t\t\"github_traffic_view_weekly\": tableGitHubTrafficViewWeekly(ctx),\n\t\t\t\"github_user\": tableGitHubUser(),\n\t\t\t\"github_workflow\": tableGitHubWorkflow(ctx),\n\t\t},\n\t}\n\treturn p\n}", "func Plugin(replayLayout *device.MemoryLayout) compiler.Plugin {\n\treturn &replayer{replayLayout: replayLayout}\n}", "func (h Hook) apply(m *Meta) {\n\tm.Hooks = append(m.Hooks, h)\n}", "func RegisterPlugin(p Plugin) {\n\tplugins = append(plugins, p)\n}", "func (*GenericFramework) PeerAdded(ctx *PeerContext) {}", "func Chain(middlewares ...Middleware) Middleware {\n\treturn MiddlewareFunc(func(roundTripper http.RoundTripper) http.RoundTripper {\n\t\tfor i := len(middlewares) - 1; i >= 0; i-- {\n\t\t\troundTripper = middlewares[i].Wrap(roundTripper)\n\t\t}\n\t\treturn roundTripper\n\t})\n}", "func (s krpcServer) ChainStream(interceptors ...grpc.StreamServerInterceptor) grpc.ServerOption {\n\treturn grpc.ChainStreamInterceptor(interceptors...)\n}", "func NewRouterChain() (*RouterChain, error) {\n\trouterFactories := extension.GetRouterFactories()\n\tif len(routerFactories) == 0 {\n\t\treturn nil, perrors.Errorf(\"No routerFactory exits , create one please\")\n\t}\n\n\trouters := make([]router.PriorityRouter, 0, len(routerFactories))\n\n\tfor key, routerFactory := range routerFactories {\n\t\tr, err := routerFactory().NewPriorityRouter()\n\t\tif err != nil {\n\t\t\tlogger.Errorf(\"Build router chain failed with routerFactories key:%s and error:%v\", key, err)\n\t\t\tcontinue\n\t\t} else if r == nil {\n\t\t\tcontinue\n\t\t}\n\t\trouters = append(routers, r)\n\t}\n\n\tnewRouters := make([]router.PriorityRouter, len(routers))\n\tcopy(newRouters, routers)\n\n\tsortRouter(newRouters)\n\n\trouterNeedsUpdateInit := atomic.Bool{}\n\trouterNeedsUpdateInit.Store(false)\n\n\tchain := &RouterChain{\n\t\trouters: newRouters,\n\t\tbuiltinRouters: routers,\n\t}\n\n\treturn chain, nil\n}", "func TestChain(t *testing.T) {\n\tsh := api.NewLocalShell()\n\tif sh == nil {\n\t\tt.Fatal(\"unable to connect to local ipfs\")\n\t}\n\n\tctx := context.Background()\n\tdbm := inmem.NewInmemDb()\n\tlocalStore := localdb.NewLocalDb(db.WithPrefix(dbm, []byte(\"/localdb\")))\n\tremoteStore := ipfs.NewRemoteStore(sh)\n\tobjStore := objstore.NewObjectStore(ctx, localStore, 
remoteStore)\n\n\tvalidatorPriv, _, err := crypto.GenerateEd25519Key(rand.Reader)\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tnod, err := node.NewNode(ctx, db.WithPrefix(dbm, \"/node\"), objStore, sh, ch, validatorPriv)\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tchainID := \"test-chain-1\"\n\tch, err := NewChain(ctx, dbm, objStore, chainID, validatorPriv, &validators.AllowAll{})\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\t{\n\t\tchAfter, err := FromConfig(ctx, dbm, objStore, ch.GetConfig())\n\t\tif err != nil {\n\t\t\tt.Fatal(err.Error())\n\t\t}\n\n\t\tif chAfter.GetPubsubTopic() != ch.GetPubsubTopic() {\n\t\t\tt.Fail()\n\t\t}\n\t\tch = chAfter\n\t}\n\n\tproposer, err := NewProposer(ctx, validatorPriv, dbm, ch, nod)\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\t_ = proposer\n}", "func Chain(handler http.Handler, middleware ...Middleware) http.Handler {\n\th := handler\n\tfor _, m := range middleware {\n\t\th = m(h)\n\t}\n\treturn h\n}", "func (p *parser) chain(lhs Node) Node {\n\tfor look := p.peek().typ; look == TokenDot; look = p.peek().typ {\n\t\top := p.next()\n\t\trhs := p.funcOrIdent()\n\t\tlhs = newBinary(op, lhs, rhs)\n\t}\n\treturn lhs\n}", "func run(c *cli.Context) error {\n\t// set the log level for the plugin\n\tswitch c.String(\"log.level\") {\n\tcase \"t\", \"trace\", \"Trace\", \"TRACE\":\n\t\tlogrus.SetLevel(logrus.TraceLevel)\n\tcase \"d\", \"debug\", \"Debug\", \"DEBUG\":\n\t\tlogrus.SetLevel(logrus.DebugLevel)\n\tcase \"w\", \"warn\", \"Warn\", \"WARN\":\n\t\tlogrus.SetLevel(logrus.WarnLevel)\n\tcase \"e\", \"error\", \"Error\", \"ERROR\":\n\t\tlogrus.SetLevel(logrus.ErrorLevel)\n\tcase \"f\", \"fatal\", \"Fatal\", \"FATAL\":\n\t\tlogrus.SetLevel(logrus.FatalLevel)\n\tcase \"p\", \"panic\", \"Panic\", \"PANIC\":\n\t\tlogrus.SetLevel(logrus.PanicLevel)\n\tcase \"i\", \"info\", \"Info\", \"INFO\":\n\t\tfallthrough\n\tdefault:\n\t\tlogrus.SetLevel(logrus.InfoLevel)\n\t}\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"code\": \"https://github.com/go-vela/vela-kaniko\",\n\t\t\"docs\": \"https://go-vela.github.io/docs/plugins/registry/pipeline/kaniko\",\n\t\t\"registry\": \"https://hub.docker.com/r/target/vela-kaniko\",\n\t}).Info(\"Vela Kaniko Plugin\")\n\n\t// create the plugin\n\tp := &Plugin{\n\t\t// build configuration\n\t\tBuild: &Build{\n\t\t\tEvent: c.String(\"build.event\"),\n\t\t\tSha: c.String(\"build.sha\"),\n\t\t\tSnapshotMode: c.String(\"build.snapshot_mode\"),\n\t\t\tTag: c.String(\"build.tag\"),\n\t\t},\n\t\t// image configuration\n\t\tImage: &Image{\n\t\t\tArgs: c.StringSlice(\"image.build_args\"),\n\t\t\tContext: c.String(\"image.context\"),\n\t\t\tDockerfile: c.String(\"image.dockerfile\"),\n\t\t\tTarget: c.String(\"image.target\"),\n\t\t},\n\t\t// registry configuration\n\t\tRegistry: &Registry{\n\t\t\tDryRun: c.Bool(\"registry.dry_run\"),\n\t\t\tName: c.String(\"registry.name\"),\n\t\t\tMirror: c.String(\"registry.mirror\"),\n\t\t\tUsername: c.String(\"registry.username\"),\n\t\t\tPassword: c.String(\"registry.password\"),\n\t\t\tPushRetry: c.Int(\"registry.push_retry\"),\n\t\t},\n\t\t// repo configuration\n\t\tRepo: &Repo{\n\t\t\tAutoTag: c.Bool(\"repo.auto_tag\"),\n\t\t\tCache: c.Bool(\"repo.cache\"),\n\t\t\tCacheName: c.String(\"repo.cache_name\"),\n\t\t\tName: c.String(\"repo.name\"),\n\t\t\tTags: c.StringSlice(\"repo.tags\"),\n\t\t\tLabel: &Label{\n\t\t\t\tAuthorEmail: c.String(\"label.author_email\"),\n\t\t\t\tCommit: c.String(\"label.commit\"),\n\t\t\t\tCreated: 
time.Now().Format(time.RFC3339),\n\t\t\t\tFullName: c.String(\"label.full_name\"),\n\t\t\t\tNumber: c.Int(\"label.number\"),\n\t\t\t\tURL: c.String(\"label.url\"),\n\t\t\t},\n\t\t\tLabels: c.StringSlice(\"repo.labels\"),\n\t\t},\n\t}\n\n\t// validate the plugin\n\terr := p.Validate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// execute the plugin\n\treturn p.Exec()\n}", "func (a *AppBuilder) Plugins(plugins ...client.Interface) *AppBuilder {\n\t// will init a client if not already initiated\n\ta.initClient(nil)\n\ta.plugins = plugins\n\ta.filters = append(a.filters, client.MetaFilter, client.AuthFilter)\n\n\tfor _, plugin := range a.plugins {\n\t\tif err := plugin.Setup(a.Context, a.Logger); err != nil {\n\t\t\ta.Logger.Fatalw(\"plugin could not be setup correctly\", \"err\", err, \"plugin\", plugin.Path())\n\t\t}\n\t\tws, err := route.NewService(plugin, a.filters...)\n\t\tif err != nil {\n\t\t\ta.Logger.Fatalw(\"plugin could not start correctly\", \"err\", err, \"plugin\", plugin.Path())\n\t\t}\n\t\ta.container.Add(ws)\n\t}\n\treturn a\n}", "func (c krpcClient) ChainStream(interceptors ...grpc.StreamClientInterceptor) grpc.DialOption {\n\treturn grpc.WithChainStreamInterceptor(interceptors...)\n}", "func NewChain(mws ...Middleware) *Chain {\n\tc := &Chain{make([]Middleware, len(mws))}\n\tfor i, mw := range mws {\n\t\tc.middlewares[i] = mw\n\t}\n\treturn c\n}", "func (i *Interceptor) Use(m grpc.UnaryServerInterceptor) {\n\ti.chain = append(i.chain, m)\n}", "func ChainMiddleware(f http.Handler, middlewares ...Middleware) http.Handler {\n\tfor _, m := range middlewares {\n\t\tf = m(f)\n\t}\n\treturn f\n}", "func NewChain(logger log.Logger, validators ...Validator) Validator {\n\treturn chain{\n\t\tvalidators: validators,\n\t\tlogger: logger,\n\t}\n}", "func NewChain(handlerArray ...MiddlewareHandlerFunc) *Chain {\n\tc := new(Chain)\n\tc.middlewareHandlers = handlerArray\n\treturn c\n}", "func UseDeps(cb func(*Deps)) Option {\n\treturn func(p *Plugin) {\n\t\tcb(&p.Deps)\n\t}\n}", "func (chain *Chain) AppendChain(tail *Chain) (newChain *Chain) {\n\t// Copy the chain to attach\n\tnewChain = tail.copy()\n\n\t// Attach the chain to extend to the new tail\n\tnewChain.head().parent = chain\n\n\t// Return the new tail\n\treturn\n}", "func (cli *chainClient[C, R, P, P2]) ConfigureChain(c *cli.Context) (err error) {\n\tchainID := c.String(\"id\")\n\tif chainID == \"\" {\n\t\treturn cli.errorOut(errors.New(\"missing chain ID [-id]\"))\n\t}\n\n\tif !c.Args().Present() {\n\t\treturn cli.errorOut(errors.New(\"must pass in at least one chain configuration parameters (usage: ... 
chains configure [key1=value1 key2=value2 ...])\"))\n\t}\n\n\t// Fetch existing config\n\tresp, err := cli.HTTP.Get(fmt.Sprintf(\"%s/%v\", cli.path, chainID))\n\tif err != nil {\n\t\treturn cli.errorOut(err)\n\t}\n\tdefer func() {\n\t\tif cerr := resp.Body.Close(); cerr != nil {\n\t\t\terr = multierr.Append(err, cerr)\n\t\t}\n\t}()\n\tvar chain R\n\tif err = cli.deserializeAPIResponse(resp, &chain, &jsonapi.Links{}); err != nil {\n\t\treturn cli.errorOut(err)\n\t}\n\tconfig := chain.GetConfig()\n\n\t// Parse new key-value pairs\n\tparams := map[string]interface{}{}\n\tfor _, arg := range c.Args() {\n\t\tparts := strings.SplitN(arg, \"=\", 2)\n\t\tif len(parts) != 2 {\n\t\t\treturn cli.errorOut(errors.Errorf(\"invalid parameter: %v\", arg))\n\t\t}\n\n\t\tvar value interface{}\n\t\tif err = json.Unmarshal([]byte(parts[1]), &value); err != nil {\n\t\t\t// treat it as a string\n\t\t\tvalue = parts[1]\n\t\t}\n\t\t// TODO: handle `key=nil` and `key=` besides just null?\n\t\tparams[parts[0]] = value\n\t}\n\n\t// Combine new values with the existing config\n\t// (serialize to a partial JSON map, deserialize to the old config struct)\n\trawUpdates, err := json.Marshal(params)\n\tif err != nil {\n\t\treturn cli.errorOut(err)\n\t}\n\terr = json.Unmarshal(rawUpdates, &config)\n\tif err != nil {\n\t\treturn cli.errorOut(err)\n\t}\n\n\t// Send the new config\n\tparams = map[string]interface{}{\n\t\t\"enabled\": chain.IsEnabled(),\n\t\t\"config\": config,\n\t}\n\tbody, err := json.Marshal(params)\n\tif err != nil {\n\t\treturn cli.errorOut(err)\n\t}\n\tresp, err = cli.HTTP.Patch(fmt.Sprintf(\"%s/%v\", cli.path, chainID), bytes.NewBuffer(body))\n\tif err != nil {\n\t\treturn cli.errorOut(err)\n\t}\n\tdefer func() {\n\t\tif cerr := resp.Body.Close(); cerr != nil {\n\t\t\terr = multierr.Append(err, cerr)\n\t\t}\n\t}()\n\tvar p P\n\treturn cli.renderAPIResponse(resp, &p)\n}", "func (IntegrationHandler) RouteCommandToInstance(m dogma.Message) string {\n\treturn \"<integration>\"\n}" ]
[ "0.6453789", "0.53310853", "0.52817833", "0.52302456", "0.5189953", "0.5110766", "0.50826776", "0.50744146", "0.50644445", "0.5005035", "0.49748373", "0.49228576", "0.49181893", "0.48358542", "0.48358542", "0.48132738", "0.4704591", "0.46917528", "0.46803397", "0.46771368", "0.46726814", "0.46464157", "0.46237466", "0.46015787", "0.45948094", "0.45655343", "0.45643058", "0.4562294", "0.45576832", "0.45553502", "0.45517173", "0.4549849", "0.45463058", "0.4541408", "0.45239788", "0.45236194", "0.45069513", "0.45040074", "0.44953144", "0.4492902", "0.44926575", "0.4488401", "0.44831958", "0.4447062", "0.44439206", "0.44425175", "0.4432218", "0.4430931", "0.44153225", "0.44140765", "0.43929878", "0.43797183", "0.4368422", "0.43664548", "0.43659875", "0.43525386", "0.43518126", "0.43467253", "0.43388268", "0.43313152", "0.4330534", "0.43296465", "0.43296465", "0.43273035", "0.43237245", "0.43147817", "0.43088156", "0.4304072", "0.4304072", "0.43007666", "0.42933753", "0.42877614", "0.42840552", "0.42809948", "0.42787993", "0.4265891", "0.4265891", "0.42624542", "0.42618176", "0.42608675", "0.4258734", "0.42530885", "0.4247073", "0.42417803", "0.42392713", "0.42338976", "0.42286697", "0.4220742", "0.4199421", "0.41973445", "0.41965097", "0.4173488", "0.41705763", "0.41705626", "0.41671312", "0.41662854", "0.41569376", "0.414986", "0.41493526", "0.41485557" ]
0.6487222
0
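Aside for readers of the row above: several of its negatives implement the same handler-chaining idiom, wrapping a handler in middlewares applied in reverse index order so the first middleware listed runs outermost. A minimal, self-contained sketch of that idiom follows; the names Middleware, Chain, and logging are illustrative stand-ins for this sketch, not entries from the dataset.

package main

import (
	"fmt"
	"net/http"
)

// Middleware wraps an http.Handler, as in the Chain negatives above.
type Middleware func(http.Handler) http.Handler

// Chain applies middlewares in reverse order, so the first middleware
// listed becomes the outermost wrapper around the final handler.
func Chain(h http.Handler, mws ...Middleware) http.Handler {
	for i := len(mws) - 1; i >= 0; i-- {
		h = mws[i](h)
	}
	return h
}

// logging is a sample middleware that runs code before and after the
// wrapped handler.
func logging(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Println("before handler")
		next.ServeHTTP(w, r)
		fmt.Println("after handler")
	})
}

func main() {
	final := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	})
	wrapped := Chain(final, logging)
	_ = wrapped // in a real server this would be passed to http.ListenAndServe
}

Applying the middlewares from the last index down is what makes the listing order match the nesting order, which is the behavior the Chain negatives above encode.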
FilterPush pushes a filter instance on the chain's filter stack.
func (goldsmith *Goldsmith) FilterPush(filter Filter) *Goldsmith {
	goldsmith.fileFilters = append(goldsmith.fileFilters, filter)
	return goldsmith
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (self *Goldsmith) FilterPush(filter Filter) *Goldsmith {\n\tself.filters.push(filter, self.index)\n\tself.index++\n\treturn self\n}", "func NewPushFilter() *PushFilter {\n\n\treturn &PushFilter{\n\t\tIdentities: map[string][]EventType{},\n\t}\n}", "func (sm *SyncManager) pushBloomFilter(p *peer.Peer) {\n\tp.QueueMessage(sm.cfg.GetTxFilter(), nil)\n}", "func (pb *primitiveBuilder) pushFilter(in sqlparser.Expr, whereType string, reservedVars *sqlparser.ReservedVars) error {\n\tfilters := sqlparser.SplitAndExpression(nil, in)\n\treorderBySubquery(filters)\n\tfor _, filter := range filters {\n\t\tpullouts, origin, expr, err := pb.findOrigin(filter, reservedVars)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\trut, isRoute := origin.(*route)\n\t\tif isRoute && rut.eroute.Opcode == engine.DBA {\n\t\t\terr := pb.findSysInfoRoutingPredicates(expr, rut, reservedVars)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\t// The returned expression may be complex. Resplit before pushing.\n\t\tfor _, subexpr := range sqlparser.SplitAndExpression(nil, expr) {\n\t\t\tpb.plan, err = planFilter(pb, pb.plan, subexpr, whereType, origin)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tpb.addPullouts(pullouts)\n\t}\n\treturn nil\n}", "func pushFilter(boolExpr sqlparser.Expr, bldr builder, whereType string) error {\n\tfilters := splitAndExpression(nil, boolExpr)\n\treorderBySubquery(filters)\n\tfor _, filter := range filters {\n\t\torigin, err := findOrigin(filter, bldr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := bldr.PushFilter(filter, whereType, origin); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (s *ChainSet) Register(filter FilterDef) error {\n\tif s.registry[filter.Name] != nil {\n\t\treturn fmt.Errorf(\"filter %q already registered\", filter.Name)\n\t}\n\tif s.registry == nil {\n\t\ts.registry = map[string]NewFilter{}\n\t}\n\ts.registry[filter.Name] = filter.New\n\treturn nil\n}", "func (s *UndoStack) Push(elem UndoFn) {\n\ts.states = append(s.states, elem)\n}", "func (r *renderer) push(context reflect.Value) {\n\tr.stack = append(r.stack, context)\n}", "func (h *Router) AddFilter(f filter.Filter) {\n\t// Filter f is always added before the last filter, which is server mux.\n\th.filterChain.Insert(f, h.filterChain.Length()-1)\n}", "func (stack *Stack) Push(stuff interface{}) {\n\t*stack = append(*stack, stuff)\n}", "func (ws *WriterStack) Push(enc Encoder, args []byte) error {\n\tvar err error\n\tws.Writer, err = enc(ws.Writer, args)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// note this DELIBERATELY does not end up including the \"base writer\"\n\t// that we're interpolating to... 
we do NOT want to end up closing\n\t// that, too!\n\tws.components = append(ws.components, ws.Writer)\n\treturn nil\n}", "func (goldsmith *Goldsmith) FilterPop() *Goldsmith {\n\tcount := len(goldsmith.fileFilters)\n\tif count == 0 {\n\t\tpanic(\"attempted to pop empty filter stack\")\n\t}\n\n\tgoldsmith.fileFilters = goldsmith.fileFilters[:count-1]\n\treturn goldsmith\n}", "func (r *Recorder) Filter(filter shared.Filter) *Recorder {\n\tr.filters = append(r.filters, filter)\n\treturn r\n}", "func (this *Stack) Push(x interface{}) {\n\tthis.stack = append(this.stack, x)\n}", "func (s *stack) Push(elem []byte) error {\n\ts.stk = append(s.stk, elem)\n\treturn nil\n}", "func (m *MetricFilter) Filter(filter shared.Filter) *MetricFilter {\n\tm.filterList = append(m.filterList, filter)\n\treturn m\n}", "func (p *Pipe) Push(values ...phono.Param) {\n\tif len(values) == 0 {\n\t\treturn\n\t}\n\tparams := params(make(map[string][]phono.ParamFunc))\n\tp.events <- eventMessage{\n\t\tevent: push,\n\t\tparams: params.add(values...),\n\t}\n}", "func (self *Goldsmith) FilterPop() *Goldsmith {\n\tself.filters.pop()\n\tself.index++\n\treturn self\n}", "func (stack *Stack) Push(s string) {\n\t*stack = append(*stack, s)\n}", "func (s *Stack) Push(e interface{}) {\n\t*s = append(*s, e)\n}", "func (is *InputSteerer) Push(w io.WriteCloser) {\n\tis.mu.Lock()\n\tdefer is.mu.Unlock()\n\tis.ws = append(is.ws, w)\n}", "func (p *Pusher) Push(ctx context.Context) error {\n\tif p.PushFormat == \"\" {\n\t\tp.PushFormat = expfmt.FmtText\n\t}\n\n\tresps := make(chan (error))\n\tgo func() {\n\t\tresps <- p.push(ctx)\n\t}()\n\n\tselect {\n\tcase err := <-resps:\n\t\treturn err\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n}", "func Push(state string) Action {\n\treturn ActionPush{state}\n}", "func (s *Stack) Push(data interface{}) {\r\n\ts.stk = append(s.stk, data)\r\n}", "func (s *Stack) Push(val interface{}) {\n\t*s = append(*s, val)\n}", "func (s *simpleStack) Push(v StackData) {\n\ts.s = append(s.s, v)\n}", "func (s *Stack) Push(x interface{}) {\n\ts.data = append(s.data, x)\n}", "func (s *Stack) Push(x interface{}) {\n\ts.data = append(s.data, x)\n}", "func AddFilter(cf CastFilter) CastOption {\n\treturn func(c *Caster) {\n\t\tc.Filters = append(c.Filters, cf)\n\t}\n}", "func (this *MyStack) Push(x int) {\n\tthis.input.Enqueue(x)\n\tthis.output.Enqueue(this.input.Dequeue())\n}", "func (q *SensorStack) Push(n *SensorReading) {\n\t*q = append(*q, n)\n}", "func (p PassOp) Push(o *op.Ops) PassStack {\n\tid, mid := ops.PushOp(&o.Internal, ops.PassStack)\n\tdata := ops.Write(&o.Internal, ops.TypePassLen)\n\tdata[0] = byte(ops.TypePass)\n\treturn PassStack{ops: &o.Internal, id: id, macroID: mid}\n}", "func (self *WtfPush) Push(blob []byte) {\n\tselect {\n\tcase self.pushch <- blob:\n\tdefault:\n\t}\n}", "func StackedFilter() Filter {\n\treturn Param(\"stacked\", \"true\")\n}", "func (s *Stack) Push(item float64) {\n\ts.Items = append(s.Items, item)\n\ts.Length++\n}", "func (s *Stack) Push(n *Tree) {\n\tif s.count >= len(s.nodes) {\n\t\tnodes := make([]*Tree, len(s.nodes)*2)\n\t\tcopy(nodes, s.nodes)\n\t\ts.nodes = nodes\n\t}\n\ts.nodes[s.count] = n\n\ts.count++\n}", "func (c *Context) Push(op operation.Operation) {\n\tc.operations = append(c.operations, op)\n}", "func (stack *Stack) Push(value interface{}) {\n\tstack.list.Add(nil, value)\n}", "func (a *LocalActivations) Push(activation *LocalActivation) {\n\ta.activations = append(\n\t\ta.activations,\n\t\tactivation,\n\t)\n}", "func (job *Flusher) Push(item *Incoming) error 
{\n\tif job.enabled.Load() {\n\t\tindex := job.queueIndex.Inc() & job.queueMask\n\t\treturn job.queues[index].Push(item)\n\t}\n\treturn errFlusherDisabled\n}", "func (s *stack) Push(v []byte) {\n\tif v == nil {\n\t\treturn\n\t}\n\n\t*s = append(*s, v)\n}", "func (q *Stack) Push(val interface{}) {\n\tq.Items.Append(val)\n}", "func (s *Stack) Push(in *node.Node) {\n\ts.stack = append(s.stack, in)\n\ts.n++\n}", "func (s *Stack) Push(data ...interface{}) {\n\ts.data = append(s.data, data...)\n}", "func (f *FilterAndOr) Add(filter IFilter) *FilterAndOr {\n\tif filter != nil {\n\t\tf.Filters = append(f.Filters, filter)\n\t}\n\treturn f\n}", "func (stack *Stack) Push(val interface{}) error {\n\tif stack == nil {\n\t\treturn errors.New(\"the stack is nil\")\n\t}\n\t*stack = append(*stack, val)\n\treturn nil\n}", "func (s *Stack) Push(str string) {\n\t*s = append(*s, str)\n}", "func (s *stack) Push(str string) {\n\t*s = append(*s, str)\n}", "func (ds *DrawStack) Push(a Stackable) {\n\tds.toPush = append(ds.toPush, a)\n\n}", "func (p *Stack) Push(v interface{}) {\n\n\tp.data = append(p.data, v)\n}", "func (s *MyStack) Push(x int) {\n\ts.Q = append(s.Q, x)\n}", "func (s *BoolStack) Push(b bool) {\n\ts.sp++\n\ts.data = append(s.data, b)\n}", "func (s *Stack) Push(element int) {\n\t*s = append(*s, element)\n}", "func (s *MyStatic) AddFilter(name string, f FilterFunc) {\n\ts.filters[name] = f\n\treturn\n}", "func (s *Stack) Push(value string) {\n\t*s = append(*s, value)\n}", "func (p *Pipeline) Push(buffer []byte) {\n\tfmt.Printf(\"In Push\")\n\tb := C.CBytes(buffer)\n\tdefer C.free(b)\n\tC.gstreamer_receive_push_buffer(p.Pipeline, b, C.int(len(buffer)))\n}", "func (h *Host) Push(r *rec.Rec) {\n\tselect {\n\tcase h.Ch <- r:\n\tdefault:\n\t\th.throttled.Inc()\n\t}\n}", "func (s *scope) Push() {\n\tlevel := &scopeLevel{map[string]Variable{}, map[Variable]Value{}}\n\ts.levels = append(s.levels, level)\n}", "func (stack *Stack) Push(value interface{}) {\n\tstack.list.PushBack(value)\n}", "func (s *Stack) Push(node *TreeNode) {\n\t*s = append(*s, node)\n}", "func (f *PipelineRunFunc) PushHook(hook func() (interface{}, error)) {\n\tf.hooks = append(f.hooks, hook)\n}", "func (lc *LoggingContext) Push(name string) (error, bool) {\n\tif lc.curr == \"\" {\n\t\treturn errors.New(fmt.Sprintf(\"Cannot push context to \\\"%s\\\"; no loggers have been added\", name)), false\n\t}\n\tlc.guard.Lock()\n\tdefer lc.guard.Unlock()\n\tlogger := lc.logmap[name]\n\tif logger == nil {\n\t\treturn errors.New(fmt.Sprintf(\"Logger \\\"%s\\\" not found. 
Still using \\\"%s\\\"\", name, lc.curr)), true\n\t}\n\tlc.logstack.PushBack(lc.curr)\n\tlc.curr = name\n\tl.UseLogger(*logger)\n\treturn nil, true\n}", "func (s *Stack) Push(x string) {\n\ts.data = append(s.data, x)\n}", "func (gf *GlobalFilter) AddToFilter(gdp *gdpv1alpha1.GlobalDeploymentPolicy) {\n\tgf.GlobalLock.Lock()\n\tdefer gf.GlobalLock.Unlock()\n\tif len(gdp.Spec.MatchRules.AppSelector.Label) == 1 {\n\t\tk, v := getLabelKeyAndValue(gdp.Spec.MatchRules.AppSelector.Label)\n\t\tappFilter := AppFilter{\n\t\t\tLabel: Label{\n\t\t\t\tKey: k,\n\t\t\t\tValue: v,\n\t\t\t},\n\t\t}\n\t\tgf.AppFilter = &appFilter\n\t}\n\tif len(gdp.Spec.MatchRules.NamespaceSelector.Label) == 1 {\n\t\tgf.NSFilter = createNewNSFilter(gdp.Spec.MatchRules.NamespaceSelector.Label)\n\t}\n\t// Add applicable clusters\n\tgf.ApplicableClusters = gdp.Spec.MatchClusters\n\t// Add traffic split\n\tfor _, ts := range gdp.Spec.TrafficSplit {\n\t\tct := ClusterTraffic{\n\t\t\tClusterName: ts.Cluster,\n\t\t\tWeight: int32(ts.Weight),\n\t\t}\n\t\tgf.TrafficSplit = append(gf.TrafficSplit, ct)\n\t}\n\tgf.ComputeChecksum()\n\tLogf(\"ns: %s, object: NSFilter, msg: added/changed the global filter\", gdp.ObjectMeta.Namespace)\n}", "func (this *MyStack) Push(x int) {\n\tthis.wareHouse.Push(x)\n}", "func (s *Stack) Push(elem interface{}) {\n\ts.list.AddLast(elem)\n}", "func (q *Stack) Push(v interface{}) *list.Element {\n\treturn q.data.PushFront(v)\n}", "func (s *stackImpl) Push(ts ...T) {\n\ts.items = append(s.items, ts...)\n\ts.top += len(ts)\n}", "func InsertFilter(pattern string, pos int, filter FilterFunc, params ...bool) *App {\n\topts := oldToNewFilterOpts(params)\n\treturn (*App)(web.InsertFilter(pattern, pos, func(ctx *context.Context) {\n\t\tfilter((*context2.Context)(ctx))\n\t}, opts...))\n}", "func (s *Stack) Push(v interface{}) {\n\ts.v = append(s.v, v)\n}", "func (ss *SliceStack) Push(item adts.ContainerElement) bool {\n\treturn ss.Add(item)\n}", "func (f *PipelineAddFunc) PushHook(hook func(string, ...interface{})) {\n\tf.hooks = append(f.hooks, hook)\n}", "func (s *Stack) Push(value interface{}) {\n\tif s.size == 0 {\n\t\ts.elements = make([]*Element, 0, 1)\n\t}\n\ts.top = &Element{value, s.top}\n\ts.elements = append(s.elements, s.top)\n\ts.size++\n}", "func RegisterFilter(name string, fg FilterGetter) {\n\tfilters[name] = fg\n}", "func (i *IQR) Push(x float64) error {\n\terr := i.quantile.Push(x)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"error pushing %f to Quantile\", x)\n\t}\n\treturn nil\n}", "func (lp *logPoller) RegisterFilter(filter Filter, qopts ...pg.QOpt) error {\n\tif len(filter.Addresses) == 0 {\n\t\treturn errors.Errorf(\"at least one address must be specified\")\n\t}\n\tif len(filter.EventSigs) == 0 {\n\t\treturn errors.Errorf(\"at least one event must be specified\")\n\t}\n\n\tfor _, eventSig := range filter.EventSigs {\n\t\tif eventSig == [common.HashLength]byte{} {\n\t\t\treturn errors.Errorf(\"empty event sig\")\n\t\t}\n\t}\n\tfor _, addr := range filter.Addresses {\n\t\tif addr == [common.AddressLength]byte{} {\n\t\t\treturn errors.Errorf(\"empty address\")\n\t\t}\n\t}\n\n\tlp.filterMu.Lock()\n\tdefer lp.filterMu.Unlock()\n\n\tif existingFilter, ok := lp.filters[filter.Name]; ok {\n\t\tif existingFilter.Contains(&filter) {\n\t\t\t// Nothing new in this Filter\n\t\t\treturn nil\n\t\t}\n\t\tlp.lggr.Warnw(\"Updating existing filter with more events or addresses\", \"filter\", filter)\n\t} else {\n\t\tlp.lggr.Debugw(\"Creating new filter\", \"filter\", filter)\n\t}\n\n\tif err := 
lp.orm.InsertFilter(filter, qopts...); err != nil {\n\t\treturn errors.Wrap(err, \"RegisterFilter failed to save filter to db\")\n\t}\n\tlp.filters[filter.Name] = filter\n\tlp.filterDirty = true\n\treturn nil\n}", "func (ls *ListStack) Push(item adts.ContainerElement) bool {\n\treturn ls.Add(item)\n}", "func (filterOrder *OrderFilters) AppendFilter(filters ...Filterer) {\n\tif cntFilters := len(filters); cntFilters > 0 {\n\t\tif filterOrder.filters == nil {\n\t\t\tfilterOrder.filters = []Filterer{}\n\t\t} else {\n\t\t\t// if already filterOrder have some filter so set next filter pointer\n\t\t\tfilterOrder.filters[len(filterOrder.filters)-1].SetNextFilter(&filters[0])\n\t\t}\n\t\tfor i := range filters {\n\t\t\tfilterOrder.filters = append(filterOrder.filters, filters[i]) // Save interface of filter\n\t\t\tfilters[i].SetSelfPointer(&filters[i]) // IMPORTANT! Save self interface pointer\n\t\t\tif i+1 != cntFilters {\n\t\t\t\tfilters[i].SetNextFilter(&filters[i+1]) // Save pointer to interface to next filter\n\t\t\t} else {\n\t\t\t\tfilters[i].SetNextFilter(nil) // flag end of filter\n\t\t\t}\n\t\t}\n\t}\n}", "func (s *MyStack) Push(x int) {\n\tn := len(s.queue)\n\ts.queue = append(s.queue, x)\n\tfor n > 0 {\n\t\ts.queue = append(s.queue, s.Pop())\n\t\tn--\n\t}\n}", "func (s *Stack[T]) Push(v T) {\n\t*s = append(*s, v) // Simply append the new value to the end of the stack\n}", "func (ar *ActionsReq) AddFilter(f *ReqFilter) (ok bool) {\n\tif f == nil {\n\t\treturn false\n\t}\n\tif ar.Filters == nil {\n\t\tar.Filters = make([]*ReqFilter, 0)\n\t}\n\tar.Filters = append(ar.Filters, f)\n\treturn true\n}", "func InsertFilter(l *Link, f *Context, fsi, fdi uint) int {\n\treturn int(C.avfilter_insert_filter((*C.struct_AVFilterLink)(l), (*C.struct_AVFilterContext)(f), C.uint(fsi), C.uint(fdi)))\n}", "func (st *Stack) Push(s string) {\n\tst.lines = append(st.lines, s)\n}", "func (s *exprStack) push(expr Expression) {\n\ttrace_util_0.Count(_util_00000, 169)\n\ts.stack = append(s.stack, expr)\n}", "func (l *Limiter) Filter(fn ...Filter) {\n\tl.filters = append(l.filters, fn...)\n}", "func (m *RouteMux) Filter(filter http.HandlerFunc) {\n\tm.filters = append(m.filters, filter)\n}", "func (ss *StringStack) Push(str string) {\n\tss.stack = append(ss.stack, str)\n}", "func (p *TimerStack) Push(eventName string) {\n\tp.stack = append(p.stack, &event{eventName, time.Now()})\n}", "func (s *Stack) Push(obj interface{}) {\n\ts.arr = append(s.arr, obj)\n}", "func (s *Stack) Push(v string) {\n\ts.pool = append(s.pool, v)\n}", "func (l *lexer) Push(tok *common.Token) {\n\t// Push the token onto the queue\n\tl.tokens.PushFront(tok)\n}", "func (s *Stack) Push(newHead *Node) {\n\ts.lf.Wait()\n\n\tnewHead.next = s.head\n\ts.head = newHead\n\n\ts.lf.Signal()\n}", "func (s *Stack) Push(item interface{}) {\n\tif s.max > 0 {\n\t\tf := NewFibonacci()\n\t\tfor s.Len() >= s.max {\n\t\t\tWarnfunc(\"Stack overflow. 
Waiting...\")\n\t\t\tf.WaitForIt(time.Second)\n\t\t}\n\t}\n\n\tn := &stackItem{data: item}\n\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\tif s.top == nil {\n\t\ts.top = n\n\t} else {\n\t\tn.next = s.top\n\t\ts.top = n\n\t}\n\n\ts.count++\n}", "func (s *BlockingStack) Push(v interface{}) {\n\ts.pushLock.Lock()\n\tdefer s.pushLock.Unlock()\n\tif s.Len() >= s.maxSize {\n\t\ts.pushBlockState = true\n\t\t<-s.pushBlock\n\t\ts.pushBlockState = false\n\t}\n\ts.top = &BlockingstackElement{\n\t\tvalue: v,\n\t\tprev: s.top,\n\t}\n\ts.size++\n\tif s.popBlockState {\n\t\ts.popBlock <- 1\n\t}\n}", "func (this *MyQueue) Push(x int) {\n\tthis.pushStack = append(this.pushStack, x)\n}", "func (s *Stack) Push(item interface{}) {\n\toldNode := s.node\n\tnewNode := nodeStack{TElement: item, Previous: &oldNode}\n\ts.node = newNode\n\ts.size++\n}", "func (p *Proc) Push() {\n\tp.stk.save()\n}", "func (w *interceptRW) Push(target string, opts *http.PushOptions) error {\n\tif w, ok := w.ResponseWriter.(http.Pusher); ok {\n\t\treturn w.Push(target, opts)\n\t}\n\treturn http.ErrNotSupported\n}", "func RegisterFilter(name string, v interface{}) {\n\troot.Filters[name] = v\n}", "func (this *MyStack) Push(x int) {\n\tthis.queue.PushBack(x)\n}" ]
[ "0.79087645", "0.6517648", "0.6221675", "0.5896108", "0.5835994", "0.5707383", "0.568775", "0.5643665", "0.56123495", "0.5579379", "0.5567732", "0.5551948", "0.55376995", "0.55253106", "0.55200595", "0.5502361", "0.54964405", "0.5488007", "0.5446844", "0.5409003", "0.54061323", "0.5402967", "0.5402074", "0.53494495", "0.53367054", "0.5326273", "0.53188443", "0.53188443", "0.53024673", "0.5300158", "0.52998644", "0.52994514", "0.5299248", "0.52930754", "0.52924585", "0.5285989", "0.5282185", "0.5280339", "0.5278605", "0.5276786", "0.5272031", "0.5267219", "0.52638316", "0.5258514", "0.52573067", "0.5254343", "0.52479625", "0.5242707", "0.5229472", "0.52241594", "0.52233016", "0.5212111", "0.52108383", "0.52076423", "0.520505", "0.520198", "0.5196515", "0.5175091", "0.51744074", "0.5174121", "0.51564544", "0.5141872", "0.5141693", "0.51402223", "0.51393104", "0.5133645", "0.5131636", "0.51301974", "0.512097", "0.51186275", "0.50935894", "0.5092504", "0.5086487", "0.507478", "0.50634116", "0.5061481", "0.50610334", "0.5057245", "0.5047977", "0.5046276", "0.50374043", "0.5030659", "0.5028414", "0.5027332", "0.5023661", "0.50235224", "0.5004195", "0.5003408", "0.49934414", "0.49871942", "0.4985164", "0.49842435", "0.498357", "0.49834448", "0.49802324", "0.4977439", "0.49768448", "0.49748617", "0.4972481", "0.4969406" ]
0.78237396
1
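The query/document pair above (and the FilterPop variants among its negatives) describe a simple push/pop filter stack. A minimal runnable sketch under assumed types follows; the Filter interface shape and the namedFilter helper are hypothetical additions for this sketch, while the FilterPush and FilterPop bodies mirror the dataset documents.

package main

import "fmt"

// Filter is a stand-in for the dataset's Filter interface (assumed shape).
type Filter interface {
	Name() string
}

type namedFilter string

func (f namedFilter) Name() string { return string(f) }

// Goldsmith mirrors the chain type in the document above, keeping only
// the filter stack that FilterPush/FilterPop manipulate.
type Goldsmith struct {
	fileFilters []Filter
}

// FilterPush appends a filter to the stack, as in the document above.
func (g *Goldsmith) FilterPush(f Filter) *Goldsmith {
	g.fileFilters = append(g.fileFilters, f)
	return g
}

// FilterPop removes the most recently pushed filter, panicking when the
// stack is empty (matching the FilterPop document in the next row).
func (g *Goldsmith) FilterPop() *Goldsmith {
	count := len(g.fileFilters)
	if count == 0 {
		panic("attempted to pop empty filter stack")
	}
	g.fileFilters = g.fileFilters[:count-1]
	return g
}

func main() {
	g := &Goldsmith{}
	g.FilterPush(namedFilter("markdown")).FilterPush(namedFilter("minify"))
	g.FilterPop()                   // removes "minify"; "markdown" stays active
	fmt.Println(len(g.fileFilters)) // prints 1
}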
FilterPop pops a filter instance from the chain's filter stack.
func (goldsmith *Goldsmith) FilterPop() *Goldsmith {
	count := len(goldsmith.fileFilters)
	if count == 0 {
		panic("attempted to pop empty filter stack")
	}
	goldsmith.fileFilters = goldsmith.fileFilters[:count-1]
	return goldsmith
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (self *Goldsmith) FilterPop() *Goldsmith {\n\tself.filters.pop()\n\tself.index++\n\treturn self\n}", "func Pop() Action {\n\treturn ActionPop{}\n}", "func (p *Proc) Pop() {\n\tp.stk.load()\n}", "func (s *stack) pop() {\n\ts.items = s.items[:len(s.items)-1]\n}", "func (st *stack) Pop() {\n\tif st.top == nil {\n\t\treturn\n\t}\n\n\tst.top = st.top.next\n\tst.length--\n}", "func (cr *ChainReader) Pop() *Record {\n\treturn cr.readers[cr.current].Pop()\n}", "func (w *walk) pop() {\n\tif len(*w) > 0 {\n\t\t*w = (*w)[:len(*w)-1]\n\t}\n}", "func (stack *PfxStack) Pop() {\n\tstack.Entries = stack.Entries[:len(stack.Entries)-1]\n\tif !stack.Entries[len(stack.Entries)-1].Last {\n\t\tstack.Entries[len(stack.Entries)-1].Preamble = stack.GetPreamble(stack.FirstDash)\n\t}\n}", "func (this *Stack) Pop() interface{} {\n\tsize := len(this.stack)\n\tres := this.stack[size-1]\n\tthis.stack = append([]interface{}{}, this.stack[0:size-1]...)\n\treturn res\n}", "func (stack *Stack) Pop() (interface{}, error) {\n\tif stack == nil {\n\t\treturn nil, errors.New(\"the stack is nil\")\n\t}\n\tl := len(*stack)\n\tif l == 0 {\n\t\treturn nil, errors.New(\"the stack len is 0\")\n\t}\n\tret := (*stack)[l-1]\n\t*stack = (*stack)[:l-1]\n\n\treturn ret, nil\n}", "func (s *Stack) Pop() (interface{}, error) {\n\n\tif s.IsEmpty() {\n\t\treturn nil, fmt.Errorf(\"stack is empty\")\n\t}\n\treturn s.list.RemoveLast()\n}", "func (s *Stack) Pop() {\n\ts.data = s.data[:len(s.data)-1]\n\t//fmt.Printf(\"popped stack size=%d\\n\", len(s.data))\n}", "func (q *Stack) Pop() interface{} {\n\treturn q.data.Remove(q.data.Front())\n}", "func (s *Stack) Pop() (x interface{}) {\n\tif len(s.data) == 0 {\n\t\treturn\n\t}\n\n\tx = s.data[len(s.data)-1]\n\ts.data = s.data[0 : len(s.data)-1]\n\treturn\n}", "func (p *TimerStack) Pop() {\n\tlastIndex := len(p.stack) - 1\n\tpopped := p.stack[lastIndex]\n\tp.stack = p.stack[:lastIndex]\n\telapsed := time.Since(popped.start)\n\tp.popEventHandler.PopEvent(popped.eventName, elapsed)\n}", "func (set Set) Pop(ctx context.Context) (string, error) {\n\treq := newRequest(\"*2\\r\\n$4\\r\\nSPOP\\r\\n$\")\n\treq.addString(set.name)\n\treturn set.c.cmdString(ctx, req)\n}", "func (ss *StringStack) Pop() {\n\tss.stack = ss.stack[:len(ss.stack)-1]\n}", "func (s *Stack) Pop() {\n\tif s.head != nil {\n\t\ttemp := s.head.next\n\t\ts.head = temp\n\t\ts.Length--\n\t}\n}", "func (c *canvasRenderer) Pop() {\n\tc.currentLayer = c.currentLayer.Previous()\n}", "func (s *IntStack) Pop() {\n\tlength := len(s.stack)\n\ts.stack = s.stack[:length - 1]\n}", "func (s *Stack) Pop() (*StackElement, error) {\n\tif s.IsEmpty() {\n\t\treturn nil, ErrStackUnderflow\n\t}\n\n\te := s.top\n\ts.top = e.next\n\ts.count--\n\treturn e, nil\n}", "func (st *Stack) Pop() {\n\tif len(st.lines) <= 1 {\n\t\tst.lines = nil\n\t\treturn\n\t}\n\tst.lines = st.lines[:len(st.lines)-1]\n}", "func (r *renderer) pop() reflect.Value {\n\tif len(r.stack) == 0 {\n\t\treturn reflect.Value{}\n\t}\n\tctx := r.stack[len(r.stack)-1]\n\tr.stack = r.stack[:len(r.stack)-1]\n\treturn ctx\n}", "func (stack *Stack) Pop() (stuff interface{}, err error) {\n\ttheStack := *stack\n\n\tif len(theStack) == 0 {\n\t\treturn nil, errors.New(\"Tried to pop an empty stack.\")\n\t}\n\n\t//get last element\n\tlast := theStack[len(theStack)-1]\n\t//reduce stack by 1\n\t*stack = theStack[:len(theStack)-1]\n\n\treturn last, nil\n}", "func (q *Stack) Pop() interface{} {\n\treturn q.Items.Pop().Value\n}", "func (a *LocalActivations) Pop() {\n\tcount := len(a.activations)\n\tif count < 1 
{\n\t\treturn\n\t}\n\ta.activations = a.activations[:count-1]\n}", "func (is *InputSteerer) Pop() {\n\tis.mu.Lock()\n\tdefer is.mu.Unlock()\n\tis.ws = is.ws[:len(is.ws)-1]\n}", "func (s *Stack) Pop() (val interface{}) {\n\tif s.isEmpty() {\n\t\treturn\n\t}\n\treturn s.list.RemoveHead()\n}", "func (ds *DrawStack) Pop() {\n\tds.toPop++\n}", "func (i *incStack) Pop() {\n\tif len(i.stack) == 0 {\n\t\treturn\n\t}\n\ti.stack = i.stack[:len(i.stack)-1]\n}", "func (st *scopeStack) pop() {\n\tl := len(st.stack)\n\tif l == 1 {\n\t\tpanic(\"popped the standard library (pre-main) scope\")\n\t}\n\t// TODO OPT: Optimize for space, this will never free any underlying memory.\n\tst.stack = st.stack[:l-1]\n}", "func (s *Stack) Pop() error {\n\tif len(s.a) == 0 {\n\t\treturn &EmptyError{}\n\t}\n\ts.a = s.a[:len(s.a)-1]\n\ts.Size = s.Size - 1\n\treturn nil\n}", "func (s *SimpleStack) Pop() (val interface{}, err error) {\n\tif s.isEmpty() {\n\t\terr = errors.New(\"stack is empty\")\n\t\treturn\n\t}\n\tval = s.data[s.top]\n\ts.top--\n\treturn\n}", "func (s *UndoStack) Pop() UndoFn {\n\tn := len(s.states)\n\tif n == 0 {\n\t\treturn nil\n\t}\n\tv := s.states[n-1]\n\ts.states = s.states[:n-1]\n\treturn v\n}", "func (s *Stack) Pop() (item float64) {\n\ts.Length--\n\titem = s.Items[s.Length]\n\ts.Items = s.Items[:s.Length]\n\treturn\n}", "func (s *Stack[T]) Pop() T {\n\tv := s.array[len(s.array)-1]\n\ts.array = s.array[:len(s.array)-1]\n\treturn v\n}", "func (s *Stack) Pop() (interface{}, error) {\n\tvar e *element = s.top\n\tif e == nil {\n\t\treturn 0, &StackError{msg: \"empty\"}\n\t}\n\ts.top = e.under\n\ts.count -= 1\n\treturn e.value, nil\n}", "func (s *Stack) Pop() interface{} {\n\tif len(s.data) > 0 {\n\t\tret := s.data[len(s.data)-1]\n\t\ts.data = s.data[:len(s.data)-1]\n\t\treturn ret\n\t}\n\n\treturn nil\n}", "func (s *Stack) Pop() string {\n\tindex := len(*s) - 1\n\telement := (*s)[index]\n\t*s = (*s)[:index]\n\n\treturn element\n}", "func Pop(ctx echo.Context) error {\n\n\treq := types.PopRequest{}\n\n\terr := ctx.Bind(&req)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tif !registration.IsAgentRegistered(req.Token) {\n\t\treturn ctx.JSON(403, types.ValidateResponse{Success: false, Message: \"Security Token Not Recognized\"})\n\t}\n\n\tmsg, err := GetFromQueue(req.Queue)\n\n\tdata := types.Message{}\n\n\tjson.Unmarshal(msg, &data)\n\n\tresp := types.PopResponse{Message: data.Message, Queue: req.Queue}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ctx.JSON(200, resp)\n}", "func (l *list) Pop() {\n\tl.elements = l.elements[:len(l.elements)-1]\n}", "func (s *StackTemplate) Pop() *interface{} {\n\tif s == nil {\n\t\treturn nil\n\t}\n\n\tif last := len(*s) - 1; last < 0 {\n\t\treturn nil\n\t} else {\n\t\titem := (*s)[len(*s)-1]\n\t\treduced := (*s)[:last]\n\t\t*s = reduced\n\t\treturn &item\n\t}\n}", "func (s *Stack) Pop() interface{} {\r\n\tn := len(s.stk)\r\n\tvalue := s.stk[n-1]\r\n\ts.stk = s.stk[:n-1]\r\n\treturn value\r\n}", "func (q *Queue) Pop(ctx context.Context) (r *Rq, err error) {\n\tq.mu.Lock()\n\tdefer q.mu.Unlock()\n\tif len(q.q) == 0 {\n\t\treturn nil, ErrQueueEmpty\n\t}\n\tr, q.q = q.q[0], q.q[1:]\n\treturn\n}", "func (l *pqList) Pop() interface{} {\n\treturn l.Remove(len(l.Slice) - 1)\n}", "func (s *stack) Pop() string {\n\tif len(*s) == 0 {\n\t\treturn \"\"\n\t}\n\n\tstr := (*s)[len(*s)-1]\n\t*s = (*s)[:len(*s)-1]\n\treturn str\n}", "func (stack *Stack) Pop() string {\n\tret := (*stack)[len(*stack)-1]\n\t*stack = (*stack)[:len(*stack)-1]\n\treturn ret\n}", "func (s *Stack) 
Pop() interface{} {\n\tv := s.v[len(s.v)]\n\ts.v = s.v[:len(s.v)-1]\n\treturn v\n}", "func (s *Stack) Pop() DrawPather {\n\tif s.Len() == 0 {\n\t\treturn nil\n\t}\n\ttmp := (*s)[s.Len()-1]\n\t*s = (*s)[:s.Len()-1]\n\treturn tmp\n}", "func (s *Stack) Pop() interface{} {\n\tif s.IsEmpty() {\n\t\treturn nil\n\t}\n\telement := (*s)[len(*s)-1]\n\t*s = (*s)[:len(*s)-1]\n\treturn element\n}", "func (stack *Item) Pop() (newstack *Item, top *Item) {\n\ttop = stack\n\tnewstack = stack.Next\n\treturn\n}", "func (s *Stack) Pop() (interface{}, error) {\n\tif s.top == nil {\n\t\treturn nil, errors.New(\"Empty Stack Exception\")\n\t}\n\titem := s.top.data\n\ts.top = s.top.next\n\treturn item, nil\n}", "func (s *LinkedStack) Pop() (interface{}, error) {\n\tif s.count == 0 {\n\t\treturn nil, errors.New(\"Stack is already empty\")\n\t}\n\tresult := s.topPtr.item\n\ts.topPtr = s.topPtr.next\n\ts.count--\n\treturn result, nil\n}", "func (stack *Stack) Pop() interface{} {\n\te := stack.list.Back()\n\tif e != nil {\n\t\tstack.list.Remove(e)\n\t\treturn e.Value\n\t}\n\treturn nil\n}", "func (s *stack) pop() {\n\tif s.isEmpty() {\n\t\tfmt.Println(\"Stack Underflows\")\n\t\treturn\n\t}\n\t*s = (*s)[:len(*s)-1]\n}", "func (sa *StackArray) Pop() interface{} {\n\tif sa.isEmpty() {\n\t\tfmt.Println(\"The stack is empty\")\n\t\treturn nil\n\t}\n\n\titem := sa.ArrayStack[sa.Top]\n\tsa.Top--\n\n\treturn item\n}", "func (s *stack) Pop() []byte {\n\tif len(*s) == 0 {\n\t\treturn nil\n\t}\n\n\tv := (*s)[len(*s)-1]\n\t*s = (*s)[:len(*s)-1]\n\n\treturn v\n}", "func (s *Stack) Pop() (interface{}, error) {\n\tif s.Empty() {\n\t\treturn nil, errors.New(\"can't get top of empty stack\")\n\t}\n\ttop, _ := s.Top()\n\ts.arr = s.arr[:s.Size()-1]\n\treturn top, nil\n}", "func (s *Stack) Pop() int {\n\tlength := len(s.items) - 1\n\ttoRemove := s.items[length]\n\ts.items = s.items[:length]\n\treturn toRemove\n}", "func (q *Stack) Pop() string {\n\ts := *q\n\tlast := s[len(s)-1]\n\t*q = s[:len(s)-1]\n\treturn last\n}", "func (vector *Vector) Pop() {\n\tvar element interface{}\n\telement, *vector = (*vector)[len(*vector)-1], (*vector)[:len(*vector)-1]\n\t// Note: dropping element here.\n\t_ = element\n}", "func (s *Stack) Pop() (value interface{}) {\n\tif s.size > 0 {\n\t\ts.top, value = s.top.next, s.top.value\n\t\ts.elements = s.elements[:s.size-1]\n\t\ts.size--\n\t\treturn\n\t}\n\treturn nil\n}", "func (s *Stack) Pop() interface{} {\n\tif s.Last == nil {\n\t\treturn nil\n\t}\n\n\ttop := s.Peek()\n\ts.Last = s.Last.Next\n\n\ts.Length--\n\n\treturn top\n}", "func (goldsmith *Goldsmith) FilterPush(filter Filter) *Goldsmith {\n\tgoldsmith.fileFilters = append(goldsmith.fileFilters, filter)\n\treturn goldsmith\n}", "func (b *Builder) Pop(count uint32) {\n\tb.popStackMulti(int(count))\n\tb.instructions = append(b.instructions, asm.Pop{\n\t\tCount: count,\n\t})\n}", "func (pq *PriorityQueue) Pop() interface{} {\n\treturn Pop(pq.list)\n}", "func (s *Stack) Pop() int {\n\tl := len(s-item) - 1\n\ttoRemove := s.items[l]\n\ts.items = s.items[:l]\n\treturn toRemove\n}", "func (s *MyStack) Pop() int {\n\titem := s.queue[0]\n\ts.queue = s.queue[1:]\n\treturn item\n}", "func (r *RecordSlice) Pop() interface{} {\n\trec := r.zvals[len(r.zvals)-1]\n\tr.zvals = r.zvals[:len(r.zvals)-1]\n\treturn &rec\n}", "func (stack *NodeStack) Pop() *Node {\n\tel := stack.elements[len(stack.elements)-1]\n\tstack.elements = stack.elements[:len(stack.elements)-1]\n\treturn el\n}", "func Pop(context *endly.Context) *model.Process {\n\tvar processes = 
processes(context)\n\tvar process = processes.Pop()\n\tif process != nil && process.Source != nil {\n\t\tcontext.Source = process.Source\n\t}\n\treturn process\n}", "func (ob *ObservableArray) Pop() *js.Object {\n\treturn ob.o.Call(\"pop\")\n}", "func (s *exprStack) pop() Expression {\n\ttrace_util_0.Count(_util_00000, 163)\n\tif s.len() == 0 {\n\t\ttrace_util_0.Count(_util_00000, 165)\n\t\treturn nil\n\t}\n\ttrace_util_0.Count(_util_00000, 164)\n\tlastIdx := s.len() - 1\n\texpr := s.stack[lastIdx]\n\ts.stack = s.stack[:lastIdx]\n\treturn expr\n}", "func (q *Stack) Pop() interface{} {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\tif q.top == nil {\n\t\treturn nil\n\t}\n\n\tn := doPop(q)\n\n\treturn n.data\n}", "func (s *BoolStack) Pop() bool {\n\tif s.sp < 0 {\n\t\treturn false\n\t}\n\tr := s.data[s.sp]\n\ts.data = s.data[:s.sp]\n\ts.sp--\n\treturn r\n}", "func (s *Stack) Pop() (bool, string) {\n\n\tif s.UnderFlow() {\n\t\treturn true, \"\"\n\t} else {\n\t\tindex := len(s.pool) - 1 \n\t\tremoved := s.pool[index] \n\t\ts.pool = s.pool[:index] \n\t\treturn true, removed\n\t}\n}", "func (cs *copyStack) pop() *Type {\n\tn := len(*cs)\n\tif n == 0 {\n\t\treturn nil\n\t}\n\n\tt := (*cs)[n-1]\n\t*cs = (*cs)[:n-1]\n\treturn t\n}", "func (p *Stack) Pop() (v interface{}, ok bool) {\n\n\tn := len(p.data)\n\tif n > 0 {\n\t\tv, ok = p.data[n-1], true\n\t\tp.data = p.data[:n-1]\n\t}\n\treturn\n}", "func (v *Data) Pop() PicData {\n\treturn v.Remove(len(*v) - 1)\n}", "func (s *Stack) Pop() *StackNode {\n\tt := s.Top\n\tif t.Next != nil {\n\t\ts.Top = s.Top.Next\n\t}\n\treturn t\n}", "func (s *Stack) Pop() string {\n\t*s = (*s)[:len(*s)-1]\n\tif len(*s) == 0 {\n\t\treturn \"\"\n\t}\n\treturn (*s)[len(*s)-1]\n\n}", "func (this *Stack) Pop() interface{} {\n\tif this.length == 0 {\n\t\treturn nil\n\t}\n\n\tn := this.top\n\tthis.top = n.prev\n\tthis.length--\n\treturn n.value\n}", "func (s *BlockingStack) Pop() interface{} {\n\ts.popLock.Lock()\n\tdefer s.popLock.Unlock()\n\tif s.Len() <= 0 {\n\t\ts.popBlockState = true\n\t\t<-s.popBlock\n\t\ts.popBlockState = false\n\t}\n\tret := s.top.value\n\ts.top = s.top.prev\n\ts.size--\n\tif s.pushBlockState {\n\t\ts.pushBlock <- 1\n\t}\n\treturn ret\n}", "func (s *Stack) Pop() interface{} {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\n\tvar n *stackItem\n\tif s.top != nil {\n\t\tn = s.top\n\t\ts.top = n.next\n\t\ts.count--\n\t}\n\n\tif n == nil {\n\t\treturn nil\n\t}\n\n\treturn n.data\n\n}", "func (self *Queue) Pop() {\n\tcurr := self.final.GetNext()\n\tif self.final.GetNext() == nil {\n\t\tself.final = nil\n\t} else {\n\t\tself.final.SetNext(curr.GetNext())\n\t}\n\tself.size--\n}", "func (self *Goldsmith) FilterPush(filter Filter) *Goldsmith {\n\tself.filters.push(filter, self.index)\n\tself.index++\n\treturn self\n}", "func op_POP(pc *uint64, in *interpreter, ctx *callCtx) uint64 {\n\tctx.stack.Pop()\n\treturn 0\n}", "func (ob *StringStack) Pop() string {\n\tmax := len(ob.ar) - 1\n\tif max < 0 {\n\t\tmod.Error(\"Pop() on empty stack\")\n\t\treturn \"\"\n\t}\n\tob.ar = ob.ar[:max]\n\tmax--\n\treturn ob.ar[max]\n}", "func (s *Stack) Pop() *Tree {\n\tif s.count == 0 {\n\t\treturn nil\n\t}\n\tnode := s.nodes[s.count-1]\n\ts.count--\n\treturn node\n}", "func (s *simpleStack) Pop() (StackData, error) {\n\tl := len(s.s)\n\tif l < 1 {\n\t\treturn 0, emptyStack{s}\n\t}\n\tv := s.s[l-1]\n\ts.s = s.s[:l-1]\n\treturn v, nil\n}", "func (p *SliceOfMap) Pop() (elem *Object) {\n\telem = p.Last()\n\tp.DropLast()\n\treturn\n}", "func (shelf *Shelf) Pop() interface{} {\n\tx := 
shelf.queue[shelf.Len()-1]\n\tshelf.queue = shelf.queue[:shelf.Len()-1]\n\treturn x\n}", "func (a *Array) Pop() interface{} {\n\tdefer func() {\n\t\ta.Data = a.Data[:a.Length-1]\n\t\ta.Length--\n\t}()\n\treturn a.Data[a.Length-1]\n}", "func (s *Stack) Pop() *NodeStack {\n\tif s.len == 0 {\n\t\treturn nil\n\t}\n\ts.len--\n\tpopped := s.nodes[s.len]\n\ts.nodes = s.nodes[:s.len]\n\treturn popped\n}", "func (es *eeStack) pop() (v interface{}, t eeType) {\r\n\tt = es.popType()\r\n\tv = t.pop(es)\r\n\treturn\r\n}", "func (h *Heap) Pop() (interface{}, error) {\n\tobj := heap.Pop(h.data)\n\tif obj != nil {\n\t\tif h.metricRecorder != nil {\n\t\t\th.metricRecorder.Dec()\n\t\t}\n\t\treturn obj, nil\n\t}\n\treturn nil, fmt.Errorf(\"object was removed from heap data\")\n}", "func (d *Deck) Pop() *Card {\n\tcard := *d.Cards[len(d.Cards)-1]\n\td.Cards = d.Cards[:len(d.Cards)-1]\n\treturn &card\n}", "func (this *MyStack) Pop() int {\n\tx := this.Queue[0]\n\tthis.Queue = this.Queue[1:]\n\treturn x\n}", "func (s *Stack) Pop() (value interface{}) {\n\tif s.size > 0 {\n\t\tvalue, s.top = s.top.value, s.top.next\n\t\ts.size--\n\t\treturn\n\t}\n\treturn nil\n}", "func (s *Stack) Pop() (value interface{}) {\n\tif s.size > 0 {\n\t\tvalue, s.top = s.top.value, s.top.next\n\t\ts.size--\n\t\treturn\n\t}\n\treturn nil\n}" ]
[ "0.8019365", "0.6269033", "0.59818035", "0.59293777", "0.59005564", "0.58554685", "0.583324", "0.5819097", "0.58162224", "0.58127576", "0.581118", "0.5795101", "0.5793802", "0.5781115", "0.5770458", "0.57550913", "0.5749995", "0.57444715", "0.5732523", "0.57297", "0.5724439", "0.57157695", "0.57138073", "0.57056785", "0.56990194", "0.5660522", "0.56489176", "0.5639048", "0.5630965", "0.56258017", "0.56198335", "0.5616474", "0.55978036", "0.5590318", "0.5583051", "0.55806756", "0.55732226", "0.55677897", "0.556762", "0.5545817", "0.55447984", "0.5537306", "0.5536519", "0.55339324", "0.5530358", "0.55266505", "0.5518722", "0.551731", "0.5510029", "0.55099905", "0.5501782", "0.55000544", "0.54973155", "0.5484043", "0.54703045", "0.5468532", "0.5465812", "0.5464344", "0.5459463", "0.54487574", "0.54442906", "0.54398423", "0.5437918", "0.5432201", "0.5428849", "0.5424482", "0.54244256", "0.54155654", "0.54125875", "0.541118", "0.5410868", "0.54040015", "0.5401451", "0.5393503", "0.53897744", "0.53880286", "0.5377975", "0.53772134", "0.5376644", "0.5371458", "0.5369937", "0.53650755", "0.53642255", "0.5362412", "0.5354904", "0.5353878", "0.5352958", "0.5351662", "0.5351615", "0.53422827", "0.53356254", "0.5328851", "0.5326137", "0.5325359", "0.53191686", "0.53157055", "0.53146267", "0.52978754", "0.5291988", "0.5291988" ]
0.81650084
0
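Most of the negatives above are slice-backed stack Pops that differ mainly in how they guard the empty case; one of them indexes s.v[len(s.v)], an off-by-one that panics at runtime. A minimal correct sketch follows — the type and field names are illustrative, not taken from any single snippet above.

package main

import "fmt"

// Stack is a minimal slice-backed LIFO.
type Stack struct {
	items []interface{}
}

// Push appends a value to the top of the stack.
func (s *Stack) Push(v interface{}) {
	s.items = append(s.items, v)
}

// Pop removes and returns the top element, or nil when the stack is empty.
// Note the len-1 index: s.items[len(s.items)] would panic with an
// index-out-of-range, the off-by-one present in one snippet above.
func (s *Stack) Pop() interface{} {
	if len(s.items) == 0 {
		return nil
	}
	v := s.items[len(s.items)-1]
	s.items = s.items[:len(s.items)-1]
	return v
}

func main() {
	s := &Stack{}
	s.Push(1)
	s.Push(2)
	fmt.Println(s.Pop(), s.Pop(), s.Pop()) // 2 1 <nil>
}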
End stops a chain, writing all received files to targetDir as output.
func (goldsmith *Goldsmith) End(targetDir string) []error {
	goldsmith.targetDir = targetDir

	for _, context := range goldsmith.contexts {
		go context.step()
	}

	context := goldsmith.contexts[len(goldsmith.contexts)-1]

export:
	for file := range context.outputFiles {
		for _, fileFilter := range goldsmith.fileFilters {
			accept, err := fileFilter.Accept(file)
			if err != nil {
				goldsmith.fault(fileFilter.Name(), file, err)
				continue export
			}
			if !accept {
				continue export
			}
		}

		goldsmith.exportFile(file)
	}

	if goldsmith.clean {
		goldsmith.removeUnreferencedFiles()
	}

	return goldsmith.errors
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (self *Goldsmith) End(targetDir string) []error {\n\tself.targetDir = targetDir\n\n\tself.Chain(&saver{clean: self.clean})\n\tfor _, context := range self.contexts {\n\t\tgo context.step()\n\t}\n\n\tcontext := self.contexts[len(self.contexts)-1]\n\tfor range context.filesOut {\n\n\t}\n\n\treturn self.errors\n}", "func (l LcvTrans) End(args ...interface{}) error {\n\tlog.INFO.Printf(\"LCV transfer perf-test done.\")\n\treturn nil\n}", "func (w *Watcher) finish() {\n\tclose(w.Fork)\n\tclose(w.Exec)\n\tclose(w.Exit)\n\tclose(w.Error)\n}", "func (h *AuditLogHandler) FinalizeChain() {\n\th.mutex.Lock()\n\tdefer h.mutex.Unlock()\n\n\tsendServiceLogs()\n}", "func (s *Basegff3Listener) ExitEnd(ctx *EndContext) {}", "func (w *BaseWorker) emitEnd() {\n\tif len(w.nextWorkers) > 0 {\n\t\tfor _, worker := range w.nextWorkers {\n\t\t\tif nin := worker.Input(); nin != nil {\n\t\t\t\tnin <- nil\n\t\t\t}\n\t\t}\n\t}\n\tw.output <- nil\n}", "func (ll LiveLogger) End() {\n\tclose(ll.msgChannel)\n\tll.open = false\n}", "func (po *privOutput) exit() {\n\tif po.Producer != nil {\n\t\terrs := po.Producer.Close()\n\t\tif !errs.Empty() {\n\t\t\tpo.logErr(errs.Error())\n\t\t}\n\t}\n\n\tfor _, f := range po.filts {\n\t\tf.Exit()\n\t}\n\n\tpo.wg.Done()\n}", "func (f *FinalizedChain) ColdEnd() Step {\n\tf.RLock()\n\tdefer f.RUnlock()\n\treturn f.end()\n}", "func (z *Stream) End() {\n\tC.lzma_end(z.C())\n}", "func (t *Tailer) Stop() {\n\tatomic.StoreInt32(&t.didFileRotate, 0)\n\tt.stop <- struct{}{}\n\tt.source.RemoveInput(t.path)\n\t// wait for the decoder to be flushed\n\t<-t.done\n}", "func End() {\n\tglobalProbe.End()\n}", "func (c *Chain) Stop() (err error) {\n\t// Close all opened resources\n\treturn c.state.Close(true)\n}", "func (m *OutboundMock) Finish() {\n\tm.MinimockFinish()\n}", "func (e *etcdMasterElector) Stop() {\n\tclose(e.done)\n}", "func (c *Chain) End(handler http.Handler) http.Handler {\n\tif handler == nil {\n\t\thandler = http.HandlerFunc(emptyHandler)\n\t}\n\n\tfor i := len(c.hs) - 1; i >= 0; i-- {\n\t\thandler = c.hs[i](handler)\n\t}\n\n\treturn handler\n}", "func (test *singleFileTest) endTestCase() {\n\ttest.expectedGraph().Files = make(map[string]mojom_files.MojomFile)\n\ttest.expectedGraph().Files[test.fileName()] = *test.expectedFile()\n\ttest.testCaseNum += 1\n}", "func (s *Basememcached_protocolListener) ExitEnd(ctx *EndContext) {}", "func (a *ACBuild) End() error {\n\t_, err := os.Stat(a.ContextPath)\n\tswitch {\n\tcase os.IsNotExist(err):\n\t\treturn errNoBuildInProgress\n\tcase err != nil:\n\t\treturn err\n\t}\n\n\tif err = a.lock(); err != nil {\n\t\treturn err\n\t}\n\n\terr = os.RemoveAll(a.ContextPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (s *BasecluListener) ExitIterator(ctx *IteratorContext) {}", "func (l *ProgressBarLogger) End() {\n\tif l.cancelFunc != nil {\n\t\tl.cancelFunc()\n\t}\n\tl.bar.Finish()\n\tl.logger.Logf(\"\\n%s\", l.finalMessage)\n}", "func (w *Writer) End()", "func (l *ProgressBarNoTTYLogger) End() {\n\tif l.cancelFunc != nil {\n\t\tl.cancelFunc()\n\t}\n\tif l.logger != nil && l.finalMessage != \"\" {\n\t\tl.logger.Logf(l.finalMessage)\n\t}\n}", "func (l Log) finish(path string, file *os.File) (Log, error) {\n\tl.EndTime = time.Now()\n\tl.Status = \"Finished\"\n\treturn l, l.saveAndWriteToLog(path, file, \"Finished\")\n}", "func (t *Targeter) Stop() {\n\tt.once.Do(func() {\n\t\tclose(t.done)\n\t})\n}", "func (m *mapper) stop() { syncClose(m.done) }", "func End() (time.Duration, error) {\n\tstart := 
time.Now()\n\tlog.Infof(\"END: begin device Gather processes stop... at %s\", start.String())\n\t// Stop all device processes and its measurements. Once finished they will be removed\n\t// from the bus and node closed (snmp connections for measurements will be closed)\n\tDeviceProcessStop()\n\tlog.Info(\"END: begin selfmon Gather processes stop...\")\n\t// stop the selfmon process\n\tselfmonProc.StopGather()\n\tlog.Info(\"END: waiting for all Gather goroutines stop...\")\n\t// wait until Done\n\tgatherWg.Wait()\n\tlog.Info(\"END: releasing Selfmonitoring Resources\")\n\tselfmonProc.End()\n\tlog.Info(\"END: begin sender processes stop...\")\n\t// log.Info(\"DEBUG Gather WAIT %+v\", GatherWg)\n\t// log.Info(\"DEBUG SENDER WAIT %+v\", senderWg)\n\t// stop all Output Emitter\n\tStopInfluxOut(influxdb)\n\tlog.Info(\"END: waiting for all Sender goroutines stop..\")\n\tsenderWg.Wait()\n\tlog.Info(\"END: releasing Sender Resources\")\n\tReleaseInfluxOut(influxdb)\n\tlog.Infof(\"END: Finished from %s to %s [Duration : %s]\", start.String(), time.Now().String(), time.Since(start).String())\n\treturn time.Since(start), nil\n}", "func (fs *fileStream) Stop() {\n\tfs.stopOnce.Do(func() {\n\t\tglog.Info(\"signalling stop at next EOF\")\n\t\tclose(fs.stopChan)\n\t})\n}", "func (g *Generator) exit() {\n\tg.depth--\n}", "func (w *BaseWorker) Chain(nextWorker Worker) Worker {\n\tif w.output == nil {\n\t\tw.output = NewFileStream()\n\t}\n\t// override the input in the worker\n\tnextWorker.SetInput(w.output)\n\n\tif o := nextWorker.Output(); o == nil {\n\t\tnextWorker.SetOutput(NewFileStream())\n\t}\n\tw.nextWorkers = append(w.nextWorkers, nextWorker)\n\tnextWorker.Start()\n\treturn nextWorker\n}", "func (b *Blinker) Stop() {\n\tclose(b.stop)\n}", "func (l *Listener) ExitAtEnd(ctx *parser.AtEndContext) {}", "func (s *BasevhdlListener) ExitTarget(ctx *TargetContext) {}", "func (t *Trace) End(opts ...EventOption) {\n\tt.event.setDuration(time.Since(t.event.Start()))\n\tt.Event(t.event, opts...)\n\tfor _, i := range t.impl {\n\t\ti.Teardown()\n\t}\n}", "func End() http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Println(\"Returning response and terminating\")\n\t})\n}", "func (f *FakeOutput) Stop() error { return nil }", "func (m *Mock) Done() {\n\tm._history = nil\n\tm.Ts.Close()\n}", "func (p *literalProcessor) stop() { syncClose(p.done) }", "func (s *SimPDF) Finish(fileOutput string) {\n\tif internal.ValidateFilePath(fileOutput) {\n\t\tinternal.MoveFilePath(fileOutput, fileOutput+\".bak\")\n\t}\n\ts.PDF.OutputFileAndClose(fileOutput)\n}", "func (s *scpSession) endDirectory() error {\n\t_, err := fmt.Fprintf(s.in, \"%s\\n\", msgEndDir)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error while ending a recursive directory: err=%s\", err)\n\t}\n\n\treturn s.readReply()\n}", "func EndProgress() {\n\tendTime = time.Now().Round(time.Second)\n\tendChan <- true\n\tclose(endChan)\n}", "func (da *DiskArchiver) OutputLoop() error {\n\tvar err error\n\tda.outFile, err = os.Create(da.outFilePath)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Error creating file %s\", da.outFilePath)\n\t}\n\tdefer da.outFile.Close()\n\tif _, err := os.Lstat(da.symlinkPath); err == nil {\n\t\terr = os.Remove(da.symlinkPath)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"Error removing symlink %v\", da.symlinkPath)\n\t\t}\n\t}\n\terr = os.Symlink(da.outFilePath, da.symlinkPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"Writing to file %s (and symlink %s)\", 
da.outFilePath, da.symlinkPath)\n\tfor tweet := range da.ArchiveChannel {\n\t\terr = da.output(tweet)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Got error %v when saving tweet %+v to disk, skipping\", err, tweet)\n\t\t}\n\t}\n\treturn nil\n}", "func (Sm *ServiceMaster) ReceiveFinish(filenames interface{}, response *bool) error {\n\tfnames, _ := filenames.([]string)\n\tSm.Mr.ReceiveFinish(fnames)\n\t*response = true\n\treturn nil\n}", "func (dc *Decompressor) Finish() error {\n\tvar err error\n\tselect {\n\tcase <-dc.ctx.Done():\n\t\terr = dc.ctx.Err()\n\tdefault:\n\t}\n\t// NOTE, that the the assemble method must read all of the output\n\t// produced by the workers, even in the event of an error. Otherwise\n\t// a deadlock will occur with the workers trying to write blocks to\n\t// the channel that the assemble method is no longer reading from.\n\tclose(dc.workCh)\n\tdc.workWg.Wait()\n\tclose(dc.doneCh)\n\tdc.doneWg.Wait()\n\treturn err\n}", "func (m *TesterMock) Finish() {\n\tm.MinimockFinish()\n}", "func (p *ProgressMeter) Finish() {\n\tclose(p.finished)\n\tp.update()\n\tp.logger.Close()\n\tif !p.dryRun && p.estimatedBytes > 0 {\n\t\tfmt.Fprintf(os.Stdout, \"\\n\")\n\t}\n}", "func (p *ProgressMeter) Finish() {\n\tclose(p.finished)\n\tp.update()\n\tp.logger.Close()\n\tif !p.dryRun && p.estimatedBytes > 0 {\n\t\tfmt.Fprintf(os.Stdout, \"\\n\")\n\t}\n}", "func (ps *rateLimiter) Stop() { close(ps.exit) }", "func (p *Pipeline) Stop() {\n\tp.mut.Lock()\n\tdefer p.mut.Unlock()\n\tif p.err == nil {\n\t\tclose(p.done)\n\t}\n}", "func (d *DownloadProxy) Done() {\n\td.wg.Done()\n}", "func (lc *Closer) Done() {\n\tlc.waiting.Done()\n}", "func (f *FilePlayer) Stop() {\n\tf.doneCh <- true\n}", "func endLogging() {\n\tif logFile != nil {\n\t\tlogFile.Close()\n\t}\n\tlog.SetOutput(os.Stderr)\n}", "func (a *Agent) flushLoop(\n\tctx context.Context,\n\toutput *models.RunningOutput,\n\tticker Ticker,\n) {\n\tlogError := func(err error) {\n\t\tif err != nil {\n\t\t\tlog.Printf(\"E! [agent] Error writing to %s: %v\", output.LogName(), err)\n\t\t}\n\t}\n\n\t// watch for flush requests\n\tflushRequested := make(chan os.Signal, 1)\n\twatchForFlushSignal(flushRequested)\n\tdefer stopListeningForFlushSignal(flushRequested)\n\n\tfor {\n\t\t// Favor shutdown over other methods.\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tlogError(a.flushOnce(output, ticker, output.Write))\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tlogError(a.flushOnce(output, ticker, output.Write))\n\t\t\treturn\n\t\tcase <-output.ShutdownChan:\n\t\t\tlog.Println(\"I! 
[agent] stopping output plugin\", output.Config.Name)\n\t\t\ta.pluginLock.Lock()\n\t\t\tdelete(a.runningPlugins, output.UniqueId)\n\t\t\ta.pluginLock.Unlock()\n\t\t\t// delete output from output unit slice\n\t\t\tfor i, ro := range a.ou.outputs {\n\t\t\t\tif output == ro {\n\t\t\t\t\t// swap with last output and truncate slice\n\t\t\t\t\tif len(a.ou.outputs) > 1 {\n\t\t\t\t\t\ta.ou.outputs[i] = a.ou.outputs[len(a.ou.outputs)-1]\n\t\t\t\t\t}\n\t\t\t\t\ta.ou.outputs = a.ou.outputs[:len(a.ou.outputs)-1]\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\toutput.Close()\n\t\t\treturn\n\t\tcase <-ticker.Elapsed():\n\t\t\tlogError(a.flushOnce(output, ticker, output.Write))\n\t\tcase <-flushRequested:\n\t\t\tlogError(a.flushOnce(output, ticker, output.Write))\n\t\tcase <-output.BatchReady:\n\t\t\t// Favor the ticker over batch ready\n\t\t\tselect {\n\t\t\tcase <-ticker.Elapsed():\n\t\t\t\tlogError(a.flushOnce(output, ticker, output.Write))\n\t\t\tdefault:\n\t\t\t\tlogError(a.flushOnce(output, ticker, output.WriteBatch))\n\t\t\t}\n\t\t}\n\t}\n}", "func (localOptimizer) finish(operation chan<- Task, result <-chan Task) {\n\t// Guarantee that result is closed before operation is closed.\n\tfor range result {\n\t}\n}", "func (s *BaseGShellListener) ExitCommandBlockTail(ctx *CommandBlockTailContext) {}", "func (r *SbProxy) closeDone() {\n\tr.doneOnce.Do(func() { close(r.doneCh) })\n}", "func (s *downStream) endStream() {\n\tif s.responseSender != nil && !s.downstreamRecvDone {\n\t\t// not reuse buffer\n\t\tatomic.StoreUint32(&s.reuseBuffer, 0)\n\t}\n\ts.cleanStream()\n\n\t// note: if proxy logic resets the stream, there maybe some underlying data in the conn.\n\t// we ignore this for now, fix as a todo\n}", "func (builder *SentenceBuilder) ConcealEnd() *SentenceBuilder {\n\treturn builder.write(concealResetSeq)\n}", "func (c *Chain) EndFn(handlerFunc http.HandlerFunc) http.Handler {\n\tif handlerFunc == nil {\n\t\thandlerFunc = http.HandlerFunc(emptyHandler)\n\t}\n\n\treturn c.End(handlerFunc)\n}", "func (r *reducer) stop() {\n\tfor _, m := range r.mappers {\n\t\tm.stop()\n\t}\n\tsyncClose(r.done)\n}", "func (s *BaseBrainfuckListener) ExitLoop(ctx *LoopContext) {}", "func (m *ShifterMock) Finish() {\n\tm.MinimockFinish()\n}", "func (px *Paxos) Done(seq int) {\n\t// Your code here.\n\tpx.peerDone(seq, px.me)\n}", "func (r *Response) End() {\n\tr.apiTest.run()\n}", "func (s *BasejossListener) ExitDoneCMD(ctx *DoneCMDContext) {}", "func (w *fileBufferedWritable) Finish() error {\n\terr := w.bw.Flush()\n\tif err == nil {\n\t\terr = w.file.Sync()\n\t}\n\terr = firstError(err, w.file.Close())\n\tw.bw = nil\n\tw.file = nil\n\treturn err\n}", "func (t *Tailer) onStop() {\n\tlog.Info(\"Closing\", t.path, \"for tailer key\", buildTailerKey(t))\n\tt.file.Close()\n\tt.decoder.Stop()\n}", "func (c *TestCase) FinishCase() {\n\tc.Finish <- true\n\t<-c.Finish\n}", "func (c FinalOutput) Close() {}", "func (s *SubmissionHandler) End(e error) {\n\tdefer s.conn.Close()\n\tmsg := OK\n\tif e != nil {\n\t\tmsg = \"ERROR: \" + e.Error()\n\t\tutil.Log(e, LOG_RECEIVER)\n\t}\n\ts.write(msg)\n}", "func (p *profiler) End() {\n\n\tvar m runtime.MemStats\n\truntime.ReadMemStats(&m)\n\tif !p.closed {\n\t\tp.endTime = time.Now()\n\t\tp.endMemory = bToKb(m.Alloc)\n\t\tp.closed = true\n\t}\n\n\tfor _, eachProfiler := range p.profilers {\n\t\tif !eachProfiler.closed {\n\t\t\teachProfiler.endTime = time.Now()\n\t\t\teachProfiler.endMemory = bToKb(m.Alloc)\n\t\t\teachProfiler.closed = true\n\t\t}\n\t\tif len(eachProfiler.profilers) > 0 
{\n\t\t\teachProfiler.End()\n\t\t}\n\t}\n}", "func (d *basicDisease) endStep() {\n\td.numNodesInfectedBy = make([]uint, d.numNodes)\n}", "func (c *collector) Stop() {\n\tclose(c.stop)\n}", "func (m *HeavySyncMock) Finish() {\n\tm.MinimockFinish()\n}", "func (strm *Stream) Stop() error {\n\tstrm.Mux.Lock()\n\tdefer strm.Mux.Unlock()\n\tstrm.Streak.Deactivate()\n\tstrm.Running = false\n\tif !strm.KeepFiles {\n\t\tdefer func() {\n\t\t\tlogrus.Debugf(\"%s directory is being removed | Stream\", strm.StorePath)\n\t\t\tif err := os.RemoveAll(strm.StorePath); err != nil {\n\t\t\t\tlogrus.Error(err)\n\t\t\t}\n\t\t}()\n\t}\n\tif err := strm.CMD.Process.Kill(); err != nil {\n\t\tif strings.Contains(err.Error(), \"process already finished\") {\n\t\t\treturn nil\n\t\t}\n\t\tif strings.Contains(err.Error(), \"signal: killed\") {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\treturn nil\n}", "func (l *Listener) ExitTarget(ctx *parser.TargetContext) {}", "func (t *Trace) Finish() {\n\tt.t.Finish()\n}", "func (t *Trace) Finish() {\n\tt.t.Finish()\n}", "func (s *BasevhdlListener) ExitWaveform(ctx *WaveformContext) {}", "func (h *LinkerdInfo) Stop() {\n\th.log.Info(\"shutting down\")\n\tclose(h.stopCh)\n}", "func (i *instrumentor) tracingEnd(ctx context.Context, span trace.Span) {\n\tspan.End()\n\n\t// force flush any tracing data since lambda may freeze\n\terr := i.configuration.Flusher.ForceFlush(ctx)\n\tif err != nil {\n\t\terrorLogger.Println(\"failed to force a flush, lambda may freeze before instrumentation exported: \", err)\n\t}\n}", "func (lf *logFile) finalize() error {\n\tif lf.end {\n\t\tpanic(\"unreachable\")\n\t}\n\tbuf := new(bytes.Buffer)\n\terr := binary.Write(buf, binary.LittleEndian, lf.maxTS)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\terr = binary.Write(buf, binary.LittleEndian, fileEndMagic)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\t_, err = lf.fd.Write(buf.Bytes())\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\tlf.end = true\n\n\treturn errors.Trace(lf.fdatasync())\n}", "func (d *D) stop() {\n\tclose(d.stopCh)\n}", "func (mp *zkMasterParticipation) Stop() {\n\tclose(mp.stop)\n\t<-mp.done\n}", "func (h *proxyHandler) FinishPipe(args []any) (replyBuf, error) {\n\th.lock.Lock()\n\tdefer h.lock.Unlock()\n\n\tvar ret replyBuf\n\n\tpipeidv, err := parseUint64(args[0])\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\tpipeid := uint32(pipeidv)\n\n\tf, ok := h.activePipes[pipeid]\n\tif !ok {\n\t\treturn ret, fmt.Errorf(\"finishpipe: no active pipe %d\", pipeid)\n\t}\n\n\t// Wait for the goroutine to complete\n\tf.wg.Wait()\n\t// And only now do we close the write half; this forces the client to call this API\n\tf.w.Close()\n\t// Propagate any errors from the goroutine worker\n\terr = f.err\n\tdelete(h.activePipes, pipeid)\n\treturn ret, err\n}", "func (c *Controller) Finish() {}", "func (s *streamStrategy) Stop() {\n\tclose(s.inputChan)\n\t<-s.done\n}", "func (Mr *Master) ReceiveFinish(filenames []string) {\n\tlog.Println(\"outside of lock\")\n\tMr.Timer.Lock()\n\tlog.Println(\"in lock\")\n\tfor _, filename := range filenames {\n\t\tMr.FileCondition[filename] = 2\n\t}\n\tMr.Timer.Unlock()\n}", "func (o *Output) cleanup() {\r\n\to.writer.Flush()\r\n\to.file.Close()\r\n}", "func (chunk *Chunk) Done() {\n\tfor linenum, rx := range chunk.wantErrs {\n\t\tchunk.report.Errorf(\"\\n%s:%d: expected error matching %q\", chunk.filename, linenum, rx)\n\t}\n}", "func (r *TestRequest) End(offset, length uint64, flags int, errval int) error 
{\n\tr.handleProgressReceived <- &TestProgressUpdate{\n\t\tCookie: r.cookie,\n\t\tOffset: offset,\n\t\tLength: length,\n\t\tFlags: flags,\n\t\tTotal: length,\n\t\tErrval: errval,\n\t\tComplete: true,\n\t}\n\tclose(r.handleProgressReceived)\n\treturn nil\n}", "func (rf *Raft) Stop() {\n\trf.logger.SetOutput(ioutil.Discard)\n}", "func (dw *DirWatcher) Stop() {\n\tdw.qrun <- false\n}", "func (n *Node) PipelineDone() chan struct{} {\n\tdoneCh := make(chan struct{})\n\tgo func() {\n\t\tdefer close(doneCh)\n\t\tTopological(n, func(node *Node) {\n\t\t\t<-node.done()\n\t\t})\n\t}()\n\treturn doneCh\n}", "func (m *ProbeManager) Stop() {\n\tclose(m.done)\n}", "func (task SchTask) End(taskName string, own bool) string {\n\n\tif Debug {\n\t\treturn dbgMessage\n\t}\n\n\tif own {\n\t\ttaskName = task.prefix + taskName\n\t}\n\tcmd := exec.Command(task.bin, _End.Command, _End.taskname, taskName)\n\n\toutput, err := cmd.CombinedOutput()\n\tcatch(output, err)\n\n\treturn string(output)\n}", "func (s *BaseSyslParserListener) ExitTarget(ctx *TargetContext) {}", "func (c *context) Done() <-chan struct{} { return c.c.Done() }", "func (in *IndefiniteObserver) End() {\n\tdefer func() {\n\t\tin.finalizers = nil\n\t\tin.subs = nil\n\t}()\n\n\tfor _, fl := range in.finalizers {\n\t\tfl()\n\t}\n\n\tfor _, sub := range in.subs {\n\t\tif sub.observer == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tsub.End()\n\t}\n}" ]
[ "0.71970665", "0.5926289", "0.563864", "0.5495288", "0.5435768", "0.54234654", "0.53833944", "0.5333229", "0.5251561", "0.52479935", "0.52450943", "0.52362496", "0.51799625", "0.5138691", "0.5135351", "0.5124795", "0.5070243", "0.5032854", "0.5029565", "0.5008167", "0.49917817", "0.49917307", "0.49812213", "0.49702436", "0.49384412", "0.49378216", "0.49317554", "0.49268886", "0.49181712", "0.49096954", "0.4878765", "0.48771396", "0.4876603", "0.48498166", "0.4844863", "0.48348877", "0.4813313", "0.4810609", "0.47873655", "0.47860146", "0.47858396", "0.47836503", "0.4781759", "0.4780628", "0.4779207", "0.4771282", "0.4771282", "0.4769704", "0.47695434", "0.4765233", "0.47635242", "0.47557563", "0.47553226", "0.47508228", "0.4748034", "0.47468707", "0.47459754", "0.47421315", "0.47406477", "0.47379324", "0.47365582", "0.47330636", "0.47160488", "0.4715147", "0.46927544", "0.4686253", "0.46825966", "0.46796307", "0.46704566", "0.46629012", "0.4656889", "0.46540537", "0.46498594", "0.4645778", "0.4644844", "0.46377638", "0.46244812", "0.461563", "0.461563", "0.45936364", "0.45913246", "0.4579586", "0.45730734", "0.4571375", "0.45647264", "0.45629522", "0.45612976", "0.4558203", "0.4553147", "0.45509416", "0.45427698", "0.45352328", "0.4531691", "0.4531238", "0.45280474", "0.45262462", "0.45251173", "0.45207894", "0.4518871", "0.45187402" ]
0.6753699
1
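End fans each context's step out onto its own goroutine, then drains the final context's output channel, exporting every file that passes all registered filters; the range loop ends once that channel is closed. The pattern reduces to a self-contained sketch — the stage channel, accept function, and file names below are illustrative stand-ins, not the goldsmith API.

package main

import "fmt"

func main() {
	// Stand-in for the last context's output channel in End above.
	stage := make(chan string)
	go func() {
		defer close(stage) // closing the channel is what ends End's range loop
		for _, f := range []string{"index.html", "style.css", "notes.txt"} {
			stage <- f
		}
	}()

	// Stand-in for fileFilter.Accept; End skips any file a filter rejects.
	accept := func(name string) bool { return name != "notes.txt" }

	for file := range stage {
		if !accept(file) {
			continue // mirrors `continue export` in the snippet above
		}
		fmt.Println("export:", file) // stand-in for goldsmith.exportFile(file)
	}
}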
New returns a 6-character random code.
func New() string {
	return GenerateReasonableCode(6)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GenerateNewCode() *Code {\n\treturn &Code{randomCode(10)}\n}", "func New() string {\n\tbytes := make([]byte, 16)\n\tsafeRandom(bytes)\n\tbytes[6] = (4 << 4) | (bytes[6] & 0xf)\n\tbytes[8] = bytes[8] & 0x3f\n\tbytes[8] = bytes[8] | 0x80\n\tbuf := make([]byte, 36)\n\thex.Encode(buf[0:8], bytes[0:4])\n\tbuf[8] = '-'\n\thex.Encode(buf[9:13], bytes[4:6])\n\tbuf[13] = '-'\n\thex.Encode(buf[14:18], bytes[6:8])\n\tbuf[18] = '-'\n\thex.Encode(buf[19:23], bytes[8:10])\n\tbuf[23] = '-'\n\thex.Encode(buf[24:], bytes[10:])\n\treturn string(buf)\n}", "func newID() string {\n\tvar b [8]byte\n\t_, err := rand.Read(b[:])\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn fmt.Sprintf(\"%x\", b[:])\n}", "func NewScratchCode() int {\n\tvar r = []rune(\"0123456789\")\n\tb := make([]rune, 8)\n\tfor i := range b {\n\t\t// First character can not be \"0\"\n\t\tif i == 0 {\n\t\t\tb[i] = r[rand.Intn(9)+1]\n\t\t\tcontinue\n\t\t}\n\t\tb[i] = r[rand.Intn(10)]\n\t}\n\tc, _ := strconv.Atoi(string(b))\n\treturn c\n}", "func RandomCodeSix() string {\n\trand.Seed(time.Now().UTC().UnixNano())\n\tcode := 100000 + rand.Intn(900000)\n\treturn strconv.Itoa(code)\n}", "func New(length int) (nid string) {\n nid = \"\"\n\n for i := 0; i < length; i++ {\n var (\n r1 int\n )\n\n r1 = rnd.Intn(9)\n\n if i == 0 {\n for r1 == 0 {\n r1 = rnd.Intn(9)\n }\n }\n\n nid += strconv.Itoa(r1)\n }\n return\n}", "func New() string {\n\tb := make([]byte, ByteSize)\n\t_, err := rand.Read(b)\n\tif err != nil {\n\t\t// Cryptographic pseudo-random number generation shouldn't fail, but\n\t\t// if it does it is probably worth the panic.\n\t\tpanic(fmt.Sprintf(\"uuid: %v\", err))\n\t}\n\treturn fmt.Sprintf(Format, b[:4], b[4:6], b[6:8], b[8:10], b[10:])\n}", "func newRandomID(n int) string {\n\trand.Seed(time.Now().UTC().UnixNano())\n\tsid := make([]rune, n)\n\tfor i := range sid {\n\t\tsid[i] = letters[rand.Intn(len(letters))]\n\t}\n\treturn string(sid)\n}", "func newRandomID(n int) string {\n\trand.Seed(UTCNow().UnixNano())\n\tsid := make([]rune, n)\n\tfor i := range sid {\n\t\tsid[i] = letters[rand.Intn(len(letters))]\n\t}\n\treturn string(sid)\n}", "func newUUID() (string, error) {\n\tuuid := make([]byte, 16)\n\tn, err := io.ReadFull(rand.Reader, uuid)\n\tif n != len(uuid) || err != nil {\n\t\treturn \"\", err\n\t}\n\t// variant bits; see section 4.1.1\n\tuuid[8] = uuid[8]&^0xc0 | 0x80\n\t// version 4 (pseudo-random); see section 4.1.3\n\tuuid[6] = uuid[6]&^0xf0 | 0x40\n\treturn fmt.Sprintf(\"%x-%x-%x-%x-%x\", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:]), nil\n}", "func newUUID() (string, error) {\n\tuuid := make([]byte, 16)\n\tn, err := io.ReadFull(rand.Reader, uuid)\n\tif n != len(uuid) || err != nil {\n\t\treturn \"\", err\n\t}\n\t// variant bits; see section 4.1.1\n\tuuid[8] = uuid[8]&^0xc0 | 0x80\n\t// version 4 (pseudo-random); see section 4.1.3\n\tuuid[6] = uuid[6]&^0xf0 | 0x40\n\treturn fmt.Sprintf(\"%x-%x-%x-%x-%x\", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:]), nil\n}", "func newUUID() (string, error) {\n\tuuid := make([]byte, 16)\n\tn, err := io.ReadFull(rand.Reader, uuid)\n\tif n != len(uuid) || err != nil {\n\t\treturn \"\", err\n\t}\n\t// variant bits; see section 4.1.1\n\tuuid[8] = uuid[8]&^0xc0 | 0x80\n\t// version 4 (pseudo-random); see section 4.1.3\n\tuuid[6] = uuid[6]&^0xf0 | 0x40\n\treturn fmt.Sprintf(\"%x-%x-%x-%x-%x\", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:]), nil\n}", "func newUUID() (string, error) {\n\tuuid := make([]byte, 16)\n\tn, err := io.ReadFull(rand.Reader, uuid)\n\tif n != 
len(uuid) || err != nil {\n\t\treturn \"\", err\n\t}\n\t// variant bits; see section 4.1.1\n\tuuid[8] = uuid[8]&^0xc0 | 0x80\n\t// version 4 (pseudo-random); see section 4.1.3\n\tuuid[6] = uuid[6]&^0xf0 | 0x40\n\treturn fmt.Sprintf(\"%x-%x-%x-%x-%x\", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:]), nil\n}", "func (s *session) newID() (interface{}, error) {\n\tvar b [32]byte\n\t_, err := rand.Read(b[:])\n\treturn hex.EncodeToString(b[:]), err\n}", "func newRandomASCIIString(rng *rand.Rand, minLen, maxLenDiff int) string {\n\tstr := make([]byte, minLen+rng.Intn(maxLenDiff))\n\tconst firstPrintableASCII = 32\n\tconst lastPrintableASCII = 126\n\tfor i := range str {\n\t\tstr[i] = byte(firstPrintableASCII + rng.Intn(lastPrintableASCII-firstPrintableASCII))\n\t}\n\treturn string(str)\n}", "func RandomRefCode(strlen int) string {\n\tconst chars = \"abcdefghijklmnopqrstuvwxyz0123456789\"\n\tresult := make([]byte, strlen)\n\tfor i := 0; i < strlen; i++ {\n\t\tresult[i] = chars[rand.Intn(len(chars))]\n\t}\n\treturn string(result)\n}", "func RandCode() string {\n\trand.Seed(time.Now().UTC().UnixNano())\n\tres := make([]byte, codeLength)\n\tfor i := range res {\n\t\tres[i] = letters[rand.Intn(len(letters))]\n\t}\n\treturn string(res)\n}", "func newUID() ([]byte, error) {\n\t// uuid := make([]byte, 16)\n\t// n, err := io.ReadFull(rand.Reader, uuid)\n\t// if n != len(uuid) || err != nil {\n\t// \treturn nil, err\n\t// }\n\t// // variant bits; see section 4.1.1\n\t// uuid[8] = uuid[8]&^0xc0 | 0x80\n\t// // version 4 (pseudo-random); see section 4.1.3\n\t// uuid[6] = uuid[6]&^0xf0 | 0x40\n\t// return []byte(fmt.Sprintf(\"%x-%x-%x-%x-%x\", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:])), nil\n\treturn []byte(uniuri.New()), nil\n}", "func NewState() (string, error) {\n\trawState := make([]byte, 16)\n\t_, err := rand.Read(rawState)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn hex.EncodeToString(rawState), nil\n}", "func NewUuid() string {\n b := make([]byte, 16)\n rand.Read(b)\n return fmt.Sprintf(\"%x\", b)\n}", "func NewIdentifier() string {\n\tif !seededrng {\n\t\trand.Seed(time.Now().Unix())\n\t\tseededrng = true\n\t}\n\tr := make([]byte, 9) // 9 bytes * (4/3 encoding) = 12 characters\n\trand.Read(r)\n\ts := base64.URLEncoding.EncodeToString(r)\n\treturn s\n}", "func New() string {\n\treturn uuid.NewV4().String()\n}", "func NewUuid62() string {\n return ConvertUp(NewUuid(), base62alphabet)\n}", "func genString(n int) string {\n\tvalidChar := []rune(\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\")\n\ts := make([]rune, n)\n\tfor i := range s {\n\t\ts[i] = validChar[rand.Intn(len(validChar))]\n\t}\n\treturn string(s)\n}", "func New() (str string, err error) {\n\tvar (\n\t\tn int\n\t\tuuid = make([]byte, Size)\n\t)\n\tif n, err = io.ReadFull(rand.Reader, uuid); err != nil {\n\t\treturn\n\t}\n\tif n != Size {\n\t\treturn \"\", ErrUUIDSize\n\t}\n\tuuid[8] = uuid[8]&^0xc0 | 0x80\n\tuuid[6] = uuid[6]&^0xf0 | 0x40\n\tstr = fmt.Sprintf(\"%x-%x-%x-%x-%x\", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:])\n\treturn\n}", "func New() UUID {\n\tbuf := make([]byte, 16)\n\trand.Read(buf)\n\tbuf[6] = (buf[6] & 0x0f) | 0x40\n\tvar uuid UUID\n\tcopy(uuid[:], buf[:])\n\tuuid[8] = (uuid[8] & 0x3f) | 0x80\n\treturn uuid\n}", "func newCard() string {\n\treturn \"five of diamonds\"\n}", "func generateNewToken(hashed_usr string) string {\n\t//generate random string\n\ttoken := GenerateRandomString()\n\tSaveToken(hashed_usr, token)\n\treturn token\n}", "func GenerateRandomCode(length 
int, characters string) string {\n\tif characters == \"\" {\n\t\tcharacters = \"ohruix3yetu5dei7oqu4gothah4Esei6xudez9saejueshuThaj4ooPh1Shi8engahGhiesaeng9meib8iPhaeNg7eikohSh8ae9\"\n\t}\n\n\tb := make([]byte, length)\n\tfor i := range b {\n\t\tb[i] = characters[rand.Int63()%int64(len(characters))]\n\t}\n\treturn string(b)\n}", "func GetNewId() string {\n return srand(5)\n}", "func GenRandomHexCode() string {\n\tvar code string\n\tfor i := 0; i < 3; i++ {\n\t\tcode += fmt.Sprintf(\"%02X\", rand.Intn(255))\n\t}\n\t// fmt.Println(code)\n\treturn code\n}", "func (s *source) new() (UUID, error) {\n\tvar uuid UUID\n\t_, err := io.ReadFull(s.random, uuid[:])\n\tif err != nil {\n\t\treturn UUID{}, err\n\t}\n\tuuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4\n\tuuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10\n\treturn uuid, nil\n}", "func New() string {\n\treturn shortuuid.New()\n}", "func New(charset string) *N36 {\n\tn := &N36{\n\t\tcharset: charset,\n\t\tseedRand: rand.New(rand.NewSource(time.Now().UnixNano())),\n\t}\n\n\treturn n\n}", "func NewID(size int) string {\n\tb := make([]byte, size)\n\tfor i := range b {\n\t\tb[i] = ABC[rand.Intn(len(ABC))]\n\t}\n\n\treturn string(b)\n}", "func GenCode(db *sql.DB) string {\n\tvar letters = []rune(\"ABCDEFGHJKMNPQRSTUVWXYZ23456789\")\n\n\tfor {\n\t\tb := make([]rune, codeLen)\n\t\tfor i := range b {\n\t\t\tb[i] = letters[rand.Intn(len(letters))]\n\t\t}\n\t\tcode := string(b)\n\t\tif !codeExists(db, code) {\n\t\t\treturn code\n\t\t}\n\t}\n}", "func Char() uint8 {\n\tt := \"az\"\n\treturn uint8(rand.Intn(int(t[1] - t[0])) + int(t[0]))\n}", "func newCard() string {\n\treturn \"Five of Diamonds\"\n}", "func newCard() string {\n\treturn \"Five of Diamonds\"\n}", "func createNonce() string {\n\tnonceLen := 42 // taken from their example\n\tsrc := []byte(\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ09123456789\")\n\n\trslt := make([]byte, nonceLen)\n\tfor i := 0; i < nonceLen; i++ {\n\t\trslt[i] = src[rand.Intn(len(src))]\n\t}\n\n\treturn string(rslt)\n}", "func genString(length int) string {\n\tb := make([]byte, length)\n\tfor i := range b {\n\t\tb[i] = charset[seededRand.Intn(len(charset))]\n\t}\n\treturn string(b)\n}", "func createLobbyCode() string {\n\n\t// Create and seed the generator.\n\t// Typically a non-fixed seed should be used, such as time.Now().UnixNano().\n\t// Using a fixed seed will produce the same output on every run.\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\n\t// The tabwriter here helps us generate aligned output.\n\tw := tabwriter.NewWriter(os.Stdout, 1, 1, 1, ' ', 0)\n\tdefer w.Flush()\n\n\tcode := \"\"\n\n\trands := []int{r.Intn(25), r.Intn(25), r.Intn(25), r.Intn(25)}\n\n\tfor _, i := range rands {\n\t\tcode = code + string(toChar(i))\n\t}\n\treturn code\n}", "func NewLotteryCode() int {\n\tseed := time.Now().UnixNano() // seed\n\tr := rand.New(rand.NewSource(seed))\n\treturn r.Intn(config.BASE_LOTTERY_CODE)\n}", "func newid() string {\n\treturn make_key(6)\n}", "func newUUID() (uuid.UUID, error) {\n\tu := [16]byte{}\n\t// Set all bits to randomly (or pseudo-randomly) chosen values.\n\t_, err := rand.Read(u[:])\n\tif err != nil {\n\t\treturn uuid.UUID{}, err\n\t}\n\tu[8] = (u[8]&(0xff>>2) | (0x02 << 6)) // u.setVariant(ReservedRFC4122)\n\tu[6] = (u[6] & 0xF) | (uuid.V4 << 4) // u.setVersion(V4)\n\treturn uuid.FromBytes(u[:])\n}", "func createRandomString(starterString string) string {\n\tresult := starterString + randomString(8)\n\treturn result\n}", "func _() string 
{\n\trand.Seed(time.Now().UnixNano())\n\tchars := []rune(\"ABCDEFGHIJKLMNOPQRSTUVWXYZÅÄÖ\" +\n\t\t\"abcdefghijklmnopqrstuvwxyzåäö\" +\n\t\t\"0123456789\")\n\tlength := 12\n\tvar b strings.Builder\n\tfor i := 0; i < length; i++ {\n\t\tb.WriteRune(chars[rand.Intn(len(chars))])\n\t}\n\treturn b.String()\n}", "func newCard() string {\n\treturn \"Queen of Hearts\"\n}", "func creaParole() string {\n\tvar r rune\n\tvar lenStringGenerata string\n\trand.Seed(time.Now().UnixNano())\n\t\n\tfor i := 0; i < lenString; i++ {\n\t\tr = rune(rand.Intn(26) + 97)\n\t\tlenStringGenerata += string(r)\n\t}\n\treturn lenStringGenerata\n}", "func genFtaa() string {\n\treturn util.RandString(18)\n}", "func (correlation) New() string {\n\treturn utils.NewUUID()\n}", "func GetNewId() string {\n\treturn srand(5)\n}", "func (a *attack) newUDID() {\n\tuuid := make([]byte, 21)\n\tn, err := io.ReadFull(rand.Reader, uuid)\n\tif n != len(uuid) || err != nil {\n\t\tfmt.Printf(\"[*] Error generating UDID: %v\\n\", err)\n\t}\n\ta.udid = fmt.Sprintf(\"%x\", uuid)\n}", "func New() string {\n\tvar result string\n\tbackoff.RetryNotify(func() error { //nolint:errcheck\n\t\tuuid, err := uuid.NewV4()\n\t\tif err != nil {\n\t\t\treturn errors.EnsureStack(err)\n\t\t}\n\t\tresult = uuid.String()\n\t\treturn nil\n\t}, backoff.NewInfiniteBackOff(), func(err error, d time.Duration) error {\n\t\tfmt.Printf(\"error from uuid.NewV4: %v\", err)\n\t\treturn nil\n\t})\n\treturn result\n}", "func NewID(length int) string {\n\tb := make([]byte, length*6/8)\n\t_, err := rand.Read(b)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn base64.RawURLEncoding.EncodeToString(b)\n}", "func NewState() (string, error) {\n\tb, err := random(32)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"could not generate a random: %w\", err)\n\t}\n\treturn base64URLEncode(b), nil\n}", "func generateNewState() string {\n\tvar data [32]byte\n\tif _, err := crand.Read(data[:]); err != nil {\n\t\tpanic(err)\n\t}\n\treturn base64.URLEncoding.EncodeToString(data[:])\n}", "func newSalt(length int) []byte {\n\tsalt := make([]byte, length)\n\t_, _ = rand.Read(salt)\n\tfor i, item := range salt {\n\t\tsalt[i] = sourceStr[item%byte(len(sourceStr))]\n\t}\n\treturn salt\n}", "func New(password string) string {\n\tp, _ := bcrypt.GenerateFromPassword([]byte(password), 10)\n\treturn string(p)\n}", "func New() (UUID, error) {\n\tvar uuid UUID\n\t_, err := io.ReadFull(rander, uuid[:])\n\tif err != nil {\n\t\treturn Nil, err\n\t}\n\tuuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4\n\tuuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10\n\treturn uuid, nil\n}", "func generateCode(length int) string {\n\tcode := \"\"\n\n\tfor i := 0; i < length; i++ {\n\t\tcode += string(codeChars[rand.Intn(len(codeChars))])\n\t}\n\n\tif isDirty(code) {\n\t\treturn generateCode(length)\n\t}\n\n\t// make sure code isn't already in use\n\tif _, ok := Find(code); ok {\n\t\treturn generateCode(length)\n\t}\n\n\treturn code\n}", "func new256Asm() hash.Hash { return nil }", "func GenerateCode() string {\n\tcode, _ := uuid.NewRandom()\n\treturn code.String()\n}", "func randomCode(mappings map[string]string) string {\n\trand.Seed(time.Now().UnixNano())\n\tvar b strings.Builder\n\tb.WriteByte(digits[rand.Intn(len(digits))])\n\tfor i := 1; i < maxRandomCodeLen; i++ {\n\t\tb.WriteByte(chars[rand.Intn(len(chars))])\n\t\t// If long enough, check if exists in mappings, and return if not\n\t\tif i+1 >= minRandomCodeLen {\n\t\t\ts := b.String()\n\t\t\tif _, exists := mappings[s]; !exists {\n\t\t\t\treturn 
s\n\t\t\t}\n\t\t}\n\t}\n\t// Failed to find an unused code? Just retry?\n\treturn randomCode(mappings)\n}", "func genStr(n int) string {\n\tb := make([]byte, n)\n\trand.Read(b)\n\treturn fmt.Sprintf(\"%x\", b)[:n]\n}", "func generateshortid() string {\n\t// It doesn't exist! Generate a new shortid for it\n\t// From: http://stackoverflow.com/questions/22892120/how-to-generate-a-random-string-of-a-fixed-length-in-golang\n\tvar chars = []rune(\"0123456789abcdefghijklmnopqrstuvwxyz\")\n\ts := make([]rune, 6)\n\tfor i := range s {\n\t\ts[i] = chars[rand.Intn(len(chars))]\n\t}\n\n\treturn string(s)\n}", "func generateNewAccount() string {\n\taccount := crypto.GenerateAccount()\n\tpassphrase, err := mnemonic.FromPrivateKey(account.PrivateKey)\n\tif err != nil {\n\t\tfmt.Printf(\"Error creating new account: %s\\n\", err)\n\t} else {\n\t\tfmt.Printf(\"Created new account: %s\\n\", account.Address)\n\t\tfmt.Printf(\"Generated mnemonic: \\\"%s\\\"\\n\", passphrase)\n\t}\n\treturn account.Address.String()\n}", "func newGame() *guessingGame {\n\treturn &guessingGame{\n\t\tnum: rand.Intn(10) + 1,\n\t}\n}", "func NewUUID() string {\n\tuuid := make([]byte, 16)\n\tio.ReadFull(rand.Reader, uuid)\n\tuuid[8] = uuid[8]&^0xc0 | 0x80\n\tuuid[6] = uuid[6]&^0xf0 | 0x40\n\treturn fmt.Sprintf(\"%x-%x-%x-%x-%x\", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:])\n}", "func newID() string {\n\treturn \"_\" + uuid.New().String()\n}", "func newSession() string {\n\tu, _ := uuid.NewV4()\n\treturn u.String()\n}", "func GenerateAlphanumericCode(length uint) (string, error) {\n\tvar result string\n\tfor i := uint(0); i < length; i++ {\n\t\tch, err := randomFromCharset()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tresult = result + ch\n\t}\n\treturn result, nil\n}", "func Generate(l int, noSym bool) string {\n\tseededRand := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tif l == 0 {\n\t\tl = seededRand.Intn(13) + 8\n\t}\n\tb := make([]byte, l)\n\tfor i := range b {\n\t\tif noSym {\n\t\t\tb[i] = charsetNoSym[seededRand.Intn(len(charsetNoSym))]\n\t\t} else {\n\t\t\tb[i] = charset[seededRand.Intn(len(charset))]\n\t\t}\n\t}\n\treturn string(b)\n}", "func RandChar() (c byte, err error) {\n\tmax := big.NewInt(int64(len(Symbols)))\n\tn, err := rand.Int(rand.Reader, max)\n\tif err != nil {\n\t\treturn\n\t}\n\tc = Symbols[n.Int64()]\n\treturn\n}", "func Random() string {\n\tpassword, _ := RandomLength(4, \"en\")\n\treturn password\n}", "func newPwd() (string, error) {\n\tpwLgth := 15\n\tlower := []byte(\"abcdefghijklmnopqrstuvwxyz\")\n\tupper := []byte(\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\")\n\tnumbers := []byte(\"0123456789\")\n\tspecial := []byte(`~!@#$%^&*_-+=|\\(){}[]:;<>,.?/`)\n\tchars := bytes.Join([][]byte{lower, upper, numbers, special}, nil)\n\n\tfor {\n\t\tb := make([]byte, pwLgth)\n\t\tfor i := range b {\n\t\t\tci, err := rand.Int(rand.Reader, big.NewInt(int64(len(chars))))\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\tb[i] = chars[ci.Int64()]\n\t\t}\n\n\t\tvar l, u, n, s int\n\t\tif bytes.ContainsAny(lower, string(b)) {\n\t\t\tl = 1\n\t\t}\n\t\tif bytes.ContainsAny(upper, string(b)) {\n\t\t\tu = 1\n\t\t}\n\t\tif bytes.ContainsAny(numbers, string(b)) {\n\t\t\tn = 1\n\t\t}\n\t\tif bytes.ContainsAny(special, string(b)) {\n\t\t\ts = 1\n\t\t}\n\t\t// If the password does not meet Windows complexity requirements, try again.\n\t\t// https://technet.microsoft.com/en-us/library/cc786468\n\t\tif l+u+n+s >= 3 {\n\t\t\treturn string(b), nil\n\t\t}\n\t}\n}", "func generateToken() string {\n\tcharset 
:= \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n\trand.Seed(time.Now().UnixNano())\n\n\tchars := make([]byte, tokenLength)\n for i := range chars {\n chars[i] = charset[rand.Intn(len(charset))]\n }\n\n\tmsg := string(chars)\n\tfmt.Println(msg)\n\treturn msg\n}", "func RandomString(strlen int) string {\n rand.Seed(time.Now().UTC().UnixNano())\n const chars = \"abcdefghijklmnopqrstuvwxyz\"\n result := make([]byte, strlen)\n for i := 0; i < strlen; i++ {\n result[i] = chars[rand.Intn(len(chars))]\n }\n return string(result)\n}", "func Generate() (string, error) {\n\treturn GenerateLen(strLen)\n}", "func (p *Probe) newRunID() uint16 {\n\treturn uint16(p.runCnt)<<8 + uint16(rand.Intn(0x00ff))\n}", "func GenRandString(length int, elem StringElem) (code string) {\n\tvar pool []string\n\tswitch elem {\n\tcase ElemLowerCharAndNum:\n\t\tpool = []string{\n\t\t\t\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\",\n\t\t\t\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"u\", \"v\", \"w\", \"x\", \"y\", \"z\",\n\t\t}\n\t\tbreak\n\tcase ElemUpperCharAndNum:\n\t\tpool = []string{\n\t\t\t\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\",\n\t\t\t\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\",\n\t\t}\n\t\tbreak\n\tcase ElemAllChar:\n\t\tpool = []string{\n\t\t\t\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\",\n\t\t\t\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\",\n\t\t\t\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"u\", \"v\", \"w\", \"x\", \"y\", \"z\",\n\t\t}\n\t\tbreak\n\tdefault:\n\t\tpool = []string{\n\t\t\t\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\",\n\t\t\t\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\",\n\t\t\t\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"u\", \"v\", \"w\", \"x\", \"y\", \"z\",\n\t\t}\n\t}\n\n\tpoolSize := len(pool)\n\n\tfor i := 1; i <= length; i++ {\n\t\trand.Seed(time.Now().UnixNano())\n\t\tcode += pool[rand.Intn(poolSize)]\n\t}\n\n\treturn code\n}", "func createContextID(n int) string {\n\tb := make([]rune, n)\n\tfor i := range b {\n\t\tb[i] = letterRunes[rand.Intn(len(letterRunes))]\n\t}\n\treturn string(b)\n}", "func NewId() string {\n\t// generate 128 random bits (6 more than standard UUID)\n\tbytes := make([]byte, 16)\n\t_, err := rand.Read(bytes)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// convert them to base 32 encoding\n\ts := base32.StdEncoding.EncodeToString(bytes)\n\treturn strings.ToLower(strings.TrimRight(s, \"=\"))\n}", "func Generate(charset string, length int) string {\n\tvar buffer bytes.Buffer\n\n\trand.Seed(time.Now().UTC().UnixNano())\n\n\tclen := len(charset)\n\n\tfor i := 0; i < length; i++ {\n\t\toffset := rand.Intn(clen)\n\t\tbuffer.WriteString(charset[offset : offset+1])\n\t}\n\n\treturn buffer.String()\n}", "func Generate() string {\n\tbuf := make([]byte, 16)\n\tif _, err := 
rand.Read(buf); err != nil {\n\t\tpanic(fmt.Errorf(\"failed to read random bytes: %v\", err))\n\t}\n\n\treturn fmt.Sprintf(\"%08x-%04x-%04x-%04x-%12x\",\n\t\tbuf[0:4],\n\t\tbuf[4:6],\n\t\tbuf[6:8],\n\t\tbuf[8:10],\n\t\tbuf[10:16])\n}", "func randString(n int) string {\n\tb := make([]byte, n)\n\tfor i := range b {\n\t\tb[i] = charset[rand.Intn(len(charset))]\n\t}\n\treturn string(b)\n}", "func New() (*GUID, error) {\n\tb := new(GUID)\n\t_, err := rand.Read(b[:])\n\tb.setVarient()\n\treturn b, err\n}", "func New(s string) *Seed {\n\th := sha1.New()\n\th.Write([]byte(s))\n\n\tresult := h.Sum(nil)\n\treturn &Seed{seed: fmt.Sprintf(\"%x\", result)}\n}", "func generatePseudoRand() string {\n\talphanum := \"0123456789abcdefghigklmnopqrst\"\n\tvar bytes = make([]byte, 10)\n\trand.Read(bytes)\n\tfor i, b := range bytes {\n\t\tbytes[i] = alphanum[b%byte(len(alphanum))]\n\t}\n\treturn string(bytes)\n}", "func randomString(n int) string {\n\tresult := make([]byte, n)\n\tfor i := range result {\n\t\tresult[i] = charSet[rnd.Intn(len(charSet))]\n\t}\n\treturn string(result)\n}", "func generateRandomString(n int) string {\n\tvar letter = []rune(\"abcdefghijklmnopqrstuvwxyz0123456789\")\n\n\tb := make([]rune, n)\n\trand.Seed(time.Now().UnixNano())\n\tfor i := range b {\n\t\tb[i] = letter[rand.Intn(len(letter))]\n\t}\n\treturn string(b)\n}", "func generateName() string {\n\tserialCode := make([]rune, 2)\n\tserialNumber := 999\n\n\tfor i := range serialCode {\n\t\trandNumber := rand.Intn(len(letters))\n\t\tserialNumber -= randNumber\n\t\tserialCode[i] = letters[randNumber]\n\t}\n\n\treturn fmt.Sprint(string(serialCode), serialNumber)\n}", "func randDigit() rune {\n\treturn rune(byte(rand.Intn(10)) + '0')\n}", "func GenerateCode(length uint) (string, error) {\n\tlimit := big.NewInt(0)\n\tlimit.Exp(big.NewInt(10), big.NewInt(int64(length)), nil)\n\tdigits, err := rand.Int(rand.Reader, limit)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// The zero pad format is variable length based on the length of the request code.\n\tformat := fmt.Sprint(\"%0\", length, \"d\")\n\tresult := fmt.Sprintf(format, digits.Int64())\n\n\treturn result, nil\n}", "func (*dynamicUUIDProvider) New() string {\n\treturn uuid.New()\n}", "func randString(n int) string {\n\tb := make([]byte, n)\n\t_, err := rand.Read(b)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfor i := range b {\n\t\tb[i] = 'a' + (b[i] % 26)\n\t}\n\treturn string(b)\n}", "func genUUID() string {\n\tuuid := make([]byte, 16)\n\tn, err := rand.Read(uuid)\n\tif n != len(uuid) || err != nil {\n\t\treturn strconv.FormatInt(time.Now().UnixNano(), 10)\n\t}\n\t// TODO: verify the two lines implement RFC 4122 correctly\n\tuuid[8] = 0x80 // variant bits see page 5\n\tuuid[4] = 0x40 // version 4 Pseudo Random, see page 7\n\n\treturn hex.EncodeToString(uuid)\n}", "func RandomString(n int) *string {\n\tvar letter = []rune(\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\")\n\n\tb := make([]rune, n)\n\tfor i := range b {\n\t\tb[i] = letter[rand.Intn(len(letter))]\n\t}\n\n\ts := string(b)\n\treturn &s\n}", "func NewUUID() string {\n\tuuid := make([]byte, 16)\n\tn, err := io.ReadFull(rand.Reader, uuid)\n\tif n != len(uuid) || err != nil {\n\t\treturn \"\"\n\t}\n\t// variant bits; see section 4.1.1\n\tuuid[8] = uuid[8]&^0xc0 | 0x80\n\t// version 4 (pseudo-random); see section 4.1.3\n\tuuid[6] = uuid[6]&^0xf0 | 0x40\n\treturn fmt.Sprintf(\"%x-%x-%x-%x-%x\", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:])\n}", "func newName() (n string) {\n\n\tfor unique := 
false; unique == false; {\n\n\t\tchars := make([]rune, 5)\n\t\tchars[0] = rune(rand.Intn(26) + 65)\n\t\tchars[1] = rune(rand.Intn(26) + 65)\n\t\tchars[2] = rune(rand.Intn(10) + 48)\n\t\tchars[3] = rune(rand.Intn(10) + 48)\n\t\tchars[4] = rune(rand.Intn(10) + 48)\n\t\tn = string(chars)\n\n\t\tunique = !robotRegister[n]\n\n\t}\n\n\trobotRegister[n] = true\n\treturn n\n}" ]
[ "0.72599936", "0.721639", "0.6998489", "0.69644547", "0.695688", "0.6882035", "0.68808156", "0.67376745", "0.67370284", "0.6508424", "0.6508424", "0.6508424", "0.6508424", "0.6495184", "0.64481455", "0.6381521", "0.6348876", "0.6322811", "0.63130254", "0.6299039", "0.6296314", "0.6293776", "0.6270109", "0.61812454", "0.6179345", "0.61776197", "0.6171102", "0.61703914", "0.61699134", "0.61544853", "0.61489576", "0.6148025", "0.6146176", "0.61313856", "0.6130464", "0.61204386", "0.61045766", "0.61034024", "0.61034024", "0.6078725", "0.60648215", "0.60629255", "0.60543984", "0.60491186", "0.6033074", "0.5999382", "0.5979979", "0.59766763", "0.5975347", "0.5921394", "0.5914347", "0.59043765", "0.5884362", "0.5880845", "0.5870324", "0.58674693", "0.5848665", "0.58266246", "0.5823931", "0.5812944", "0.57986605", "0.5798491", "0.5795332", "0.57863224", "0.5786251", "0.57680494", "0.5756426", "0.5752363", "0.5739812", "0.5735089", "0.5732289", "0.57215613", "0.5706351", "0.5704948", "0.5703051", "0.5683475", "0.5681689", "0.5681274", "0.5642792", "0.56421363", "0.5635049", "0.5629553", "0.5624228", "0.56193316", "0.5614122", "0.5596359", "0.5594644", "0.5581991", "0.55768424", "0.5566541", "0.5553251", "0.5546024", "0.55456275", "0.55443144", "0.5541258", "0.55396485", "0.5532664", "0.55219334", "0.5517197", "0.55140686" ]
0.72766
0
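The positive document delegates to GenerateReasonableCode(6), whose body is not included in the data. A plausible self-contained equivalent is sketched below — the generateCode name and the unambiguous charset are assumptions, not the actual implementation.

package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

// generateCode returns an n-character code drawn from a charset without
// look-alike characters (no 0/O, 1/l); the charset is a guess at what
// "reasonable" means in the snippet above.
func generateCode(n int) string {
	const charset = "abcdefghjkmnpqrstuvwxyz23456789"
	b := make([]byte, n)
	for i := range b {
		idx, err := rand.Int(rand.Reader, big.NewInt(int64(len(charset))))
		if err != nil {
			panic(err) // a crypto/rand read failure is unrecoverable here
		}
		b[i] = charset[idx.Int64()]
	}
	return string(b)
}

func main() {
	fmt.Println(generateCode(6)) // e.g. "x7qk2m"
}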